Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.263
      1 /*	$NetBSD: if_wm.c,v 1.263 2013/09/08 03:17:02 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Rework how parameters are loaded from the EEPROM.
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.263 2013/09/08 03:17:02 msaitoh Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/systm.h>
     83 #include <sys/callout.h>
     84 #include <sys/mbuf.h>
     85 #include <sys/malloc.h>
     86 #include <sys/kernel.h>
     87 #include <sys/socket.h>
     88 #include <sys/ioctl.h>
     89 #include <sys/errno.h>
     90 #include <sys/device.h>
     91 #include <sys/queue.h>
     92 #include <sys/syslog.h>
     93 
     94 #include <sys/rnd.h>
     95 
     96 #include <net/if.h>
     97 #include <net/if_dl.h>
     98 #include <net/if_media.h>
     99 #include <net/if_ether.h>
    100 
    101 #include <net/bpf.h>
    102 
    103 #include <netinet/in.h>			/* XXX for struct ip */
    104 #include <netinet/in_systm.h>		/* XXX for struct ip */
    105 #include <netinet/ip.h>			/* XXX for struct ip */
    106 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    107 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    108 
    109 #include <sys/bus.h>
    110 #include <sys/intr.h>
    111 #include <machine/endian.h>
    112 
    113 #include <dev/mii/mii.h>
    114 #include <dev/mii/miivar.h>
    115 #include <dev/mii/miidevs.h>
    116 #include <dev/mii/mii_bitbang.h>
    117 #include <dev/mii/ikphyreg.h>
    118 #include <dev/mii/igphyreg.h>
    119 #include <dev/mii/igphyvar.h>
    120 #include <dev/mii/inbmphyreg.h>
    121 
    122 #include <dev/pci/pcireg.h>
    123 #include <dev/pci/pcivar.h>
    124 #include <dev/pci/pcidevs.h>
    125 
    126 #include <dev/pci/if_wmreg.h>
    127 #include <dev/pci/if_wmvar.h>
    128 
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

/*
 * Debug printf, gated on the WM_DEBUG_* category bits in wm_debug.
 * The conditional is wrapped in do { } while (0) so that DPRINTF()
 * is a single statement; the previous bare-"if" form had a
 * dangling-else hazard when used as the body of an if/else.
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
    143 
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth */
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ditto, on the i82547 */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
/* Mask/gc math relies on sc_txnum being a power of two. */
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256	/* ring size on < 82544 (errata) */
#define	WM_NTXDESC_82544	4096	/* ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
/* Likewise, sc_ntxdesc must be a power of two for this mask to work. */
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
/* Ring-index successor functions (wrap via the power-of-two masks). */
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    182 
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 *
	 * The legacy ("wiseman") and newer-queue ("nq") descriptor
	 * formats overlay the same memory; which member is used
	 * depends on the MAC type (see sc_txdescs/sc_nq_txdescs).
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

/* Smaller clump for chips limited to a 256-entry Tx ring (< 82544). */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offsets of individual descriptors within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    212 
/*
 * Software state for transmit jobs.  One entry per in-flight packet;
 * a single job may span several hardware descriptors (txs_firstdesc
 * through txs_lastdesc, txs_ndesc in total).
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
    233 
    234 #define WM_LINKUP_TIMEOUT	50
    235 
    236 static uint16_t swfwphysem[] = {
    237 	SWFW_PHY0_SM,
    238 	SWFW_PHY1_SM,
    239 	SWFW_PHY2_SM,
    240 	SWFW_PHY3_SM
    241 };
    242 
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* our PCI device tag */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;		/* Rx payload alignment offset;
					 * see WM_INIT_RXDESC */

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;		/* ICH8: base of NVM flash region
					 * (used by wm_read_eeprom_ich8) */
	int sc_ich8_flash_bank_size;	/* ICH8: size of an NVM flash bank */
	int sc_nvm_k1_enabled;		/* K1 state from NVM; see the
					 * *_k1_* workaround routines */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
/* Convenience accessors for the control-data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;		/* length of Rx chain so far; see
					 * WM_RXCHAIN_RESET/_LINK */
	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail mbuf of that chain */
	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */

	/* Shadow copies of chip registers, written back on (re)init. */
	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};
    384 
/* Reset the software Rx mbuf chain (sc_rxhead/sc_rxtailp/sc_rxlen) to empty. */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the tail of the software Rx chain. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* Event-counter helpers; compile away when WM_EVENT_COUNTERS is off. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

/* Accessors for the memory-mapped CSR (device register) space. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Flush posted writes by reading back the STATUS register. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* Accessors for the ICH8 flash register window (used for NVM access). */
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    422 
/* Bus (DMA) address of Tx/Rx descriptor (x) within the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Low/high 32-bit halves of a descriptor's DMA address; the high half
 * is 0 when bus_addr_t is 32 bits wide.
 */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

/*
 * bus_dmamap_sync() a run of (n) Tx descriptors starting at index (x)
 * with the given ops, splitting the sync into two calls when the run
 * wraps past the end of the descriptor ring.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor (single-entry, so no wrap handling). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    462 
/*
 * (Re)initialize Rx descriptor (x): point it at its mbuf's buffer,
 * clear the status fields, sync it for device access, and hand it to
 * the chip by writing the index to the RDT (Rx descriptor tail)
 * register.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    496 
/* ifnet entry points, ioctl glue, and power management hooks. */
static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

/* Reset, Rx buffer management, and EEPROM/MAC-address handling. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

/* Receive filter and VLAN configuration. */
static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

/* Interrupt handler and its Tx/Rx/link sub-handlers. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X serdes/fiber) media handling. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) access; one read/write pair per MDIO access method. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (MAC/PHY interconnect) register access. */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM probing, and hardware/firmware semaphores. */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/* ICH8 flash-based NVM access primitives. */
static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);

/* Chip-specific workarounds, manageability, and wake-on-LAN support. */
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);
    615 
/*
 * autoconf(9) attachment glue for the "wm" driver: match/attach/detach
 * entry points and softc size.  DVF_DETACH_SHUTDOWN marks the driver
 * as safe to detach at system shutdown.
 */
CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    618 
    619 /*
    620  * Devices supported by this driver.
    621  */
    622 static const struct wm_product {
    623 	pci_vendor_id_t		wmp_vendor;
    624 	pci_product_id_t	wmp_product;
    625 	const char		*wmp_name;
    626 	wm_chip_type		wmp_type;
    627 	int			wmp_flags;
    628 #define	WMP_F_1000X		0x01
    629 #define	WMP_F_1000T		0x02
    630 #define	WMP_F_SERDES		0x04
    631 } wm_products[] = {
    632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    633 	  "Intel i82542 1000BASE-X Ethernet",
    634 	  WM_T_82542_2_1,	WMP_F_1000X },
    635 
    636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    637 	  "Intel i82543GC 1000BASE-X Ethernet",
    638 	  WM_T_82543,		WMP_F_1000X },
    639 
    640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    641 	  "Intel i82543GC 1000BASE-T Ethernet",
    642 	  WM_T_82543,		WMP_F_1000T },
    643 
    644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    645 	  "Intel i82544EI 1000BASE-T Ethernet",
    646 	  WM_T_82544,		WMP_F_1000T },
    647 
    648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    649 	  "Intel i82544EI 1000BASE-X Ethernet",
    650 	  WM_T_82544,		WMP_F_1000X },
    651 
    652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    653 	  "Intel i82544GC 1000BASE-T Ethernet",
    654 	  WM_T_82544,		WMP_F_1000T },
    655 
    656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    657 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    658 	  WM_T_82544,		WMP_F_1000T },
    659 
    660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    661 	  "Intel i82540EM 1000BASE-T Ethernet",
    662 	  WM_T_82540,		WMP_F_1000T },
    663 
    664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    665 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    666 	  WM_T_82540,		WMP_F_1000T },
    667 
    668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    669 	  "Intel i82540EP 1000BASE-T Ethernet",
    670 	  WM_T_82540,		WMP_F_1000T },
    671 
    672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    673 	  "Intel i82540EP 1000BASE-T Ethernet",
    674 	  WM_T_82540,		WMP_F_1000T },
    675 
    676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    677 	  "Intel i82540EP 1000BASE-T Ethernet",
    678 	  WM_T_82540,		WMP_F_1000T },
    679 
    680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    681 	  "Intel i82545EM 1000BASE-T Ethernet",
    682 	  WM_T_82545,		WMP_F_1000T },
    683 
    684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    685 	  "Intel i82545GM 1000BASE-T Ethernet",
    686 	  WM_T_82545_3,		WMP_F_1000T },
    687 
    688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    689 	  "Intel i82545GM 1000BASE-X Ethernet",
    690 	  WM_T_82545_3,		WMP_F_1000X },
    691 #if 0
    692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    693 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    694 	  WM_T_82545_3,		WMP_F_SERDES },
    695 #endif
    696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    697 	  "Intel i82546EB 1000BASE-T Ethernet",
    698 	  WM_T_82546,		WMP_F_1000T },
    699 
    700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    701 	  "Intel i82546EB 1000BASE-T Ethernet",
    702 	  WM_T_82546,		WMP_F_1000T },
    703 
    704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    705 	  "Intel i82545EM 1000BASE-X Ethernet",
    706 	  WM_T_82545,		WMP_F_1000X },
    707 
    708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    709 	  "Intel i82546EB 1000BASE-X Ethernet",
    710 	  WM_T_82546,		WMP_F_1000X },
    711 
    712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    713 	  "Intel i82546GB 1000BASE-T Ethernet",
    714 	  WM_T_82546_3,		WMP_F_1000T },
    715 
    716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    717 	  "Intel i82546GB 1000BASE-X Ethernet",
    718 	  WM_T_82546_3,		WMP_F_1000X },
    719 #if 0
    720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    721 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    722 	  WM_T_82546_3,		WMP_F_SERDES },
    723 #endif
    724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    725 	  "i82546GB quad-port Gigabit Ethernet",
    726 	  WM_T_82546_3,		WMP_F_1000T },
    727 
    728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    729 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    730 	  WM_T_82546_3,		WMP_F_1000T },
    731 
    732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    733 	  "Intel PRO/1000MT (82546GB)",
    734 	  WM_T_82546_3,		WMP_F_1000T },
    735 
    736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    737 	  "Intel i82541EI 1000BASE-T Ethernet",
    738 	  WM_T_82541,		WMP_F_1000T },
    739 
    740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    741 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    742 	  WM_T_82541,		WMP_F_1000T },
    743 
    744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    745 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    746 	  WM_T_82541,		WMP_F_1000T },
    747 
    748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    749 	  "Intel i82541ER 1000BASE-T Ethernet",
    750 	  WM_T_82541_2,		WMP_F_1000T },
    751 
    752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    753 	  "Intel i82541GI 1000BASE-T Ethernet",
    754 	  WM_T_82541_2,		WMP_F_1000T },
    755 
    756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    757 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    758 	  WM_T_82541_2,		WMP_F_1000T },
    759 
    760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    761 	  "Intel i82541PI 1000BASE-T Ethernet",
    762 	  WM_T_82541_2,		WMP_F_1000T },
    763 
    764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    765 	  "Intel i82547EI 1000BASE-T Ethernet",
    766 	  WM_T_82547,		WMP_F_1000T },
    767 
    768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    769 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    770 	  WM_T_82547,		WMP_F_1000T },
    771 
    772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    773 	  "Intel i82547GI 1000BASE-T Ethernet",
    774 	  WM_T_82547_2,		WMP_F_1000T },
    775 
    776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    777 	  "Intel PRO/1000 PT (82571EB)",
    778 	  WM_T_82571,		WMP_F_1000T },
    779 
    780 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    781 	  "Intel PRO/1000 PF (82571EB)",
    782 	  WM_T_82571,		WMP_F_1000X },
    783 #if 0
    784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    785 	  "Intel PRO/1000 PB (82571EB)",
    786 	  WM_T_82571,		WMP_F_SERDES },
    787 #endif
    788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    789 	  "Intel PRO/1000 QT (82571EB)",
    790 	  WM_T_82571,		WMP_F_1000T },
    791 
    792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    793 	  "Intel i82572EI 1000baseT Ethernet",
    794 	  WM_T_82572,		WMP_F_1000T },
    795 
    796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    797 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    798 	  WM_T_82571,		WMP_F_1000T, },
    799 
    800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    801 	  "Intel i82572EI 1000baseX Ethernet",
    802 	  WM_T_82572,		WMP_F_1000X },
    803 #if 0
    804 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    805 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    806 	  WM_T_82572,		WMP_F_SERDES },
    807 #endif
    808 
    809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    810 	  "Intel i82572EI 1000baseT Ethernet",
    811 	  WM_T_82572,		WMP_F_1000T },
    812 
    813 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    814 	  "Intel i82573E",
    815 	  WM_T_82573,		WMP_F_1000T },
    816 
    817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    818 	  "Intel i82573E IAMT",
    819 	  WM_T_82573,		WMP_F_1000T },
    820 
    821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    822 	  "Intel i82573L Gigabit Ethernet",
    823 	  WM_T_82573,		WMP_F_1000T },
    824 
    825 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    826 	  "Intel i82574L",
    827 	  WM_T_82574,		WMP_F_1000T },
    828 
    829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    830 	  "Intel i82583V",
    831 	  WM_T_82583,		WMP_F_1000T },
    832 
    833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    834 	  "i80003 dual 1000baseT Ethernet",
    835 	  WM_T_80003,		WMP_F_1000T },
    836 
    837 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    838 	  "i80003 dual 1000baseX Ethernet",
    839 	  WM_T_80003,		WMP_F_1000T },
    840 #if 0
    841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
    842 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
    843 	  WM_T_80003,		WMP_F_SERDES },
    844 #endif
    845 
    846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
    847 	  "Intel i80003 1000baseT Ethernet",
    848 	  WM_T_80003,		WMP_F_1000T },
    849 #if 0
    850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
    851 	  "Intel i80003 Gigabit Ethernet (SERDES)",
    852 	  WM_T_80003,		WMP_F_SERDES },
    853 #endif
    854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
    855 	  "Intel i82801H (M_AMT) LAN Controller",
    856 	  WM_T_ICH8,		WMP_F_1000T },
    857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
    858 	  "Intel i82801H (AMT) LAN Controller",
    859 	  WM_T_ICH8,		WMP_F_1000T },
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
    861 	  "Intel i82801H LAN Controller",
    862 	  WM_T_ICH8,		WMP_F_1000T },
    863 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
    864 	  "Intel i82801H (IFE) LAN Controller",
    865 	  WM_T_ICH8,		WMP_F_1000T },
    866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
    867 	  "Intel i82801H (M) LAN Controller",
    868 	  WM_T_ICH8,		WMP_F_1000T },
    869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
    870 	  "Intel i82801H IFE (GT) LAN Controller",
    871 	  WM_T_ICH8,		WMP_F_1000T },
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
    873 	  "Intel i82801H IFE (G) LAN Controller",
    874 	  WM_T_ICH8,		WMP_F_1000T },
    875 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
    876 	  "82801I (AMT) LAN Controller",
    877 	  WM_T_ICH9,		WMP_F_1000T },
    878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
    879 	  "82801I LAN Controller",
    880 	  WM_T_ICH9,		WMP_F_1000T },
    881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
    882 	  "82801I (G) LAN Controller",
    883 	  WM_T_ICH9,		WMP_F_1000T },
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
    885 	  "82801I (GT) LAN Controller",
    886 	  WM_T_ICH9,		WMP_F_1000T },
    887 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
    888 	  "82801I (C) LAN Controller",
    889 	  WM_T_ICH9,		WMP_F_1000T },
    890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
    891 	  "82801I mobile LAN Controller",
    892 	  WM_T_ICH9,		WMP_F_1000T },
    893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
    894 	  "82801I mobile (V) LAN Controller",
    895 	  WM_T_ICH9,		WMP_F_1000T },
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
    897 	  "82801I mobile (AMT) LAN Controller",
    898 	  WM_T_ICH9,		WMP_F_1000T },
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
    900 	  "82567LM-4 LAN Controller",
    901 	  WM_T_ICH9,		WMP_F_1000T },
    902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
    903 	  "82567V-3 LAN Controller",
    904 	  WM_T_ICH9,		WMP_F_1000T },
    905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
    906 	  "82567LM-2 LAN Controller",
    907 	  WM_T_ICH10,		WMP_F_1000T },
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
    909 	  "82567LF-2 LAN Controller",
    910 	  WM_T_ICH10,		WMP_F_1000T },
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
    912 	  "82567LM-3 LAN Controller",
    913 	  WM_T_ICH10,		WMP_F_1000T },
    914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
    915 	  "82567LF-3 LAN Controller",
    916 	  WM_T_ICH10,		WMP_F_1000T },
    917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
    918 	  "82567V-2 LAN Controller",
    919 	  WM_T_ICH10,		WMP_F_1000T },
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
    921 	  "82567V-3? LAN Controller",
    922 	  WM_T_ICH10,		WMP_F_1000T },
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
    924 	  "HANKSVILLE LAN Controller",
    925 	  WM_T_ICH10,		WMP_F_1000T },
    926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
    927 	  "PCH LAN (82577LM) Controller",
    928 	  WM_T_PCH,		WMP_F_1000T },
    929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
    930 	  "PCH LAN (82577LC) Controller",
    931 	  WM_T_PCH,		WMP_F_1000T },
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
    933 	  "PCH LAN (82578DM) Controller",
    934 	  WM_T_PCH,		WMP_F_1000T },
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
    936 	  "PCH LAN (82578DC) Controller",
    937 	  WM_T_PCH,		WMP_F_1000T },
    938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
    939 	  "PCH2 LAN (82579LM) Controller",
    940 	  WM_T_PCH2,		WMP_F_1000T },
    941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
    942 	  "PCH2 LAN (82579V) Controller",
    943 	  WM_T_PCH2,		WMP_F_1000T },
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
    945 	  "82575EB dual-1000baseT Ethernet",
    946 	  WM_T_82575,		WMP_F_1000T },
    947 #if 0
    948 	/*
    949 	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
    950 	 * disabled for now ...
    951 	 */
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
    953 	  "82575EB dual-1000baseX Ethernet (SERDES)",
    954 	  WM_T_82575,		WMP_F_SERDES },
    955 #endif
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
    957 	  "82575GB quad-1000baseT Ethernet",
    958 	  WM_T_82575,		WMP_F_1000T },
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
    960 	  "82575GB quad-1000baseT Ethernet (PM)",
    961 	  WM_T_82575,		WMP_F_1000T },
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
    963 	  "82576 1000BaseT Ethernet",
    964 	  WM_T_82576,		WMP_F_1000T },
    965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
    966 	  "82576 1000BaseX Ethernet",
    967 	  WM_T_82576,		WMP_F_1000X },
    968 #if 0
    969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
    970 	  "82576 gigabit Ethernet (SERDES)",
    971 	  WM_T_82576,		WMP_F_SERDES },
    972 #endif
    973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
    974 	  "82576 quad-1000BaseT Ethernet",
    975 	  WM_T_82576,		WMP_F_1000T },
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
    977 	  "82576 gigabit Ethernet",
    978 	  WM_T_82576,		WMP_F_1000T },
    979 #if 0
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
    981 	  "82576 gigabit Ethernet (SERDES)",
    982 	  WM_T_82576,		WMP_F_SERDES },
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
    984 	  "82576 quad-gigabit Ethernet (SERDES)",
    985 	  WM_T_82576,		WMP_F_SERDES },
    986 #endif
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
    988 	  "82580 1000BaseT Ethernet",
    989 	  WM_T_82580,		WMP_F_1000T },
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
    991 	  "82580 1000BaseX Ethernet",
    992 	  WM_T_82580,		WMP_F_1000X },
    993 #if 0
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
    995 	  "82580 1000BaseT Ethernet (SERDES)",
    996 	  WM_T_82580,		WMP_F_SERDES },
    997 #endif
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
    999 	  "82580 gigabit Ethernet (SGMII)",
   1000 	  WM_T_82580,		WMP_F_1000T },
   1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1002 	  "82580 dual-1000BaseT Ethernet",
   1003 	  WM_T_82580,		WMP_F_1000T },
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
   1005 	  "82580 1000BaseT Ethernet",
   1006 	  WM_T_82580ER,		WMP_F_1000T },
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
   1008 	  "82580 dual-1000BaseT Ethernet",
   1009 	  WM_T_82580ER,		WMP_F_1000T },
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1011 	  "82580 quad-1000BaseX Ethernet",
   1012 	  WM_T_82580,		WMP_F_1000X },
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1014 	  "I350 Gigabit Network Connection",
   1015 	  WM_T_I350,		WMP_F_1000T },
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1017 	  "I350 Gigabit Fiber Network Connection",
   1018 	  WM_T_I350,		WMP_F_1000X },
   1019 #if 0
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1021 	  "I350 Gigabit Backplane Connection",
   1022 	  WM_T_I350,		WMP_F_SERDES },
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1024 	  "I350 Gigabit Connection",
   1025 	  WM_T_I350,		WMP_F_1000T },
   1026 #endif
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1028 	  "I210-T1 Ethernet Server Adapter",
   1029 	  WM_T_I210,		WMP_F_1000T },
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1031 	  "I210 Ethernet (Copper OEM)",
   1032 	  WM_T_I210,		WMP_F_1000T },
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1034 	  "I210 Ethernet (Copper IT)",
   1035 	  WM_T_I210,		WMP_F_1000T },
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1037 	  "I210 Gigabit Ethernet (Fiber)",
   1038 	  WM_T_I210,		WMP_F_1000X },
   1039 #if 0
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1041 	  "I210 Gigabit Ethernet (SERDES)",
   1042 	  WM_T_I210,		WMP_F_SERDES },
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1044 	  "I210 Gigabit Ethernet (SGMII)",
   1045 	  WM_T_I210,		WMP_F_SERDES },
   1046 #endif
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1048 	  "I211 Ethernet (COPPER)",
   1049 	  WM_T_I211,		WMP_F_1000T },
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1051 	  "I217 V Ethernet Connection",
   1052 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1054 	  "I217 LM Ethernet Connection",
   1055 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1057 	  "I218 V Ethernet Connection",
   1058 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1060 	  "I218 LM Ethernet Connection",
   1061 	  WM_T_PCH_LPT,		WMP_F_1000T },
   1062 	{ 0,			0,
   1063 	  NULL,
   1064 	  0,			0 },
   1065 };
   1066 
#ifdef WM_EVENT_COUNTERS
/* Generated names ("txseg1".."txsegN") for the per-segment Tx event counters. */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
   1070 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the indirect I/O window: the
 *	register offset is first stored at I/O offset 0, and the
 *	register's value is then read back from I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
   1080 
   1081 static inline void
   1082 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1083 {
   1084 
   1085 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1086 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1087 }
   1088 
   1089 static inline void
   1090 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1091     uint32_t data)
   1092 {
   1093 	uint32_t regval;
   1094 	int i;
   1095 
   1096 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1097 
   1098 	CSR_WRITE(sc, reg, regval);
   1099 
   1100 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1101 		delay(5);
   1102 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1103 			break;
   1104 	}
   1105 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1106 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
   1107 		    device_xname(sc->sc_dev), reg);
   1108 	}
   1109 }
   1110 
   1111 static inline void
   1112 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1113 {
   1114 	wa->wa_low = htole32(v & 0xffffffffU);
   1115 	if (sizeof(bus_addr_t) == 8)
   1116 		wa->wa_high = htole32((uint64_t) v >> 32);
   1117 	else
   1118 		wa->wa_high = 0;
   1119 }
   1120 
   1121 static void
   1122 wm_set_spiaddrbits(struct wm_softc *sc)
   1123 {
   1124 	uint32_t reg;
   1125 
   1126 	sc->sc_flags |= WM_F_EEPROM_SPI;
   1127 	reg = CSR_READ(sc, WMREG_EECD);
   1128 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1129 }
   1130 
   1131 static const struct wm_product *
   1132 wm_lookup(const struct pci_attach_args *pa)
   1133 {
   1134 	const struct wm_product *wmp;
   1135 
   1136 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1137 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1138 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1139 			return wmp;
   1140 	}
   1141 	return NULL;
   1142 }
   1143 
   1144 static int
   1145 wm_match(device_t parent, cfdata_t cf, void *aux)
   1146 {
   1147 	struct pci_attach_args *pa = aux;
   1148 
   1149 	if (wm_lookup(pa) != NULL)
   1150 		return 1;
   1151 
   1152 	return 0;
   1153 }
   1154 
   1155 static void
   1156 wm_attach(device_t parent, device_t self, void *aux)
   1157 {
   1158 	struct wm_softc *sc = device_private(self);
   1159 	struct pci_attach_args *pa = aux;
   1160 	prop_dictionary_t dict;
   1161 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1162 	pci_chipset_tag_t pc = pa->pa_pc;
   1163 	pci_intr_handle_t ih;
   1164 	const char *intrstr = NULL;
   1165 	const char *eetype, *xname;
   1166 	bus_space_tag_t memt;
   1167 	bus_space_handle_t memh;
   1168 	bus_size_t memsize;
   1169 	int memh_valid;
   1170 	int i, error;
   1171 	const struct wm_product *wmp;
   1172 	prop_data_t ea;
   1173 	prop_number_t pn;
   1174 	uint8_t enaddr[ETHER_ADDR_LEN];
   1175 	uint16_t cfg1, cfg2, swdpin, io3;
   1176 	pcireg_t preg, memtype;
   1177 	uint16_t eeprom_data, apme_mask;
   1178 	uint32_t reg;
   1179 
   1180 	sc->sc_dev = self;
   1181 	callout_init(&sc->sc_tick_ch, 0);
   1182 
   1183 	sc->sc_wmp = wmp = wm_lookup(pa);
   1184 	if (wmp == NULL) {
   1185 		printf("\n");
   1186 		panic("wm_attach: impossible");
   1187 	}
   1188 
   1189 	sc->sc_pc = pa->pa_pc;
   1190 	sc->sc_pcitag = pa->pa_tag;
   1191 
   1192 	if (pci_dma64_available(pa))
   1193 		sc->sc_dmat = pa->pa_dmat64;
   1194 	else
   1195 		sc->sc_dmat = pa->pa_dmat;
   1196 
   1197 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1198 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1199 
   1200 	sc->sc_type = wmp->wmp_type;
   1201 	if (sc->sc_type < WM_T_82543) {
   1202 		if (sc->sc_rev < 2) {
   1203 			aprint_error_dev(sc->sc_dev,
   1204 			    "i82542 must be at least rev. 2\n");
   1205 			return;
   1206 		}
   1207 		if (sc->sc_rev < 3)
   1208 			sc->sc_type = WM_T_82542_2_0;
   1209 	}
   1210 
   1211 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1212 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1213 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   1214 	    || (sc->sc_type == WM_T_I211))
   1215 		sc->sc_flags |= WM_F_NEWQUEUE;
   1216 
   1217 	/* Set device properties (mactype) */
   1218 	dict = device_properties(sc->sc_dev);
   1219 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1220 
   1221 	/*
	 * Map the device.  All devices support memory-mapped access,
   1223 	 * and it is really required for normal operation.
   1224 	 */
   1225 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1226 	switch (memtype) {
   1227 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1228 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1229 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1230 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1231 		break;
   1232 	default:
   1233 		memh_valid = 0;
   1234 		break;
   1235 	}
   1236 
   1237 	if (memh_valid) {
   1238 		sc->sc_st = memt;
   1239 		sc->sc_sh = memh;
   1240 		sc->sc_ss = memsize;
   1241 	} else {
   1242 		aprint_error_dev(sc->sc_dev,
   1243 		    "unable to map device registers\n");
   1244 		return;
   1245 	}
   1246 
   1247 	/*
   1248 	 * In addition, i82544 and later support I/O mapped indirect
   1249 	 * register access.  It is not desirable (nor supported in
   1250 	 * this driver) to use it for normal operation, though it is
   1251 	 * required to work around bugs in some chip versions.
   1252 	 */
   1253 	if (sc->sc_type >= WM_T_82544) {
   1254 		/* First we have to find the I/O BAR. */
   1255 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1256 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1257 			if (memtype == PCI_MAPREG_TYPE_IO)
   1258 				break;
   1259 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1260 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1261 				i += 4;	/* skip high bits, too */
   1262 		}
   1263 		if (i < PCI_MAPREG_END) {
   1264 			/*
   1265 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1266 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
			 * That is not a problem, because the newer chips
			 * do not have this bug.
   1269 			 *
   1270 			 * The i8254x doesn't apparently respond when the
   1271 			 * I/O BAR is 0, which looks somewhat like it's not
   1272 			 * been configured.
   1273 			 */
   1274 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1275 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1276 				aprint_error_dev(sc->sc_dev,
   1277 				    "WARNING: I/O BAR at zero.\n");
   1278 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1279 					0, &sc->sc_iot, &sc->sc_ioh,
   1280 					NULL, &sc->sc_ios) == 0) {
   1281 				sc->sc_flags |= WM_F_IOH_VALID;
   1282 			} else {
   1283 				aprint_error_dev(sc->sc_dev,
   1284 				    "WARNING: unable to map I/O space\n");
   1285 			}
   1286 		}
   1287 
   1288 	}
   1289 
   1290 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1291 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1292 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1293 	if (sc->sc_type < WM_T_82542_2_1)
   1294 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1295 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1296 
   1297 	/* power up chip */
   1298 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1299 	    NULL)) && error != EOPNOTSUPP) {
   1300 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1301 		return;
   1302 	}
   1303 
   1304 	/*
   1305 	 * Map and establish our interrupt.
   1306 	 */
   1307 	if (pci_intr_map(pa, &ih)) {
   1308 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1309 		return;
   1310 	}
   1311 	intrstr = pci_intr_string(pc, ih);
   1312 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1313 	if (sc->sc_ih == NULL) {
   1314 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1315 		if (intrstr != NULL)
   1316 			aprint_error(" at %s", intrstr);
   1317 		aprint_error("\n");
   1318 		return;
   1319 	}
   1320 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1321 
   1322 	/*
   1323 	 * Check the function ID (unit number of the chip).
   1324 	 */
   1325 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1326 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1327 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1328 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1329 	    || (sc->sc_type == WM_T_I350))
   1330 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1331 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1332 	else
   1333 		sc->sc_funcid = 0;
   1334 
   1335 	/*
   1336 	 * Determine a few things about the bus we're connected to.
   1337 	 */
   1338 	if (sc->sc_type < WM_T_82543) {
   1339 		/* We don't really know the bus characteristics here. */
   1340 		sc->sc_bus_speed = 33;
   1341 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1342 		/*
   1343 		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI bus.
   1345 		 */
   1346 		sc->sc_flags |= WM_F_CSA;
   1347 		sc->sc_bus_speed = 66;
   1348 		aprint_verbose_dev(sc->sc_dev,
   1349 		    "Communication Streaming Architecture\n");
   1350 		if (sc->sc_type == WM_T_82547) {
   1351 			callout_init(&sc->sc_txfifo_ch, 0);
   1352 			callout_setfunc(&sc->sc_txfifo_ch,
   1353 					wm_82547_txfifo_stall, sc);
   1354 			aprint_verbose_dev(sc->sc_dev,
   1355 			    "using 82547 Tx FIFO stall work-around\n");
   1356 		}
   1357 	} else if (sc->sc_type >= WM_T_82571) {
   1358 		sc->sc_flags |= WM_F_PCIE;
   1359 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1360 		    && (sc->sc_type != WM_T_ICH10)
   1361 		    && (sc->sc_type != WM_T_PCH)
   1362 		    && (sc->sc_type != WM_T_PCH2)
   1363 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1364 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
   1365 			/* ICH* and PCH* have no PCIe capability registers */
   1366 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1367 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1368 				NULL) == 0)
   1369 				aprint_error_dev(sc->sc_dev,
   1370 				    "unable to find PCIe capability\n");
   1371 		}
   1372 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1373 	} else {
   1374 		reg = CSR_READ(sc, WMREG_STATUS);
   1375 		if (reg & STATUS_BUS64)
   1376 			sc->sc_flags |= WM_F_BUS64;
   1377 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1378 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1379 
   1380 			sc->sc_flags |= WM_F_PCIX;
   1381 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1382 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1383 				aprint_error_dev(sc->sc_dev,
   1384 				    "unable to find PCIX capability\n");
   1385 			else if (sc->sc_type != WM_T_82545_3 &&
   1386 				 sc->sc_type != WM_T_82546_3) {
   1387 				/*
   1388 				 * Work around a problem caused by the BIOS
   1389 				 * setting the max memory read byte count
   1390 				 * incorrectly.
   1391 				 */
   1392 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1393 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1394 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1395 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1396 
   1397 				bytecnt =
   1398 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1399 				    PCIX_CMD_BYTECNT_SHIFT;
   1400 				maxb =
   1401 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1402 				    PCIX_STATUS_MAXB_SHIFT;
   1403 				if (bytecnt > maxb) {
   1404 					aprint_verbose_dev(sc->sc_dev,
   1405 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1406 					    512 << bytecnt, 512 << maxb);
   1407 					pcix_cmd = (pcix_cmd &
   1408 					    ~PCIX_CMD_BYTECNT_MASK) |
   1409 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1410 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1411 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1412 					    pcix_cmd);
   1413 				}
   1414 			}
   1415 		}
   1416 		/*
   1417 		 * The quad port adapter is special; it has a PCIX-PCIX
   1418 		 * bridge on the board, and can run the secondary bus at
   1419 		 * a higher speed.
   1420 		 */
   1421 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1422 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1423 								      : 66;
   1424 		} else if (sc->sc_flags & WM_F_PCIX) {
   1425 			switch (reg & STATUS_PCIXSPD_MASK) {
   1426 			case STATUS_PCIXSPD_50_66:
   1427 				sc->sc_bus_speed = 66;
   1428 				break;
   1429 			case STATUS_PCIXSPD_66_100:
   1430 				sc->sc_bus_speed = 100;
   1431 				break;
   1432 			case STATUS_PCIXSPD_100_133:
   1433 				sc->sc_bus_speed = 133;
   1434 				break;
   1435 			default:
   1436 				aprint_error_dev(sc->sc_dev,
   1437 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1438 				    reg & STATUS_PCIXSPD_MASK);
   1439 				sc->sc_bus_speed = 66;
   1440 				break;
   1441 			}
   1442 		} else
   1443 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1444 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1445 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1446 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1447 	}
   1448 
   1449 	/*
   1450 	 * Allocate the control data structures, and create and load the
   1451 	 * DMA map for it.
   1452 	 *
   1453 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1454 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1455 	 * both sets within the same 4G segment.
   1456 	 */
   1457 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1458 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1459 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1460 	    sizeof(struct wm_control_data_82542) :
   1461 	    sizeof(struct wm_control_data_82544);
   1462 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1463 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1464 		    &sc->sc_cd_rseg, 0)) != 0) {
   1465 		aprint_error_dev(sc->sc_dev,
   1466 		    "unable to allocate control data, error = %d\n",
   1467 		    error);
   1468 		goto fail_0;
   1469 	}
   1470 
   1471 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1472 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1473 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1474 		aprint_error_dev(sc->sc_dev,
   1475 		    "unable to map control data, error = %d\n", error);
   1476 		goto fail_1;
   1477 	}
   1478 
   1479 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1480 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1481 		aprint_error_dev(sc->sc_dev,
   1482 		    "unable to create control data DMA map, error = %d\n",
   1483 		    error);
   1484 		goto fail_2;
   1485 	}
   1486 
   1487 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1488 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1489 		aprint_error_dev(sc->sc_dev,
   1490 		    "unable to load control data DMA map, error = %d\n",
   1491 		    error);
   1492 		goto fail_3;
   1493 	}
   1494 
   1495 	/*
   1496 	 * Create the transmit buffer DMA maps.
   1497 	 */
   1498 	WM_TXQUEUELEN(sc) =
   1499 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1500 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1501 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1502 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1503 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1504 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1505 			aprint_error_dev(sc->sc_dev,
   1506 			    "unable to create Tx DMA map %d, error = %d\n",
   1507 			    i, error);
   1508 			goto fail_4;
   1509 		}
   1510 	}
   1511 
   1512 	/*
   1513 	 * Create the receive buffer DMA maps.
   1514 	 */
   1515 	for (i = 0; i < WM_NRXDESC; i++) {
   1516 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1517 			    MCLBYTES, 0, 0,
   1518 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1519 			aprint_error_dev(sc->sc_dev,
   1520 			    "unable to create Rx DMA map %d error = %d\n",
   1521 			    i, error);
   1522 			goto fail_5;
   1523 		}
   1524 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1525 	}
   1526 
   1527 	/* clear interesting stat counters */
   1528 	CSR_READ(sc, WMREG_COLC);
   1529 	CSR_READ(sc, WMREG_RXERRC);
   1530 
   1531 	/* get PHY control from SMBus to PCIe */
   1532 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1533 	    || (sc->sc_type == WM_T_PCH_LPT))
   1534 		wm_smbustopci(sc);
   1535 
   1536 	/*
   1537 	 * Reset the chip to a known state.
   1538 	 */
   1539 	wm_reset(sc);
   1540 
   1541 	/*
   1542 	 * Get some information about the EEPROM.
   1543 	 */
   1544 	switch (sc->sc_type) {
   1545 	case WM_T_82542_2_0:
   1546 	case WM_T_82542_2_1:
   1547 	case WM_T_82543:
   1548 	case WM_T_82544:
   1549 		/* Microwire */
   1550 		sc->sc_ee_addrbits = 6;
   1551 		break;
   1552 	case WM_T_82540:
   1553 	case WM_T_82545:
   1554 	case WM_T_82545_3:
   1555 	case WM_T_82546:
   1556 	case WM_T_82546_3:
   1557 		/* Microwire */
   1558 		reg = CSR_READ(sc, WMREG_EECD);
   1559 		if (reg & EECD_EE_SIZE)
   1560 			sc->sc_ee_addrbits = 8;
   1561 		else
   1562 			sc->sc_ee_addrbits = 6;
   1563 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
   1564 		break;
   1565 	case WM_T_82541:
   1566 	case WM_T_82541_2:
   1567 	case WM_T_82547:
   1568 	case WM_T_82547_2:
   1569 		reg = CSR_READ(sc, WMREG_EECD);
   1570 		if (reg & EECD_EE_TYPE) {
   1571 			/* SPI */
   1572 			wm_set_spiaddrbits(sc);
   1573 		} else
   1574 			/* Microwire */
   1575 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1576 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
   1577 		break;
   1578 	case WM_T_82571:
   1579 	case WM_T_82572:
   1580 		/* SPI */
   1581 		wm_set_spiaddrbits(sc);
   1582 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
   1583 		break;
   1584 	case WM_T_82573:
   1585 	case WM_T_82574:
   1586 	case WM_T_82583:
   1587 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
   1588 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1589 		else {
   1590 			/* SPI */
   1591 			wm_set_spiaddrbits(sc);
   1592 		}
   1593 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1594 		break;
   1595 	case WM_T_82575:
   1596 	case WM_T_82576:
   1597 	case WM_T_82580:
   1598 	case WM_T_82580ER:
   1599 	case WM_T_I350:
   1600 	case WM_T_80003:
   1601 		/* SPI */
   1602 		wm_set_spiaddrbits(sc);
   1603 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
   1604 		break;
   1605 	case WM_T_ICH8:
   1606 	case WM_T_ICH9:
   1607 	case WM_T_ICH10:
   1608 	case WM_T_PCH:
   1609 	case WM_T_PCH2:
   1610 	case WM_T_PCH_LPT:
   1611 		/* FLASH */
   1612 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
   1613 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1614 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1615 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1616 			aprint_error_dev(sc->sc_dev,
   1617 			    "can't map FLASH registers\n");
   1618 			return;
   1619 		}
   1620 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1621 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1622 						ICH_FLASH_SECTOR_SIZE;
   1623 		sc->sc_ich8_flash_bank_size =
   1624 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1625 		sc->sc_ich8_flash_bank_size -=
   1626 		    (reg & ICH_GFPREG_BASE_MASK);
   1627 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1628 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1629 		break;
   1630 	case WM_T_I210:
   1631 	case WM_T_I211:
   1632 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1633 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
   1634 		break;
   1635 	default:
   1636 		break;
   1637 	}
   1638 
   1639 	/*
   1640 	 * Defer printing the EEPROM type until after verifying the checksum
   1641 	 * This allows the EEPROM type to be printed correctly in the case
   1642 	 * that no EEPROM is attached.
   1643 	 */
   1644 	/*
   1645 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1646 	 * this for later, so we can fail future reads from the EEPROM.
   1647 	 */
   1648 	if (wm_validate_eeprom_checksum(sc)) {
   1649 		/*
   1650 		 * Read twice again because some PCI-e parts fail the
   1651 		 * first check due to the link being in sleep state.
   1652 		 */
   1653 		if (wm_validate_eeprom_checksum(sc))
   1654 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1655 	}
   1656 
   1657 	/* Set device properties (macflags) */
   1658 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1659 
   1660 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1661 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1662 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1663 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
   1664 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1665 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
   1666 	} else {
   1667 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1668 			eetype = "SPI";
   1669 		else
   1670 			eetype = "MicroWire";
   1671 		aprint_verbose_dev(sc->sc_dev,
   1672 		    "%u word (%d address bits) %s EEPROM\n",
   1673 		    1U << sc->sc_ee_addrbits,
   1674 		    sc->sc_ee_addrbits, eetype);
   1675 	}
   1676 
   1677 	switch (sc->sc_type) {
   1678 	case WM_T_82571:
   1679 	case WM_T_82572:
   1680 	case WM_T_82573:
   1681 	case WM_T_82574:
   1682 	case WM_T_82583:
   1683 	case WM_T_80003:
   1684 	case WM_T_ICH8:
   1685 	case WM_T_ICH9:
   1686 	case WM_T_ICH10:
   1687 	case WM_T_PCH:
   1688 	case WM_T_PCH2:
   1689 	case WM_T_PCH_LPT:
   1690 		if (wm_check_mng_mode(sc) != 0)
   1691 			wm_get_hw_control(sc);
   1692 		break;
   1693 	default:
   1694 		break;
   1695 	}
   1696 	wm_get_wakeup(sc);
   1697 	/*
   1698 	 * Read the Ethernet address from the EEPROM, if not first found
   1699 	 * in device properties.
   1700 	 */
   1701 	ea = prop_dictionary_get(dict, "mac-address");
   1702 	if (ea != NULL) {
   1703 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1704 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1705 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1706 	} else {
   1707 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1708 			aprint_error_dev(sc->sc_dev,
   1709 			    "unable to read Ethernet address\n");
   1710 			return;
   1711 		}
   1712 	}
   1713 
   1714 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1715 	    ether_sprintf(enaddr));
   1716 
   1717 	/*
   1718 	 * Read the config info from the EEPROM, and set up various
   1719 	 * bits in the control registers based on their contents.
   1720 	 */
   1721 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1722 	if (pn != NULL) {
   1723 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1724 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1725 	} else {
   1726 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1727 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1728 			return;
   1729 		}
   1730 	}
   1731 
   1732 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1733 	if (pn != NULL) {
   1734 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1735 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1736 	} else {
   1737 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1738 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1739 			return;
   1740 		}
   1741 	}
   1742 
   1743 	/* check for WM_F_WOL */
   1744 	switch (sc->sc_type) {
   1745 	case WM_T_82542_2_0:
   1746 	case WM_T_82542_2_1:
   1747 	case WM_T_82543:
   1748 		/* dummy? */
   1749 		eeprom_data = 0;
   1750 		apme_mask = EEPROM_CFG3_APME;
   1751 		break;
   1752 	case WM_T_82544:
   1753 		apme_mask = EEPROM_CFG2_82544_APM_EN;
   1754 		eeprom_data = cfg2;
   1755 		break;
   1756 	case WM_T_82546:
   1757 	case WM_T_82546_3:
   1758 	case WM_T_82571:
   1759 	case WM_T_82572:
   1760 	case WM_T_82573:
   1761 	case WM_T_82574:
   1762 	case WM_T_82583:
   1763 	case WM_T_80003:
   1764 	default:
   1765 		apme_mask = EEPROM_CFG3_APME;
   1766 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
   1767 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1768 		break;
   1769 	case WM_T_82575:
   1770 	case WM_T_82576:
   1771 	case WM_T_82580:
   1772 	case WM_T_82580ER:
   1773 	case WM_T_I350:
   1774 	case WM_T_ICH8:
   1775 	case WM_T_ICH9:
   1776 	case WM_T_ICH10:
   1777 	case WM_T_PCH:
   1778 	case WM_T_PCH2:
   1779 	case WM_T_PCH_LPT:
   1780 		/* XXX The funcid should be checked on some devices */
   1781 		apme_mask = WUC_APME;
   1782 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1783 		break;
   1784 	}
   1785 
   1786 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   1787 	if ((eeprom_data & apme_mask) != 0)
   1788 		sc->sc_flags |= WM_F_WOL;
   1789 #ifdef WM_DEBUG
   1790 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1791 		printf("WOL\n");
   1792 #endif
   1793 
   1794 	/*
   1795 	 * XXX need special handling for some multiple port cards
	 * to disable a particular port.
   1797 	 */
   1798 
   1799 	if (sc->sc_type >= WM_T_82544) {
   1800 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1801 		if (pn != NULL) {
   1802 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1803 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1804 		} else {
   1805 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1806 				aprint_error_dev(sc->sc_dev,
   1807 				    "unable to read SWDPIN\n");
   1808 				return;
   1809 			}
   1810 		}
   1811 	}
   1812 
   1813 	if (cfg1 & EEPROM_CFG1_ILOS)
   1814 		sc->sc_ctrl |= CTRL_ILOS;
   1815 	if (sc->sc_type >= WM_T_82544) {
   1816 		sc->sc_ctrl |=
   1817 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1818 		    CTRL_SWDPIO_SHIFT;
   1819 		sc->sc_ctrl |=
   1820 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1821 		    CTRL_SWDPINS_SHIFT;
   1822 	} else {
   1823 		sc->sc_ctrl |=
   1824 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1825 		    CTRL_SWDPIO_SHIFT;
   1826 	}
   1827 
   1828 #if 0
   1829 	if (sc->sc_type >= WM_T_82544) {
   1830 		if (cfg1 & EEPROM_CFG1_IPS0)
   1831 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1832 		if (cfg1 & EEPROM_CFG1_IPS1)
   1833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1834 		sc->sc_ctrl_ext |=
   1835 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1836 		    CTRL_EXT_SWDPIO_SHIFT;
   1837 		sc->sc_ctrl_ext |=
   1838 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1839 		    CTRL_EXT_SWDPINS_SHIFT;
   1840 	} else {
   1841 		sc->sc_ctrl_ext |=
   1842 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1843 		    CTRL_EXT_SWDPIO_SHIFT;
   1844 	}
   1845 #endif
   1846 
   1847 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1848 #if 0
   1849 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1850 #endif
   1851 
   1852 	/*
   1853 	 * Set up some register offsets that are different between
   1854 	 * the i82542 and the i82543 and later chips.
   1855 	 */
   1856 	if (sc->sc_type < WM_T_82543) {
   1857 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1858 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1859 	} else {
   1860 		sc->sc_rdt_reg = WMREG_RDT;
   1861 		sc->sc_tdt_reg = WMREG_TDT;
   1862 	}
   1863 
   1864 	if (sc->sc_type == WM_T_PCH) {
   1865 		uint16_t val;
   1866 
   1867 		/* Save the NVM K1 bit setting */
   1868 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
   1869 
   1870 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
   1871 			sc->sc_nvm_k1_enabled = 1;
   1872 		else
   1873 			sc->sc_nvm_k1_enabled = 0;
   1874 	}
   1875 
   1876 	/*
   1877 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   1878 	 * media structures accordingly.
   1879 	 */
   1880 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   1881 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   1882 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   1883 	    || sc->sc_type == WM_T_82573
   1884 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   1885 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   1886 		wm_gmii_mediainit(sc, wmp->wmp_product);
   1887 	} else if (sc->sc_type < WM_T_82543 ||
   1888 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1889 		if (wmp->wmp_flags & WMP_F_1000T)
   1890 			aprint_error_dev(sc->sc_dev,
   1891 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   1892 		wm_tbi_mediainit(sc);
   1893 	} else {
   1894 		switch (sc->sc_type) {
   1895 		case WM_T_82575:
   1896 		case WM_T_82576:
   1897 		case WM_T_82580:
   1898 		case WM_T_82580ER:
   1899 		case WM_T_I350:
   1900 		case WM_T_I210:
   1901 		case WM_T_I211:
   1902 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   1903 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
   1904 			case CTRL_EXT_LINK_MODE_SGMII:
   1905 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
   1906 				sc->sc_flags |= WM_F_SGMII;
   1907 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1908 				    reg | CTRL_EXT_I2C_ENA);
   1909 				wm_gmii_mediainit(sc, wmp->wmp_product);
   1910 				break;
   1911 			case CTRL_EXT_LINK_MODE_1000KX:
   1912 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   1913 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
   1914 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1915 				    reg | CTRL_EXT_I2C_ENA);
   1916 				panic("not supported yet\n");
   1917 				break;
   1918 			case CTRL_EXT_LINK_MODE_GMII:
   1919 			default:
   1920 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   1921 				    reg & ~CTRL_EXT_I2C_ENA);
   1922 				wm_gmii_mediainit(sc, wmp->wmp_product);
   1923 				break;
   1924 			}
   1925 			break;
   1926 		default:
   1927 			if (wmp->wmp_flags & WMP_F_1000X)
   1928 				aprint_error_dev(sc->sc_dev,
   1929 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   1930 			wm_gmii_mediainit(sc, wmp->wmp_product);
   1931 		}
   1932 	}
   1933 
   1934 	ifp = &sc->sc_ethercom.ec_if;
   1935 	xname = device_xname(sc->sc_dev);
   1936 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   1937 	ifp->if_softc = sc;
   1938 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1939 	ifp->if_ioctl = wm_ioctl;
   1940 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   1941 		ifp->if_start = wm_nq_start;
   1942 	else
   1943 		ifp->if_start = wm_start;
   1944 	ifp->if_watchdog = wm_watchdog;
   1945 	ifp->if_init = wm_init;
   1946 	ifp->if_stop = wm_stop;
   1947 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   1948 	IFQ_SET_READY(&ifp->if_snd);
   1949 
   1950 	/* Check for jumbo frame */
   1951 	switch (sc->sc_type) {
   1952 	case WM_T_82573:
   1953 		/* XXX limited to 9234 if ASPM is disabled */
   1954 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
   1955 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
   1956 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1957 		break;
   1958 	case WM_T_82571:
   1959 	case WM_T_82572:
   1960 	case WM_T_82574:
   1961 	case WM_T_82575:
   1962 	case WM_T_82576:
   1963 	case WM_T_82580:
   1964 	case WM_T_82580ER:
   1965 	case WM_T_I350:
   1966 	case WM_T_I210:
   1967 	case WM_T_I211:
   1968 	case WM_T_80003:
   1969 	case WM_T_ICH9:
   1970 	case WM_T_ICH10:
   1971 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   1972 	case WM_T_PCH_LPT:
   1973 		/* XXX limited to 9234 */
   1974 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1975 		break;
   1976 	case WM_T_PCH:
   1977 		/* XXX limited to 4096 */
   1978 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1979 		break;
   1980 	case WM_T_82542_2_0:
   1981 	case WM_T_82542_2_1:
   1982 	case WM_T_82583:
   1983 	case WM_T_ICH8:
   1984 		/* No support for jumbo frame */
   1985 		break;
   1986 	default:
   1987 		/* ETHER_MAX_LEN_JUMBO */
   1988 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1989 		break;
   1990 	}
   1991 
   1992 	/*
   1993 	 * If we're a i82543 or greater, we can support VLANs.
   1994 	 */
   1995 	if (sc->sc_type >= WM_T_82543)
   1996 		sc->sc_ethercom.ec_capabilities |=
   1997 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   1998 
   1999 	/*
   2000 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2001 	 * on i82543 and later.
   2002 	 */
   2003 	if (sc->sc_type >= WM_T_82543) {
   2004 		ifp->if_capabilities |=
   2005 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2006 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2007 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2008 		    IFCAP_CSUM_TCPv6_Tx |
   2009 		    IFCAP_CSUM_UDPv6_Tx;
   2010 	}
   2011 
   2012 	/*
   2013 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2014 	 *
   2015 	 *	82541GI (8086:1076) ... no
   2016 	 *	82572EI (8086:10b9) ... yes
   2017 	 */
   2018 	if (sc->sc_type >= WM_T_82571) {
   2019 		ifp->if_capabilities |=
   2020 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2021 	}
   2022 
   2023 	/*
   2024 	 * If we're a i82544 or greater (except i82547), we can do
   2025 	 * TCP segmentation offload.
   2026 	 */
   2027 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2028 		ifp->if_capabilities |= IFCAP_TSOv4;
   2029 	}
   2030 
   2031 	if (sc->sc_type >= WM_T_82571) {
   2032 		ifp->if_capabilities |= IFCAP_TSOv6;
   2033 	}
   2034 
   2035 	/*
   2036 	 * Attach the interface.
   2037 	 */
   2038 	if_attach(ifp);
   2039 	ether_ifattach(ifp, enaddr);
   2040 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2041 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
   2042 
   2043 #ifdef WM_EVENT_COUNTERS
   2044 	/* Attach event counters. */
   2045 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2046 	    NULL, xname, "txsstall");
   2047 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2048 	    NULL, xname, "txdstall");
   2049 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2050 	    NULL, xname, "txfifo_stall");
   2051 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2052 	    NULL, xname, "txdw");
   2053 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2054 	    NULL, xname, "txqe");
   2055 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2056 	    NULL, xname, "rxintr");
   2057 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2058 	    NULL, xname, "linkintr");
   2059 
   2060 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2061 	    NULL, xname, "rxipsum");
   2062 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2063 	    NULL, xname, "rxtusum");
   2064 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2065 	    NULL, xname, "txipsum");
   2066 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2067 	    NULL, xname, "txtusum");
   2068 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2069 	    NULL, xname, "txtusum6");
   2070 
   2071 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2072 	    NULL, xname, "txtso");
   2073 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2074 	    NULL, xname, "txtso6");
   2075 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2076 	    NULL, xname, "txtsopain");
   2077 
   2078 	for (i = 0; i < WM_NTXSEGS; i++) {
   2079 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
   2080 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2081 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2082 	}
   2083 
   2084 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2085 	    NULL, xname, "txdrop");
   2086 
   2087 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2088 	    NULL, xname, "tu");
   2089 
   2090 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2091 	    NULL, xname, "tx_xoff");
   2092 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2093 	    NULL, xname, "tx_xon");
   2094 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2095 	    NULL, xname, "rx_xoff");
   2096 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2097 	    NULL, xname, "rx_xon");
   2098 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2099 	    NULL, xname, "rx_macctl");
   2100 #endif /* WM_EVENT_COUNTERS */
   2101 
   2102 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2103 		pmf_class_network_register(self, ifp);
   2104 	else
   2105 		aprint_error_dev(self, "couldn't establish power handler\n");
   2106 
   2107 	return;
   2108 
   2109 	/*
   2110 	 * Free any resources we've allocated during the failed attach
   2111 	 * attempt.  Do this in reverse order and fall through.
   2112 	 */
   2113  fail_5:
   2114 	for (i = 0; i < WM_NRXDESC; i++) {
   2115 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2116 			bus_dmamap_destroy(sc->sc_dmat,
   2117 			    sc->sc_rxsoft[i].rxs_dmamap);
   2118 	}
   2119  fail_4:
   2120 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2121 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2122 			bus_dmamap_destroy(sc->sc_dmat,
   2123 			    sc->sc_txsoft[i].txs_dmamap);
   2124 	}
   2125 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2126  fail_3:
   2127 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2128  fail_2:
   2129 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2130 	    sc->sc_cd_size);
   2131  fail_1:
   2132 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2133  fail_0:
   2134 	return;
   2135 }
   2136 
/*
 * wm_detach:
 *
 *	Device detach routine: stop the interface, detach it from the
 *	network stack, and release the resources acquired in wm_attach(),
 *	in roughly the reverse order of acquisition.
 */
static int
wm_detach(device_t self, int flags __unused)
{
	struct wm_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, s;

	/* Block network interrupts while bringing the interface down. */
	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	wm_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(self);

	/* Tell the firmware about the release */
	wm_release_manageability(sc);
	wm_release_hw_control(sc);

	/* Detach any attached PHYs. */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);


	/* Unload RX dmamaps and free mbufs */
	wm_rxdrain(sc);

	/* Free dmamap. It's the same as the end of the wm_attach() function */
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	/*
	 * Unload and destroy the control-data DMA map, then unmap and
	 * free the control-data memory itself.
	 */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sc->sc_cd_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);

	/* Disestablish the interrupt handler */
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	/* Unmap the registers */
	if (sc->sc_ss) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
		sc->sc_ss = 0;
	}

	/* Unmap the I/O space, if one was mapped. */
	if (sc->sc_ios) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return 0;
}
   2203 
   2204 /*
   2205  * wm_tx_offload:
   2206  *
   2207  *	Set up TCP/IP checksumming parameters for the
   2208  *	specified packet.
   2209  */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/*
	 * Find the start of the IP header ("offset"): it follows the
	 * Ethernet header, which may carry a VLAN encapsulation.
	 * (htons() here acts as a byte-swap to host order; it is the
	 * same operation as ntohs() on all supported platforms.)
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return 0;
	}

	/* IP header length, from the checksum metadata set by the stack. */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	/* Offset of the last byte of the IP header (inclusive). */
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	/*
	 * TSO setup: zero the IP length field and seed the TCP checksum
	 * with the pseudo-header sum, as the hardware expects.
	 */
	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				/* Write the zeroed length back into the chain */
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			/* hlen now covers the IP plus TCP headers. */
			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* hlen now covers the IP plus TCP headers. */
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			/* No IP checksum end for v6 (no IP header checksum) */
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* "offset" now points at the start of the TCP/UDP header. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor consumes one Tx descriptor slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return 0;
}
   2398 
   2399 static void
   2400 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   2401 {
   2402 	struct mbuf *m;
   2403 	int i;
   2404 
   2405 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   2406 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   2407 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   2408 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   2409 		    m->m_data, m->m_len, m->m_flags);
   2410 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   2411 	    i, i == 1 ? "" : "s");
   2412 }
   2413 
   2414 /*
   2415  * wm_82547_txfifo_stall:
   2416  *
   2417  *	Callout used to wait for the 82547 Tx FIFO to drain,
   2418  *	reset the FIFO pointers, and restart packet transmission.
   2419  */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	/* Runs from callout context; block network interrupts. */
	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * Consider the FIFO drained when the descriptor ring is
		 * empty (tail == head) and the Tx FIFO head/tail register
		 * pairs (TDFT/TDFH and their saved counterparts
		 * TDFTS/TDFHS) agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			/* NB: the transmitter must be disabled (TCTL_EN
			 * cleared) while the FIFO pointers are rewritten. */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
   2460 
   2461 static void
   2462 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   2463 {
   2464 	uint32_t reg;
   2465 
   2466 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   2467 
   2468 	if (on != 0)
   2469 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   2470 	else
   2471 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   2472 
   2473 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   2474 }
   2475 
   2476 /*
   2477  * wm_82547_txfifo_bugchk:
   2478  *
   2479  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   2480  *	prevent enqueueing a packet that would wrap around the end
   2481  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   2482  *
   2483  *	We do this by checking the amount of space before the end
   2484  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   2485  *	the Tx FIFO, wait for all remaining packets to drain, reset
   2486  *	the internal FIFO pointers to the beginning, and restart
   2487  *	transmission on the interface.
   2488  */
   2489 #define	WM_FIFO_HDR		0x10
   2490 #define	WM_82547_PAD_LEN	0x3e0
   2491 static int
   2492 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   2493 {
   2494 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   2495 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   2496 
   2497 	/* Just return if already stalled. */
   2498 	if (sc->sc_txfifo_stall)
   2499 		return 1;
   2500 
   2501 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   2502 		/* Stall only occurs in half-duplex mode. */
   2503 		goto send_packet;
   2504 	}
   2505 
   2506 	if (len >= WM_82547_PAD_LEN + space) {
   2507 		sc->sc_txfifo_stall = 1;
   2508 		callout_schedule(&sc->sc_txfifo_ch, 1);
   2509 		return 1;
   2510 	}
   2511 
   2512  send_packet:
   2513 	sc->sc_txfifo_head += len;
   2514 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   2515 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   2516 
   2517 	return 0;
   2518 }
   2519 
   2520 /*
   2521  * wm_start:		[ifnet interface function]
   2522  *
   2523  *	Start packet transmission on the interface.
   2524  */
   2525 static void
   2526 wm_start(struct ifnet *ifp)
   2527 {
   2528 	struct wm_softc *sc = ifp->if_softc;
   2529 	struct mbuf *m0;
   2530 	struct m_tag *mtag;
   2531 	struct wm_txsoft *txs;
   2532 	bus_dmamap_t dmamap;
   2533 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   2534 	bus_addr_t curaddr;
   2535 	bus_size_t seglen, curlen;
   2536 	uint32_t cksumcmd;
   2537 	uint8_t cksumfields;
   2538 
   2539 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   2540 		return;
   2541 
   2542 	/*
   2543 	 * Remember the previous number of free descriptors.
   2544 	 */
   2545 	ofree = sc->sc_txfree;
   2546 
   2547 	/*
   2548 	 * Loop through the send queue, setting up transmit descriptors
   2549 	 * until we drain the queue, or use up all available transmit
   2550 	 * descriptors.
   2551 	 */
   2552 	for (;;) {
   2553 		/* Grab a packet off the queue. */
   2554 		IFQ_POLL(&ifp->if_snd, m0);
   2555 		if (m0 == NULL)
   2556 			break;
   2557 
   2558 		DPRINTF(WM_DEBUG_TX,
   2559 		    ("%s: TX: have packet to transmit: %p\n",
   2560 		    device_xname(sc->sc_dev), m0));
   2561 
   2562 		/* Get a work queue entry. */
   2563 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   2564 			wm_txintr(sc);
   2565 			if (sc->sc_txsfree == 0) {
   2566 				DPRINTF(WM_DEBUG_TX,
   2567 				    ("%s: TX: no free job descriptors\n",
   2568 					device_xname(sc->sc_dev)));
   2569 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   2570 				break;
   2571 			}
   2572 		}
   2573 
   2574 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   2575 		dmamap = txs->txs_dmamap;
   2576 
   2577 		use_tso = (m0->m_pkthdr.csum_flags &
   2578 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   2579 
   2580 		/*
   2581 		 * So says the Linux driver:
   2582 		 * The controller does a simple calculation to make sure
   2583 		 * there is enough room in the FIFO before initiating the
   2584 		 * DMA for each buffer.  The calc is:
   2585 		 *	4 = ceil(buffer len / MSS)
   2586 		 * To make sure we don't overrun the FIFO, adjust the max
   2587 		 * buffer len if the MSS drops.
   2588 		 */
   2589 		dmamap->dm_maxsegsz =
   2590 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   2591 		    ? m0->m_pkthdr.segsz << 2
   2592 		    : WTX_MAX_LEN;
   2593 
   2594 		/*
   2595 		 * Load the DMA map.  If this fails, the packet either
   2596 		 * didn't fit in the allotted number of segments, or we
   2597 		 * were short on resources.  For the too-many-segments
   2598 		 * case, we simply report an error and drop the packet,
   2599 		 * since we can't sanely copy a jumbo packet to a single
   2600 		 * buffer.
   2601 		 */
   2602 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   2603 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   2604 		if (error) {
   2605 			if (error == EFBIG) {
   2606 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   2607 				log(LOG_ERR, "%s: Tx packet consumes too many "
   2608 				    "DMA segments, dropping...\n",
   2609 				    device_xname(sc->sc_dev));
   2610 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   2611 				wm_dump_mbuf_chain(sc, m0);
   2612 				m_freem(m0);
   2613 				continue;
   2614 			}
   2615 			/*
   2616 			 * Short on resources, just stop for now.
   2617 			 */
   2618 			DPRINTF(WM_DEBUG_TX,
   2619 			    ("%s: TX: dmamap load failed: %d\n",
   2620 			    device_xname(sc->sc_dev), error));
   2621 			break;
   2622 		}
   2623 
   2624 		segs_needed = dmamap->dm_nsegs;
   2625 		if (use_tso) {
   2626 			/* For sentinel descriptor; see below. */
   2627 			segs_needed++;
   2628 		}
   2629 
   2630 		/*
   2631 		 * Ensure we have enough descriptors free to describe
   2632 		 * the packet.  Note, we always reserve one descriptor
   2633 		 * at the end of the ring due to the semantics of the
   2634 		 * TDT register, plus one more in the event we need
   2635 		 * to load offload context.
   2636 		 */
   2637 		if (segs_needed > sc->sc_txfree - 2) {
   2638 			/*
   2639 			 * Not enough free descriptors to transmit this
   2640 			 * packet.  We haven't committed anything yet,
   2641 			 * so just unload the DMA map, put the packet
   2642 			 * pack on the queue, and punt.  Notify the upper
   2643 			 * layer that there are no more slots left.
   2644 			 */
   2645 			DPRINTF(WM_DEBUG_TX,
   2646 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   2647 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   2648 			    segs_needed, sc->sc_txfree - 1));
   2649 			ifp->if_flags |= IFF_OACTIVE;
   2650 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   2651 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   2652 			break;
   2653 		}
   2654 
   2655 		/*
   2656 		 * Check for 82547 Tx FIFO bug.  We need to do this
   2657 		 * once we know we can transmit the packet, since we
   2658 		 * do some internal FIFO space accounting here.
   2659 		 */
   2660 		if (sc->sc_type == WM_T_82547 &&
   2661 		    wm_82547_txfifo_bugchk(sc, m0)) {
   2662 			DPRINTF(WM_DEBUG_TX,
   2663 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   2664 			    device_xname(sc->sc_dev)));
   2665 			ifp->if_flags |= IFF_OACTIVE;
   2666 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   2667 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   2668 			break;
   2669 		}
   2670 
   2671 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   2672 
   2673 		/*
   2674 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   2675 		 */
   2676 
   2677 		DPRINTF(WM_DEBUG_TX,
   2678 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   2679 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   2680 
   2681 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   2682 
   2683 		/*
   2684 		 * Store a pointer to the packet so that we can free it
   2685 		 * later.
   2686 		 *
   2687 		 * Initially, we consider the number of descriptors the
   2688 		 * packet uses the number of DMA segments.  This may be
   2689 		 * incremented by 1 if we do checksum offload (a descriptor
   2690 		 * is used to set the checksum context).
   2691 		 */
   2692 		txs->txs_mbuf = m0;
   2693 		txs->txs_firstdesc = sc->sc_txnext;
   2694 		txs->txs_ndesc = segs_needed;
   2695 
   2696 		/* Set up offload parameters for this packet. */
   2697 		if (m0->m_pkthdr.csum_flags &
   2698 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   2699 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   2700 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   2701 			if (wm_tx_offload(sc, txs, &cksumcmd,
   2702 					  &cksumfields) != 0) {
   2703 				/* Error message already displayed. */
   2704 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   2705 				continue;
   2706 			}
   2707 		} else {
   2708 			cksumcmd = 0;
   2709 			cksumfields = 0;
   2710 		}
   2711 
   2712 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   2713 
   2714 		/* Sync the DMA map. */
   2715 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   2716 		    BUS_DMASYNC_PREWRITE);
   2717 
   2718 		/*
   2719 		 * Initialize the transmit descriptor.
   2720 		 */
   2721 		for (nexttx = sc->sc_txnext, seg = 0;
   2722 		     seg < dmamap->dm_nsegs; seg++) {
   2723 			for (seglen = dmamap->dm_segs[seg].ds_len,
   2724 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   2725 			     seglen != 0;
   2726 			     curaddr += curlen, seglen -= curlen,
   2727 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   2728 				curlen = seglen;
   2729 
   2730 				/*
   2731 				 * So says the Linux driver:
   2732 				 * Work around for premature descriptor
   2733 				 * write-backs in TSO mode.  Append a
   2734 				 * 4-byte sentinel descriptor.
   2735 				 */
   2736 				if (use_tso &&
   2737 				    seg == dmamap->dm_nsegs - 1 &&
   2738 				    curlen > 8)
   2739 					curlen -= 4;
   2740 
   2741 				wm_set_dma_addr(
   2742 				    &sc->sc_txdescs[nexttx].wtx_addr,
   2743 				    curaddr);
   2744 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   2745 				    htole32(cksumcmd | curlen);
   2746 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   2747 				    0;
   2748 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   2749 				    cksumfields;
   2750 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   2751 				lasttx = nexttx;
   2752 
   2753 				DPRINTF(WM_DEBUG_TX,
   2754 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   2755 				     "len %#04zx\n",
   2756 				    device_xname(sc->sc_dev), nexttx,
   2757 				    (uint64_t)curaddr, curlen));
   2758 			}
   2759 		}
   2760 
   2761 		KASSERT(lasttx != -1);
   2762 
   2763 		/*
   2764 		 * Set up the command byte on the last descriptor of
   2765 		 * the packet.  If we're in the interrupt delay window,
   2766 		 * delay the interrupt.
   2767 		 */
   2768 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   2769 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   2770 
   2771 		/*
   2772 		 * If VLANs are enabled and the packet has a VLAN tag, set
   2773 		 * up the descriptor to encapsulate the packet for us.
   2774 		 *
   2775 		 * This is only valid on the last descriptor of the packet.
   2776 		 */
   2777 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   2778 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   2779 			    htole32(WTX_CMD_VLE);
   2780 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   2781 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2782 		}
   2783 
   2784 		txs->txs_lastdesc = lasttx;
   2785 
   2786 		DPRINTF(WM_DEBUG_TX,
   2787 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   2788 		    device_xname(sc->sc_dev),
   2789 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   2790 
   2791 		/* Sync the descriptors we're using. */
   2792 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   2793 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2794 
   2795 		/* Give the packet to the chip. */
   2796 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   2797 
   2798 		DPRINTF(WM_DEBUG_TX,
   2799 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   2800 
   2801 		DPRINTF(WM_DEBUG_TX,
   2802 		    ("%s: TX: finished transmitting packet, job %d\n",
   2803 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   2804 
   2805 		/* Advance the tx pointer. */
   2806 		sc->sc_txfree -= txs->txs_ndesc;
   2807 		sc->sc_txnext = nexttx;
   2808 
   2809 		sc->sc_txsfree--;
   2810 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   2811 
   2812 		/* Pass the packet to any BPF listeners. */
   2813 		bpf_mtap(ifp, m0);
   2814 	}
   2815 
   2816 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   2817 		/* No more slots; notify upper layer. */
   2818 		ifp->if_flags |= IFF_OACTIVE;
   2819 	}
   2820 
   2821 	if (sc->sc_txfree != ofree) {
   2822 		/* Set a watchdog timer in case the chip flakes out. */
   2823 		ifp->if_timer = 5;
   2824 	}
   2825 }
   2826 
   2827 /*
   2828  * wm_nq_tx_offload:
   2829  *
   2830  *	Set up TCP/IP checksumming parameters for the
   2831  *	specified packet, for NEWQUEUE devices
   2832  */
   2833 static int
   2834 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   2835     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   2836 {
   2837 	struct mbuf *m0 = txs->txs_mbuf;
   2838 	struct m_tag *mtag;
   2839 	uint32_t vl_len, mssidx, cmdc;
   2840 	struct ether_header *eh;
   2841 	int offset, iphl;
   2842 
   2843 	/*
   2844 	 * XXX It would be nice if the mbuf pkthdr had offset
   2845 	 * fields for the protocol headers.
   2846 	 */
   2847 	*cmdlenp = 0;
   2848 	*fieldsp = 0;
   2849 
   2850 	eh = mtod(m0, struct ether_header *);
   2851 	switch (htons(eh->ether_type)) {
   2852 	case ETHERTYPE_IP:
   2853 	case ETHERTYPE_IPV6:
   2854 		offset = ETHER_HDR_LEN;
   2855 		break;
   2856 
   2857 	case ETHERTYPE_VLAN:
   2858 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2859 		break;
   2860 
   2861 	default:
   2862 		/*
   2863 		 * Don't support this protocol or encapsulation.
   2864 		 */
   2865 		*do_csum = false;
   2866 		return 0;
   2867 	}
   2868 	*do_csum = true;
   2869 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   2870 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   2871 
   2872 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   2873 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   2874 
   2875 	if ((m0->m_pkthdr.csum_flags &
   2876 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   2877 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   2878 	} else {
   2879 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   2880 	}
   2881 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   2882 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   2883 
   2884 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   2885 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   2886 		     << NQTXC_VLLEN_VLAN_SHIFT);
   2887 		*cmdlenp |= NQTX_CMD_VLE;
   2888 	}
   2889 
   2890 	mssidx = 0;
   2891 
   2892 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   2893 		int hlen = offset + iphl;
   2894 		int tcp_hlen;
   2895 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   2896 
   2897 		if (__predict_false(m0->m_len <
   2898 				    (hlen + sizeof(struct tcphdr)))) {
   2899 			/*
   2900 			 * TCP/IP headers are not in the first mbuf; we need
   2901 			 * to do this the slow and painful way.  Let's just
   2902 			 * hope this doesn't happen very often.
   2903 			 */
   2904 			struct tcphdr th;
   2905 
   2906 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   2907 
   2908 			m_copydata(m0, hlen, sizeof(th), &th);
   2909 			if (v4) {
   2910 				struct ip ip;
   2911 
   2912 				m_copydata(m0, offset, sizeof(ip), &ip);
   2913 				ip.ip_len = 0;
   2914 				m_copyback(m0,
   2915 				    offset + offsetof(struct ip, ip_len),
   2916 				    sizeof(ip.ip_len), &ip.ip_len);
   2917 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   2918 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   2919 			} else {
   2920 				struct ip6_hdr ip6;
   2921 
   2922 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   2923 				ip6.ip6_plen = 0;
   2924 				m_copyback(m0,
   2925 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   2926 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   2927 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   2928 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   2929 			}
   2930 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   2931 			    sizeof(th.th_sum), &th.th_sum);
   2932 
   2933 			tcp_hlen = th.th_off << 2;
   2934 		} else {
   2935 			/*
   2936 			 * TCP/IP headers are in the first mbuf; we can do
   2937 			 * this the easy way.
   2938 			 */
   2939 			struct tcphdr *th;
   2940 
   2941 			if (v4) {
   2942 				struct ip *ip =
   2943 				    (void *)(mtod(m0, char *) + offset);
   2944 				th = (void *)(mtod(m0, char *) + hlen);
   2945 
   2946 				ip->ip_len = 0;
   2947 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2948 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2949 			} else {
   2950 				struct ip6_hdr *ip6 =
   2951 				    (void *)(mtod(m0, char *) + offset);
   2952 				th = (void *)(mtod(m0, char *) + hlen);
   2953 
   2954 				ip6->ip6_plen = 0;
   2955 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   2956 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   2957 			}
   2958 			tcp_hlen = th->th_off << 2;
   2959 		}
   2960 		hlen += tcp_hlen;
   2961 		*cmdlenp |= NQTX_CMD_TSE;
   2962 
   2963 		if (v4) {
   2964 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   2965 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   2966 		} else {
   2967 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   2968 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   2969 		}
   2970 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   2971 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   2972 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   2973 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   2974 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   2975 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   2976 	} else {
   2977 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   2978 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   2979 	}
   2980 
   2981 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   2982 		*fieldsp |= NQTXD_FIELDS_IXSM;
   2983 		cmdc |= NQTXC_CMD_IP4;
   2984 	}
   2985 
   2986 	if (m0->m_pkthdr.csum_flags &
   2987 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   2988 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   2989 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   2990 			cmdc |= NQTXC_CMD_TCP;
   2991 		} else {
   2992 			cmdc |= NQTXC_CMD_UDP;
   2993 		}
   2994 		cmdc |= NQTXC_CMD_IP4;
   2995 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   2996 	}
   2997 	if (m0->m_pkthdr.csum_flags &
   2998 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   2999 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   3000 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   3001 			cmdc |= NQTXC_CMD_TCP;
   3002 		} else {
   3003 			cmdc |= NQTXC_CMD_UDP;
   3004 		}
   3005 		cmdc |= NQTXC_CMD_IP6;
   3006 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   3007 	}
   3008 
   3009 	/* Fill in the context descriptor. */
   3010 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   3011 	    htole32(vl_len);
   3012 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   3013 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   3014 	    htole32(cmdc);
   3015 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   3016 	    htole32(mssidx);
   3017 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   3018 	DPRINTF(WM_DEBUG_TX,
   3019 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   3020 	    sc->sc_txnext, 0, vl_len));
   3021 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   3022 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   3023 	txs->txs_ndesc++;
   3024 	return 0;
   3025 }
   3026 
   3027 /*
   3028  * wm_nq_start:		[ifnet interface function]
   3029  *
   3030  *	Start packet transmission on the interface for NEWQUEUE devices
   3031  */
   3032 static void
   3033 wm_nq_start(struct ifnet *ifp)
   3034 {
   3035 	struct wm_softc *sc = ifp->if_softc;
   3036 	struct mbuf *m0;
   3037 	struct m_tag *mtag;
   3038 	struct wm_txsoft *txs;
   3039 	bus_dmamap_t dmamap;
   3040 	int error, nexttx, lasttx = -1, seg, segs_needed;
   3041 	bool do_csum, sent;
   3042 
   3043 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   3044 		return;
   3045 
   3046 	sent = false;
   3047 
   3048 	/*
   3049 	 * Loop through the send queue, setting up transmit descriptors
   3050 	 * until we drain the queue, or use up all available transmit
   3051 	 * descriptors.
   3052 	 */
   3053 	for (;;) {
   3054 		/* Grab a packet off the queue. */
   3055 		IFQ_POLL(&ifp->if_snd, m0);
   3056 		if (m0 == NULL)
   3057 			break;
   3058 
   3059 		DPRINTF(WM_DEBUG_TX,
   3060 		    ("%s: TX: have packet to transmit: %p\n",
   3061 		    device_xname(sc->sc_dev), m0));
   3062 
   3063 		/* Get a work queue entry. */
   3064 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   3065 			wm_txintr(sc);
   3066 			if (sc->sc_txsfree == 0) {
   3067 				DPRINTF(WM_DEBUG_TX,
   3068 				    ("%s: TX: no free job descriptors\n",
   3069 					device_xname(sc->sc_dev)));
   3070 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   3071 				break;
   3072 			}
   3073 		}
   3074 
   3075 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   3076 		dmamap = txs->txs_dmamap;
   3077 
   3078 		/*
   3079 		 * Load the DMA map.  If this fails, the packet either
   3080 		 * didn't fit in the allotted number of segments, or we
   3081 		 * were short on resources.  For the too-many-segments
   3082 		 * case, we simply report an error and drop the packet,
   3083 		 * since we can't sanely copy a jumbo packet to a single
   3084 		 * buffer.
   3085 		 */
   3086 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   3087 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   3088 		if (error) {
   3089 			if (error == EFBIG) {
   3090 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   3091 				log(LOG_ERR, "%s: Tx packet consumes too many "
   3092 				    "DMA segments, dropping...\n",
   3093 				    device_xname(sc->sc_dev));
   3094 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   3095 				wm_dump_mbuf_chain(sc, m0);
   3096 				m_freem(m0);
   3097 				continue;
   3098 			}
   3099 			/*
   3100 			 * Short on resources, just stop for now.
   3101 			 */
   3102 			DPRINTF(WM_DEBUG_TX,
   3103 			    ("%s: TX: dmamap load failed: %d\n",
   3104 			    device_xname(sc->sc_dev), error));
   3105 			break;
   3106 		}
   3107 
   3108 		segs_needed = dmamap->dm_nsegs;
   3109 
   3110 		/*
   3111 		 * Ensure we have enough descriptors free to describe
   3112 		 * the packet.  Note, we always reserve one descriptor
   3113 		 * at the end of the ring due to the semantics of the
   3114 		 * TDT register, plus one more in the event we need
   3115 		 * to load offload context.
   3116 		 */
   3117 		if (segs_needed > sc->sc_txfree - 2) {
   3118 			/*
   3119 			 * Not enough free descriptors to transmit this
   3120 			 * packet.  We haven't committed anything yet,
   3121 			 * so just unload the DMA map, put the packet
   3122 			 * pack on the queue, and punt.  Notify the upper
   3123 			 * layer that there are no more slots left.
   3124 			 */
   3125 			DPRINTF(WM_DEBUG_TX,
   3126 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   3127 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   3128 			    segs_needed, sc->sc_txfree - 1));
   3129 			ifp->if_flags |= IFF_OACTIVE;
   3130 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   3131 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   3132 			break;
   3133 		}
   3134 
   3135 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   3136 
   3137 		/*
   3138 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   3139 		 */
   3140 
   3141 		DPRINTF(WM_DEBUG_TX,
   3142 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   3143 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   3144 
   3145 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   3146 
   3147 		/*
   3148 		 * Store a pointer to the packet so that we can free it
   3149 		 * later.
   3150 		 *
   3151 		 * Initially, we consider the number of descriptors the
   3152 		 * packet uses the number of DMA segments.  This may be
   3153 		 * incremented by 1 if we do checksum offload (a descriptor
   3154 		 * is used to set the checksum context).
   3155 		 */
   3156 		txs->txs_mbuf = m0;
   3157 		txs->txs_firstdesc = sc->sc_txnext;
   3158 		txs->txs_ndesc = segs_needed;
   3159 
   3160 		/* Set up offload parameters for this packet. */
   3161 		uint32_t cmdlen, fields, dcmdlen;
   3162 		if (m0->m_pkthdr.csum_flags &
   3163 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   3164 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   3165 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   3166 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   3167 			    &do_csum) != 0) {
   3168 				/* Error message already displayed. */
   3169 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   3170 				continue;
   3171 			}
   3172 		} else {
   3173 			do_csum = false;
   3174 			cmdlen = 0;
   3175 			fields = 0;
   3176 		}
   3177 
   3178 		/* Sync the DMA map. */
   3179 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   3180 		    BUS_DMASYNC_PREWRITE);
   3181 
   3182 		/*
   3183 		 * Initialize the first transmit descriptor.
   3184 		 */
   3185 		nexttx = sc->sc_txnext;
   3186 		if (!do_csum) {
   3187 			/* setup a legacy descriptor */
   3188 			wm_set_dma_addr(
   3189 			    &sc->sc_txdescs[nexttx].wtx_addr,
   3190 			    dmamap->dm_segs[0].ds_addr);
   3191 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   3192 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   3193 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   3194 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   3195 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   3196 			    NULL) {
   3197 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   3198 				    htole32(WTX_CMD_VLE);
   3199 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   3200 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3201 			} else {
   3202 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   3203 			}
   3204 			dcmdlen = 0;
   3205 		} else {
   3206 			/* setup an advanced data descriptor */
   3207 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   3208 			    htole64(dmamap->dm_segs[0].ds_addr);
   3209 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   3210 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   3211 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   3212 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   3213 			    htole32(fields);
   3214 			DPRINTF(WM_DEBUG_TX,
   3215 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   3216 			    device_xname(sc->sc_dev), nexttx,
   3217 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   3218 			DPRINTF(WM_DEBUG_TX,
   3219 			    ("\t 0x%08x%08x\n", fields,
   3220 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   3221 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   3222 		}
   3223 
   3224 		lasttx = nexttx;
   3225 		nexttx = WM_NEXTTX(sc, nexttx);
   3226 		/*
   3227 		 * fill in the next descriptors. legacy or adcanced format
   3228 		 * is the same here
   3229 		 */
   3230 		for (seg = 1; seg < dmamap->dm_nsegs;
   3231 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   3232 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   3233 			    htole64(dmamap->dm_segs[seg].ds_addr);
   3234 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   3235 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   3236 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   3237 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   3238 			lasttx = nexttx;
   3239 
   3240 			DPRINTF(WM_DEBUG_TX,
   3241 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   3242 			     "len %#04zx\n",
   3243 			    device_xname(sc->sc_dev), nexttx,
   3244 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   3245 			    dmamap->dm_segs[seg].ds_len));
   3246 		}
   3247 
   3248 		KASSERT(lasttx != -1);
   3249 
   3250 		/*
   3251 		 * Set up the command byte on the last descriptor of
   3252 		 * the packet.  If we're in the interrupt delay window,
   3253 		 * delay the interrupt.
   3254 		 */
   3255 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   3256 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   3257 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   3258 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   3259 
   3260 		txs->txs_lastdesc = lasttx;
   3261 
   3262 		DPRINTF(WM_DEBUG_TX,
   3263 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   3264 		    device_xname(sc->sc_dev),
   3265 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   3266 
   3267 		/* Sync the descriptors we're using. */
   3268 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   3269 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3270 
   3271 		/* Give the packet to the chip. */
   3272 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   3273 		sent = true;
   3274 
   3275 		DPRINTF(WM_DEBUG_TX,
   3276 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   3277 
   3278 		DPRINTF(WM_DEBUG_TX,
   3279 		    ("%s: TX: finished transmitting packet, job %d\n",
   3280 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   3281 
   3282 		/* Advance the tx pointer. */
   3283 		sc->sc_txfree -= txs->txs_ndesc;
   3284 		sc->sc_txnext = nexttx;
   3285 
   3286 		sc->sc_txsfree--;
   3287 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   3288 
   3289 		/* Pass the packet to any BPF listeners. */
   3290 		bpf_mtap(ifp, m0);
   3291 	}
   3292 
   3293 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   3294 		/* No more slots; notify upper layer. */
   3295 		ifp->if_flags |= IFF_OACTIVE;
   3296 	}
   3297 
   3298 	if (sent) {
   3299 		/* Set a watchdog timer in case the chip flakes out. */
   3300 		ifp->if_timer = 5;
   3301 	}
   3302 }
   3303 
   3304 /*
   3305  * wm_watchdog:		[ifnet interface function]
   3306  *
   3307  *	Watchdog timer handler.
   3308  */
   3309 static void
   3310 wm_watchdog(struct ifnet *ifp)
   3311 {
   3312 	struct wm_softc *sc = ifp->if_softc;
   3313 
   3314 	/*
   3315 	 * Since we're using delayed interrupts, sweep up
   3316 	 * before we report an error.
   3317 	 */
   3318 	wm_txintr(sc);
   3319 
   3320 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   3321 #ifdef WM_DEBUG
   3322 		int i, j;
   3323 		struct wm_txsoft *txs;
   3324 #endif
   3325 		log(LOG_ERR,
   3326 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3327 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   3328 		    sc->sc_txnext);
   3329 		ifp->if_oerrors++;
   3330 #ifdef WM_DEBUG
   3331 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
   3332 		    i = WM_NEXTTXS(sc, i)) {
   3333 		    txs = &sc->sc_txsoft[i];
   3334 		    printf("txs %d tx %d -> %d\n",
   3335 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3336 		    for (j = txs->txs_firstdesc; ;
   3337 			j = WM_NEXTTX(sc, j)) {
   3338 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3339 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
   3340 			printf("\t %#08x%08x\n",
   3341 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
   3342 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
   3343 			if (j == txs->txs_lastdesc)
   3344 				break;
   3345 			}
   3346 		}
   3347 #endif
   3348 		/* Reset the interface. */
   3349 		(void) wm_init(ifp);
   3350 	}
   3351 
   3352 	/* Try to get more packets going. */
   3353 	ifp->if_start(ifp);
   3354 }
   3355 
   3356 static int
   3357 wm_ifflags_cb(struct ethercom *ec)
   3358 {
   3359 	struct ifnet *ifp = &ec->ec_if;
   3360 	struct wm_softc *sc = ifp->if_softc;
   3361 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3362 
   3363 	if (change != 0)
   3364 		sc->sc_if_flags = ifp->if_flags;
   3365 
   3366 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
   3367 		return ENETRESET;
   3368 
   3369 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3370 		wm_set_filter(sc);
   3371 
   3372 	wm_set_vlan(sc);
   3373 
   3374 	return 0;
   3375 }
   3376 
   3377 /*
   3378  * wm_ioctl:		[ifnet interface function]
   3379  *
   3380  *	Handle control requests from the operator.
   3381  */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			/* Remember the requested flow-control setting. */
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		/*
		 * For an AF_LINK address, copy the new link-level address
		 * into place and reprogram the receive filter; everything
		 * else falls through to the common ethernet handling.
		 */
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/*FALLTHROUGH*/
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET means something changed that we must absorb. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;	/* nothing further to do */
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	ifp->if_start(ifp);

	splx(s);
	return error;
}
   3447 
   3448 /*
   3449  * wm_intr:
   3450  *
   3451  *	Interrupt service routine.
   3452  */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/*
	 * Loop until ICR no longer reports any of the causes we enabled
	 * (sc_icr holds the interrupt mask this driver programmed).
	 */
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
		/* Feed the cause bits to the kernel entropy pool. */
		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx rings are always swept regardless of cause. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link status change / Rx sequence error / /C/ ordered sets. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		ifp->if_start(ifp);
	}

	return handled;
}
   3510 
   3511 /*
   3512  * wm_txintr:
   3513  *
   3514  *	Helper; handle transmit interrupts.
   3515  */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be able to queue more packets now; clear the blocked flag. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Sync the descriptors for this job before inspecting them. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; give the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Excessive or late collisions count as output errors. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* 16 attempts made per excessive-collision event. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Reclaim the descriptors and free the mbuf chain. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
   3597 
   3598 /*
   3599  * wm_rxintr:
   3600  *
   3601  *	Helper; handle receive interrupts.
   3602  */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Walk the ring from the last processed descriptor. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * In discard mode (set after a buffer-allocation failure
		 * mid-packet), throw away fragments until we see EOP.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except I350 and I21[01]
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in RCTL register
		 * is always set in I350, so we don't trim it.
		 */
		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
		    && (sc->sc_type != WM_T_I211)) {
			if (m->m_len < ETHER_CRC_LEN) {
				sc->sc_rxtail->m_len
				    -= (ETHER_CRC_LEN - m->m_len);
				m->m_len = 0;
			} else
				m->m_len -= ETHER_CRC_LEN;
			len = sc->sc_rxlen - ETHER_CRC_LEN;
		} else
			len = sc->sc_rxlen;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate and detach the accumulated mbuf chain. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				/* Hardware verified the IPv4 header checksum. */
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
   3808 
   3809 /*
   3810  * wm_linkintr_gmii:
   3811  *
   3812  *	Helper; handle link interrupts for GMII.
   3813  */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	if (icr & ICR_LSC) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: LSC -> mii_pollstat\n",
			device_xname(sc->sc_dev)));
		mii_pollstat(&sc->sc_mii);
		if (sc->sc_type == WM_T_82543) {
			int miistatus, active;

			/*
			 * With 82543, we need to force speed and
			 * duplex on the MAC equal to what the PHY
			 * speed and duplex configuration is.
			 */
			miistatus = sc->sc_mii.mii_media_status;

			if (miistatus & IFM_ACTIVE) {
				active = sc->sc_mii.mii_media_active;
				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
				switch (IFM_SUBTYPE(active)) {
				case IFM_10_T:
					sc->sc_ctrl |= CTRL_SPEED_10;
					break;
				case IFM_100_TX:
					sc->sc_ctrl |= CTRL_SPEED_100;
					break;
				case IFM_1000_T:
					sc->sc_ctrl |= CTRL_SPEED_1000;
					break;
				default:
					/*
					 * fiber?
					 * Should not enter here.
					 */
					printf("unknown media (%x)\n",
					    active);
					break;
				}
				if (active & IFM_FDX)
					sc->sc_ctrl |= CTRL_FD;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			}
		} else if ((sc->sc_type == WM_T_ICH8)
		    && (sc->sc_phytype == WMPHY_IGP_3)) {
			/* ICH8 with IGP3 PHY: KMRN lock-loss workaround. */
			wm_kmrn_lock_loss_workaround_ich8lan(sc);
		} else if (sc->sc_type == WM_T_PCH) {
			/* PCH: K1 gigabit workaround, passing link state. */
			wm_k1_gig_workaround_hv(sc,
			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
		}

		/* 82578 PHY: apply the link-stall workaround at 1000Mb/s. */
		if ((sc->sc_phytype == WMPHY_82578)
		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
			== IFM_1000_T)) {

			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
				delay(200*1000); /* XXX too big */

				/* Link stall fix for link up */
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC
				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
				wm_gmii_hv_writereg(sc->sc_dev, 1,
				    HV_MUX_DATA_CTRL,
				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
			}
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK Receive sequence error\n",
			device_xname(sc->sc_dev)));
	}
}
   3893 
   3894 /*
   3895  * wm_linkintr_tbi:
   3896  *
   3897  *	Helper; handle link interrupts for TBI mode.
   3898  */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));

	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on the duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips use the old FCRTL register offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		/* Reflect the new link state on the link LED. */
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
   3951 
   3952 /*
   3953  * wm_linkintr:
   3954  *
   3955  *	Helper; handle link interrupts.
   3956  */
   3957 static void
   3958 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   3959 {
   3960 
   3961 	if (sc->sc_flags & WM_F_HAS_MII)
   3962 		wm_linkintr_gmii(sc, icr);
   3963 	else
   3964 		wm_linkintr_tbi(sc, icr);
   3965 }
   3966 
   3967 /*
   3968  * wm_tick:
   3969  *
   3970  *	One second timer, used to check link status, sweep up
   3971  *	completed transmit jobs, etc.
   3972  */
   3973 static void
   3974 wm_tick(void *arg)
   3975 {
   3976 	struct wm_softc *sc = arg;
   3977 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3978 	int s;
   3979 
   3980 	s = splnet();
   3981 
   3982 	if (sc->sc_type >= WM_T_82542_2_1) {
   3983 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3984 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3985 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3986 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3987 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3988 	}
   3989 
   3990 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3991 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   3992 	    + CSR_READ(sc, WMREG_CRCERRS)
   3993 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3994 	    + CSR_READ(sc, WMREG_SYMERRC)
   3995 	    + CSR_READ(sc, WMREG_RXERRC)
   3996 	    + CSR_READ(sc, WMREG_SEC)
   3997 	    + CSR_READ(sc, WMREG_CEXTERR)
   3998 	    + CSR_READ(sc, WMREG_RLEC);
   3999 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   4000 
   4001 	if (sc->sc_flags & WM_F_HAS_MII)
   4002 		mii_tick(&sc->sc_mii);
   4003 	else
   4004 		wm_tbi_check_link(sc);
   4005 
   4006 	splx(s);
   4007 
   4008 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4009 }
   4010 
   4011 /*
   4012  * wm_reset:
   4013  *
   4014  *	Reset the i82542 chip.
   4015  */
static void
wm_reset(struct wm_softc *sc)
{
	int phy_reset = 0;
	uint32_t reg, mask;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		/* The remainder of the packet buffer is the Tx FIFO. */
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
	case WM_T_I350:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82580:
	case WM_T_82580ER:
		sc->sc_pba = PBA_35K;
		break;
	case WM_T_I210:
	case WM_T_I211:
		sc->sc_pba = PBA_34K;
		break;
	case WM_T_82576:
		sc->sc_pba = PBA_64K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
		sc->sc_pba = PBA_10K;
		break;
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_pba = PBA_26K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/* Prevent the PCI-E bus from sticking */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		/* Disable GIO master requests, then wait for them to drain. */
		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
			    == 0)
				break;
			delay(100);
		}
	}

	/* Set the completion timeout for interface */
	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_I350))
		wm_set_pcie_completion_timeout(sc);

	/* Clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
	sc->sc_rctl &= ~RCTL_EN;

	/* XXX set_tbi_sbp_82543() */

	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		wm_get_hw_semaphore_82573(sc);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	/* Issue the MAC reset using the method each chip family requires. */
	switch (sc->sc_type) {
	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		/* Reset must be done while holding the SW/FW semaphore. */
		mask = swfwphysem[sc->sc_funcid];
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		if (wm_check_reset_block(sc) == 0) {
			/*
			 * Gate automatic PHY configuration by hardware on
			 * non-managed 82579
			 */
			if ((sc->sc_type == WM_T_PCH2)
			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
				!= 0))
				wm_gate_hw_phy_config_ich8lan(sc, 1);


			/* PHY reset is not blocked; reset it with the MAC. */
			reg |= CTRL_PHY_RESET;
			phy_reset = 1;
		}
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_82583:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	/* Must release the MDIO ownership after MAC reset */
	switch (sc->sc_type) {
	case WM_T_82574:
	case WM_T_82583:
		wm_put_hw_semaphore_82573(sc);
		break;
	default:
		break;
	}

	/* If we reset the PHY, wait for its configuration to complete. */
	if (phy_reset != 0)
		wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_80003:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* Check whether EEPROM is present or not */
	switch (sc->sc_type) {
	case WM_T_82575:
	case WM_T_82576:
#if 0 /* XXX */
	case WM_T_82580:
	case WM_T_82580ER:
#endif
	case WM_T_I350:
	case WM_T_ICH8:
	case WM_T_ICH9:
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			sc->sc_flags |= WM_F_EEPROM_INVALID;
			if ((sc->sc_type == WM_T_82575)
			    || (sc->sc_type == WM_T_82576)
			    || (sc->sc_type == WM_T_82580)
			    || (sc->sc_type == WM_T_82580ER)
			    || (sc->sc_type == WM_T_I350))
				wm_reset_init_script_82575(sc);
		}
		break;
	default:
		break;
	}

	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350)) {
		/* clear global device reset status bit */
		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
	}

	/* Clear any pending interrupt events. */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	reg = CSR_READ(sc, WMREG_ICR);

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	if (sc->sc_type == WM_T_I350)
		wm_set_eee_i350(sc);

	/* dummy read from WUC */
	if (sc->sc_type == WM_T_PCH)
		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
	/*
	 * For PCH, this write will make sure that any noise will be detected
	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine
	 */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);

	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		CSR_WRITE(sc, WMREG_WUC, 0);

	/* XXX need special handling for 82580 */
}
   4360 
   4361 static void
   4362 wm_set_vlan(struct wm_softc *sc)
   4363 {
   4364 	/* Deal with VLAN enables. */
   4365 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4366 		sc->sc_ctrl |= CTRL_VME;
   4367 	else
   4368 		sc->sc_ctrl &= ~CTRL_VME;
   4369 
   4370 	/* Write the control registers. */
   4371 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4372 }
   4373 
   4374 /*
   4375  * wm_init:		[ifnet interface function]
   4376  *
   4377  *	Initialize the interface.  Must be called at splnet().
   4378  */
   4379 static int
   4380 wm_init(struct ifnet *ifp)
   4381 {
   4382 	struct wm_softc *sc = ifp->if_softc;
   4383 	struct wm_rxsoft *rxs;
   4384 	int i, j, trynum, error = 0;
   4385 	uint32_t reg;
   4386 
   4387 	/*
   4388 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   4389 	 * There is a small but measurable benefit to avoiding the adjusment
   4390 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4391 	 * on such platforms.  One possibility is that the DMA itself is
   4392 	 * slightly more efficient if the front of the entire packet (instead
   4393 	 * of the front of the headers) is aligned.
   4394 	 *
   4395 	 * Note we must always set align_tweak to 0 if we are using
   4396 	 * jumbo frames.
   4397 	 */
   4398 #ifdef __NO_STRICT_ALIGNMENT
   4399 	sc->sc_align_tweak = 0;
   4400 #else
   4401 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4402 		sc->sc_align_tweak = 0;
   4403 	else
   4404 		sc->sc_align_tweak = 2;
   4405 #endif /* __NO_STRICT_ALIGNMENT */
   4406 
   4407 	/* Cancel any pending I/O. */
   4408 	wm_stop(ifp, 0);
   4409 
   4410 	/* update statistics before reset */
   4411 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4412 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4413 
   4414 	/* Reset the chip to a known state. */
   4415 	wm_reset(sc);
   4416 
   4417 	switch (sc->sc_type) {
   4418 	case WM_T_82571:
   4419 	case WM_T_82572:
   4420 	case WM_T_82573:
   4421 	case WM_T_82574:
   4422 	case WM_T_82583:
   4423 	case WM_T_80003:
   4424 	case WM_T_ICH8:
   4425 	case WM_T_ICH9:
   4426 	case WM_T_ICH10:
   4427 	case WM_T_PCH:
   4428 	case WM_T_PCH2:
   4429 	case WM_T_PCH_LPT:
   4430 		if (wm_check_mng_mode(sc) != 0)
   4431 			wm_get_hw_control(sc);
   4432 		break;
   4433 	default:
   4434 		break;
   4435 	}
   4436 
   4437 	/* Reset the PHY. */
   4438 	if (sc->sc_flags & WM_F_HAS_MII)
   4439 		wm_gmii_reset(sc);
   4440 
   4441 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4442 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
   4443 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   4444 	    || (sc->sc_type == WM_T_PCH_LPT))
   4445 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
   4446 
   4447 	/* Initialize the transmit descriptor ring. */
   4448 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   4449 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   4450 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4451 	sc->sc_txfree = WM_NTXDESC(sc);
   4452 	sc->sc_txnext = 0;
   4453 
   4454 	if (sc->sc_type < WM_T_82543) {
   4455 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4456 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4457 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   4458 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   4459 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   4460 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   4461 	} else {
   4462 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   4463 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   4464 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   4465 		CSR_WRITE(sc, WMREG_TDH, 0);
   4466 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
   4467 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
   4468 
   4469 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4470 			/*
   4471 			 * Don't write TDT before TCTL.EN is set.
   4472 			 * See the document.
   4473 			 */
   4474 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
   4475 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   4476 			    | TXDCTL_WTHRESH(0));
   4477 		else {
   4478 			CSR_WRITE(sc, WMREG_TDT, 0);
   4479 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   4480 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   4481 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   4482 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   4483 		}
   4484 	}
   4485 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   4486 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   4487 
   4488 	/* Initialize the transmit job descriptors. */
   4489 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   4490 		sc->sc_txsoft[i].txs_mbuf = NULL;
   4491 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   4492 	sc->sc_txsnext = 0;
   4493 	sc->sc_txsdirty = 0;
   4494 
   4495 	/*
   4496 	 * Initialize the receive descriptor and receive job
   4497 	 * descriptor rings.
   4498 	 */
   4499 	if (sc->sc_type < WM_T_82543) {
   4500 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   4501 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   4502 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   4503 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   4504 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   4505 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   4506 
   4507 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   4508 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   4509 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   4510 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   4511 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   4512 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   4513 	} else {
   4514 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   4515 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   4516 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   4517 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4518 			CSR_WRITE(sc, WMREG_EITR(0), 450);
   4519 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   4520 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   4521 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   4522 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   4523 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   4524 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   4525 			    | RXDCTL_WTHRESH(1));
   4526 		} else {
   4527 			CSR_WRITE(sc, WMREG_RDH, 0);
   4528 			CSR_WRITE(sc, WMREG_RDT, 0);
   4529 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   4530 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   4531 		}
   4532 	}
   4533 	for (i = 0; i < WM_NRXDESC; i++) {
   4534 		rxs = &sc->sc_rxsoft[i];
   4535 		if (rxs->rxs_mbuf == NULL) {
   4536 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   4537 				log(LOG_ERR, "%s: unable to allocate or map "
   4538 				    "rx buffer %d, error = %d\n",
   4539 				    device_xname(sc->sc_dev), i, error);
   4540 				/*
   4541 				 * XXX Should attempt to run with fewer receive
   4542 				 * XXX buffers instead of just failing.
   4543 				 */
   4544 				wm_rxdrain(sc);
   4545 				goto out;
   4546 			}
   4547 		} else {
   4548 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4549 				WM_INIT_RXDESC(sc, i);
   4550 			/*
   4551 			 * For 82575 and newer device, the RX descriptors
   4552 			 * must be initialized after the setting of RCTL.EN in
   4553 			 * wm_set_filter()
   4554 			 */
   4555 		}
   4556 	}
   4557 	sc->sc_rxptr = 0;
   4558 	sc->sc_rxdiscard = 0;
   4559 	WM_RXCHAIN_RESET(sc);
   4560 
   4561 	/*
   4562 	 * Clear out the VLAN table -- we don't use it (yet).
   4563 	 */
   4564 	CSR_WRITE(sc, WMREG_VET, 0);
   4565 	if (sc->sc_type == WM_T_I350)
   4566 		trynum = 10; /* Due to hw errata */
   4567 	else
   4568 		trynum = 1;
   4569 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4570 		for (j = 0; j < trynum; j++)
   4571 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4572 
   4573 	/*
   4574 	 * Set up flow-control parameters.
   4575 	 *
   4576 	 * XXX Values could probably stand some tuning.
   4577 	 */
   4578 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4579 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4580 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4581 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4582 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4583 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4584 	}
   4585 
   4586 	sc->sc_fcrtl = FCRTL_DFLT;
   4587 	if (sc->sc_type < WM_T_82543) {
   4588 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4589 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4590 	} else {
   4591 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4592 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4593 	}
   4594 
   4595 	if (sc->sc_type == WM_T_80003)
   4596 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4597 	else
   4598 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4599 
   4600 	/* Writes the control register. */
   4601 	wm_set_vlan(sc);
   4602 
   4603 	if (sc->sc_flags & WM_F_HAS_MII) {
   4604 		int val;
   4605 
   4606 		switch (sc->sc_type) {
   4607 		case WM_T_80003:
   4608 		case WM_T_ICH8:
   4609 		case WM_T_ICH9:
   4610 		case WM_T_ICH10:
   4611 		case WM_T_PCH:
   4612 		case WM_T_PCH2:
   4613 		case WM_T_PCH_LPT:
   4614 			/*
   4615 			 * Set the mac to wait the maximum time between each
   4616 			 * iteration and increase the max iterations when
   4617 			 * polling the phy; this fixes erroneous timeouts at
   4618 			 * 10Mbps.
   4619 			 */
   4620 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4621 			    0xFFFF);
   4622 			val = wm_kmrn_readreg(sc,
   4623 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4624 			val |= 0x3F;
   4625 			wm_kmrn_writereg(sc,
   4626 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4627 			break;
   4628 		default:
   4629 			break;
   4630 		}
   4631 
   4632 		if (sc->sc_type == WM_T_80003) {
   4633 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4634 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4635 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4636 
   4637 			/* Bypass RX and TX FIFO's */
   4638 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4639 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4640 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4641 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4642 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4643 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4644 		}
   4645 	}
   4646 #if 0
   4647 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4648 #endif
   4649 
   4650 	/*
   4651 	 * Set up checksum offload parameters.
   4652 	 */
   4653 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4654 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4655 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4656 		reg |= RXCSUM_IPOFL;
   4657 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4658 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4659 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4660 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4661 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4662 
   4663 	/* Reset TBI's RXCFG count */
   4664 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
   4665 
   4666 	/*
   4667 	 * Set up the interrupt registers.
   4668 	 */
   4669 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4670 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4671 	    ICR_RXO | ICR_RXT0;
   4672 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
   4673 		sc->sc_icr |= ICR_RXCFG;
   4674 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4675 
   4676 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4677 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4678 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4679 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4680 		reg |= KABGTXD_BGSQLBIAS;
   4681 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4682 	}
   4683 
   4684 	/* Set up the inter-packet gap. */
   4685 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4686 
   4687 	if (sc->sc_type >= WM_T_82543) {
   4688 		/*
   4689 		 * Set up the interrupt throttling register (units of 256ns)
   4690 		 * Note that a footnote in Intel's documentation says this
   4691 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4692 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4693 		 * that that is also true for the 1024ns units of the other
   4694 		 * interrupt-related timer registers -- so, really, we ought
   4695 		 * to divide this value by 4 when the link speed is low.
   4696 		 *
   4697 		 * XXX implement this division at link speed change!
   4698 		 */
   4699 
   4700 		 /*
   4701 		  * For N interrupts/sec, set this value to:
   4702 		  * 1000000000 / (N * 256).  Note that we set the
   4703 		  * absolute and packet timer values to this value
   4704 		  * divided by 4 to get "simple timer" behavior.
   4705 		  */
   4706 
   4707 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4708 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4709 	}
   4710 
   4711 	/* Set the VLAN ethernetype. */
   4712 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4713 
   4714 	/*
   4715 	 * Set up the transmit control register; we start out with
   4716 	 * a collision distance suitable for FDX, but update it whe
   4717 	 * we resolve the media type.
   4718 	 */
   4719 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4720 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4721 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4722 	if (sc->sc_type >= WM_T_82571)
   4723 		sc->sc_tctl |= TCTL_MULR;
   4724 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4725 
   4726 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4727 		/*
   4728 		 * Write TDT after TCTL.EN is set.
   4729 		 * See the document.
   4730 		 */
   4731 		CSR_WRITE(sc, WMREG_TDT, 0);
   4732 	}
   4733 
   4734 	if (sc->sc_type == WM_T_80003) {
   4735 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4736 		reg &= ~TCTL_EXT_GCEX_MASK;
   4737 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4738 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4739 	}
   4740 
   4741 	/* Set the media. */
   4742 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4743 		goto out;
   4744 
   4745 	/* Configure for OS presence */
   4746 	wm_init_manageability(sc);
   4747 
   4748 	/*
   4749 	 * Set up the receive control register; we actually program
   4750 	 * the register when we set the receive filter.  Use multicast
   4751 	 * address offset type 0.
   4752 	 *
   4753 	 * Only the i82544 has the ability to strip the incoming
   4754 	 * CRC, so we don't enable that feature.
   4755 	 */
   4756 	sc->sc_mchash_type = 0;
   4757 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4758 	    | RCTL_MO(sc->sc_mchash_type);
   4759 
   4760 	/*
   4761 	 * The I350 has a bug where it always strips the CRC whether
   4762 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   4763 	 */
   4764 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
   4765 		sc->sc_rctl |= RCTL_SECRC;
   4766 
   4767 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4768 	    && (ifp->if_mtu > ETHERMTU)) {
   4769 		sc->sc_rctl |= RCTL_LPE;
   4770 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4771 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4772 	}
   4773 
   4774 	if (MCLBYTES == 2048) {
   4775 		sc->sc_rctl |= RCTL_2k;
   4776 	} else {
   4777 		if (sc->sc_type >= WM_T_82543) {
   4778 			switch (MCLBYTES) {
   4779 			case 4096:
   4780 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4781 				break;
   4782 			case 8192:
   4783 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4784 				break;
   4785 			case 16384:
   4786 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4787 				break;
   4788 			default:
   4789 				panic("wm_init: MCLBYTES %d unsupported",
   4790 				    MCLBYTES);
   4791 				break;
   4792 			}
   4793 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4794 	}
   4795 
   4796 	/* Set the receive filter. */
   4797 	wm_set_filter(sc);
   4798 
   4799 	/* Enable ECC */
   4800 	switch (sc->sc_type) {
   4801 	case WM_T_82571:
   4802 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4803 		reg |= PBA_ECC_CORR_EN;
   4804 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4805 		break;
   4806 	case WM_T_PCH_LPT:
   4807 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4808 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4809 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4810 
   4811 		reg = CSR_READ(sc, WMREG_CTRL);
   4812 		reg |= CTRL_MEHE;
   4813 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4814 		break;
   4815 	default:
   4816 		break;
   4817 	}
   4818 
   4819 	/* On 575 and later set RDT only if RX enabled */
   4820 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4821 		for (i = 0; i < WM_NRXDESC; i++)
   4822 			WM_INIT_RXDESC(sc, i);
   4823 
   4824 	/* Start the one second link check clock. */
   4825 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4826 
   4827 	/* ...all done! */
   4828 	ifp->if_flags |= IFF_RUNNING;
   4829 	ifp->if_flags &= ~IFF_OACTIVE;
   4830 
   4831  out:
   4832 	sc->sc_if_flags = ifp->if_flags;
   4833 	if (error)
   4834 		log(LOG_ERR, "%s: interface not running\n",
   4835 		    device_xname(sc->sc_dev));
   4836 	return error;
   4837 }
   4838 
   4839 /*
   4840  * wm_rxdrain:
   4841  *
   4842  *	Drain the receive queue.
   4843  */
   4844 static void
   4845 wm_rxdrain(struct wm_softc *sc)
   4846 {
   4847 	struct wm_rxsoft *rxs;
   4848 	int i;
   4849 
   4850 	for (i = 0; i < WM_NRXDESC; i++) {
   4851 		rxs = &sc->sc_rxsoft[i];
   4852 		if (rxs->rxs_mbuf != NULL) {
   4853 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4854 			m_freem(rxs->rxs_mbuf);
   4855 			rxs->rxs_mbuf = NULL;
   4856 		}
   4857 	}
   4858 }
   4859 
   4860 /*
   4861  * wm_stop:		[ifnet interface function]
   4862  *
   4863  *	Stop transmission on the interface.
   4864  */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);
	/* Keep the cached copy of RCTL in sync with the hardware. */
	sc->sc_rctl &= ~RCTL_EN;

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Optionally free the receive buffers as well. */
	if (disable)
		wm_rxdrain(sc);

#if 0 /* notyet */
	if (sc->sc_type >= WM_T_82544)
		CSR_WRITE(sc, WMREG_WUC, 0);
#endif
}
   4925 
   4926 void
   4927 wm_get_auto_rd_done(struct wm_softc *sc)
   4928 {
   4929 	int i;
   4930 
   4931 	/* wait for eeprom to reload */
   4932 	switch (sc->sc_type) {
   4933 	case WM_T_82571:
   4934 	case WM_T_82572:
   4935 	case WM_T_82573:
   4936 	case WM_T_82574:
   4937 	case WM_T_82583:
   4938 	case WM_T_82575:
   4939 	case WM_T_82576:
   4940 	case WM_T_82580:
   4941 	case WM_T_82580ER:
   4942 	case WM_T_I350:
   4943 	case WM_T_I210:
   4944 	case WM_T_I211:
   4945 	case WM_T_80003:
   4946 	case WM_T_ICH8:
   4947 	case WM_T_ICH9:
   4948 		for (i = 0; i < 10; i++) {
   4949 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4950 				break;
   4951 			delay(1000);
   4952 		}
   4953 		if (i == 10) {
   4954 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4955 			    "complete\n", device_xname(sc->sc_dev));
   4956 		}
   4957 		break;
   4958 	default:
   4959 		break;
   4960 	}
   4961 }
   4962 
   4963 void
   4964 wm_lan_init_done(struct wm_softc *sc)
   4965 {
   4966 	uint32_t reg = 0;
   4967 	int i;
   4968 
   4969 	/* wait for eeprom to reload */
   4970 	switch (sc->sc_type) {
   4971 	case WM_T_ICH10:
   4972 	case WM_T_PCH:
   4973 	case WM_T_PCH2:
   4974 	case WM_T_PCH_LPT:
   4975 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4976 			reg = CSR_READ(sc, WMREG_STATUS);
   4977 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4978 				break;
   4979 			delay(100);
   4980 		}
   4981 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4982 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4983 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4984 		}
   4985 		break;
   4986 	default:
   4987 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4988 		    __func__);
   4989 		break;
   4990 	}
   4991 
   4992 	reg &= ~STATUS_LAN_INIT_DONE;
   4993 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4994 }
   4995 
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int mask;
	uint32_t reg;
	int i;

	/*
	 * Wait until the MAC has finished reloading its configuration
	 * from NVM after a reset.  The completion indication differs
	 * by chip generation.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic: no done indication, so just wait 10ms */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
		/* Poll EEMNGCTL for this function's CFGDONE bit. */
		if (sc->sc_type == WM_T_82571) {
			/* Only 82571 shares port 0 */
			mask = EEMNGCTL_CFGDONE_0;
		} else
			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		delay(10*1000);
		/* ICH10 and later report LAN-init done; older use AUTORD. */
		if (sc->sc_type >= WM_T_ICH10)
			wm_lan_init_done(sc);
		else
			wm_get_auto_rd_done(sc);

		/* Clear the PHYRA (PHY reset asserted) status bit if set. */
		reg = CSR_READ(sc, WMREG_STATUS);
		if ((reg & STATUS_PHYRA) != 0)
			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
   5073 
   5074 /*
   5075  * wm_acquire_eeprom:
   5076  *
   5077  *	Perform the EEPROM handshake required on some chips.
   5078  */
   5079 static int
   5080 wm_acquire_eeprom(struct wm_softc *sc)
   5081 {
   5082 	uint32_t reg;
   5083 	int x;
   5084 	int ret = 0;
   5085 
   5086 	/* always success */
   5087 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   5088 		return 0;
   5089 
   5090 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
   5091 		ret = wm_get_swfwhw_semaphore(sc);
   5092 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
   5093 		/* this will also do wm_get_swsm_semaphore() if needed */
   5094 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   5095 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
   5096 		ret = wm_get_swsm_semaphore(sc);
   5097 	}
   5098 
   5099 	if (ret) {
   5100 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   5101 			__func__);
   5102 		return 1;
   5103 	}
   5104 
   5105 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   5106 		reg = CSR_READ(sc, WMREG_EECD);
   5107 
   5108 		/* Request EEPROM access. */
   5109 		reg |= EECD_EE_REQ;
   5110 		CSR_WRITE(sc, WMREG_EECD, reg);
   5111 
   5112 		/* ..and wait for it to be granted. */
   5113 		for (x = 0; x < 1000; x++) {
   5114 			reg = CSR_READ(sc, WMREG_EECD);
   5115 			if (reg & EECD_EE_GNT)
   5116 				break;
   5117 			delay(5);
   5118 		}
   5119 		if ((reg & EECD_EE_GNT) == 0) {
   5120 			aprint_error_dev(sc->sc_dev,
   5121 			    "could not acquire EEPROM GNT\n");
   5122 			reg &= ~EECD_EE_REQ;
   5123 			CSR_WRITE(sc, WMREG_EECD, reg);
   5124 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
   5125 				wm_put_swfwhw_semaphore(sc);
   5126 			if (sc->sc_flags & WM_F_SWFW_SYNC)
   5127 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   5128 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   5129 				wm_put_swsm_semaphore(sc);
   5130 			return 1;
   5131 		}
   5132 	}
   5133 
   5134 	return 0;
   5135 }
   5136 
   5137 /*
   5138  * wm_release_eeprom:
   5139  *
   5140  *	Release the EEPROM mutex.
   5141  */
   5142 static void
   5143 wm_release_eeprom(struct wm_softc *sc)
   5144 {
   5145 	uint32_t reg;
   5146 
   5147 	/* always success */
   5148 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   5149 		return;
   5150 
   5151 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   5152 		reg = CSR_READ(sc, WMREG_EECD);
   5153 		reg &= ~EECD_EE_REQ;
   5154 		CSR_WRITE(sc, WMREG_EECD, reg);
   5155 	}
   5156 
   5157 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
   5158 		wm_put_swfwhw_semaphore(sc);
   5159 	if (sc->sc_flags & WM_F_SWFW_SYNC)
   5160 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   5161 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   5162 		wm_put_swsm_semaphore(sc);
   5163 }
   5164 
   5165 /*
   5166  * wm_eeprom_sendbits:
   5167  *
   5168  *	Send a series of bits to the EEPROM.
   5169  */
   5170 static void
   5171 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   5172 {
   5173 	uint32_t reg;
   5174 	int x;
   5175 
   5176 	reg = CSR_READ(sc, WMREG_EECD);
   5177 
   5178 	for (x = nbits; x > 0; x--) {
   5179 		if (bits & (1U << (x - 1)))
   5180 			reg |= EECD_DI;
   5181 		else
   5182 			reg &= ~EECD_DI;
   5183 		CSR_WRITE(sc, WMREG_EECD, reg);
   5184 		delay(2);
   5185 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   5186 		delay(2);
   5187 		CSR_WRITE(sc, WMREG_EECD, reg);
   5188 		delay(2);
   5189 	}
   5190 }
   5191 
   5192 /*
   5193  * wm_eeprom_recvbits:
   5194  *
   5195  *	Receive a series of bits from the EEPROM.
   5196  */
   5197 static void
   5198 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   5199 {
   5200 	uint32_t reg, val;
   5201 	int x;
   5202 
   5203 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   5204 
   5205 	val = 0;
   5206 	for (x = nbits; x > 0; x--) {
   5207 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   5208 		delay(2);
   5209 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   5210 			val |= (1U << (x - 1));
   5211 		CSR_WRITE(sc, WMREG_EECD, reg);
   5212 		delay(2);
   5213 	}
   5214 	*valp = val;
   5215 }
   5216 
   5217 /*
   5218  * wm_read_eeprom_uwire:
   5219  *
   5220  *	Read a word from the EEPROM using the MicroWire protocol.
   5221  */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* Read 'wordcnt' 16-bit words starting at 'word', one at a time. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 act as 82540.
		 */
		if (sc->sc_type == WM_T_82540) {
			/* Extra SK pulse; needed by the emulators above. */
			reg |= EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			reg &= ~EECD_SK;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}
		/* XXX: end of workaround */

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	/* Always succeeds; kept int for symmetry with the SPI variant. */
	return 0;
}
   5272 
   5273 /*
   5274  * wm_spi_eeprom_ready:
   5275  *
   5276  *	Wait for a SPI EEPROM to be ready for commands.
   5277  */
   5278 static int
   5279 wm_spi_eeprom_ready(struct wm_softc *sc)
   5280 {
   5281 	uint32_t val;
   5282 	int usec;
   5283 
   5284 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   5285 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   5286 		wm_eeprom_recvbits(sc, &val, 8);
   5287 		if ((val & SPI_SR_RDY) == 0)
   5288 			break;
   5289 	}
   5290 	if (usec >= SPI_MAX_RETRIES) {
   5291 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   5292 		return 1;
   5293 	}
   5294 	return 0;
   5295 }
   5296 
   5297 /*
   5298  * wm_read_eeprom_spi:
   5299  *
 *	Read a word from the EEPROM using the SPI protocol.
   5301  */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return 1;

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * On 8-bit-address parts the A8 opcode bit selects the upper
	 * half of the address space (byte addresses >= 256).
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* The word index is converted to a byte address (<< 1). */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		/* Each 16-bit quantity is byte-swapped as it is read. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return 0;
}
   5342 
/* Expected sum of the first EEPROM_SIZE words, and checksum-valid flags. */
#define NVM_CHECKSUM			0xBABA
#define EEPROM_SIZE			0x0040
#define NVM_COMPAT			0x0003
#define NVM_COMPAT_VALID_CHECKSUM	0x0001
#define NVM_FUTURE_INIT_WORD1			0x0019
#define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040

/*
 * wm_validate_eeprom_checksum
 *
 * The checksum is defined as the sum of the first 64 (16 bit) words.
 *
 * Note that a checksum mismatch is only reported under WM_DEBUG; this
 * function still returns 0 in that case.  It returns 1 only when a word
 * cannot be read at all.
 */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum, valid_checksum;
	uint16_t eeprom_data;
	uint16_t csum_wordaddr;		/* word holding the "valid" flag */
	int i;

	checksum = 0;

	/* Don't check for I211 */
	if (sc->sc_type == WM_T_I211)
		return 0;

	/* Pick the per-family location of the checksum-valid bit. */
	if (sc->sc_type == WM_T_PCH_LPT) {
		csum_wordaddr = NVM_COMPAT;
		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
	} else {
		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
	}

	/*
	 * NOTE(review): csum_wordaddr/valid_checksum (and the dump below)
	 * are only consumed when WM_DEBUG is defined.
	 */
#ifdef WM_DEBUG
	/* Dump EEPROM image for debug */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
		if ((eeprom_data & valid_checksum) == 0) {
			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM need to be updated (%04x != %04x)\n",
				device_xname(sc->sc_dev), eeprom_data,
				    valid_checksum));
		}
	}

	if ((wm_debug & WM_DEBUG_NVM) != 0) {
		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
		for (i = 0; i < EEPROM_SIZE; i++) {
			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
				printf("XX ");
			else
				printf("%04x ", eeprom_data);
			if (i % 8 == 7)
				printf("\n");
		}
	}

#endif /* WM_DEBUG */

	/* Sum the first EEPROM_SIZE words; fail hard on a read error. */
	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return 1;
		checksum += eeprom_data;
	}

	/* A bad sum is logged (debug builds) but deliberately not fatal. */
	if (checksum != (uint16_t) NVM_CHECKSUM) {
#ifdef WM_DEBUG
		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
#endif
	}

	return 0;
}
   5420 
   5421 /*
   5422  * wm_read_eeprom:
   5423  *
   5424  *	Read data from the serial EEPROM.
   5425  */
   5426 static int
   5427 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   5428 {
   5429 	int rv;
   5430 
   5431 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   5432 		return 1;
   5433 
   5434 	if (wm_acquire_eeprom(sc))
   5435 		return 1;
   5436 
   5437 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5438 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5439 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   5440 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
   5441 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   5442 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
   5443 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   5444 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   5445 	else
   5446 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   5447 
   5448 	wm_release_eeprom(sc);
   5449 	return rv;
   5450 }
   5451 
   5452 static int
   5453 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
   5454     uint16_t *data)
   5455 {
   5456 	int i, eerd = 0;
   5457 	int error = 0;
   5458 
   5459 	for (i = 0; i < wordcnt; i++) {
   5460 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   5461 
   5462 		CSR_WRITE(sc, WMREG_EERD, eerd);
   5463 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   5464 		if (error != 0)
   5465 			break;
   5466 
   5467 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   5468 	}
   5469 
   5470 	return error;
   5471 }
   5472 
   5473 static int
   5474 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   5475 {
   5476 	uint32_t attempts = 100000;
   5477 	uint32_t i, reg = 0;
   5478 	int32_t done = -1;
   5479 
   5480 	for (i = 0; i < attempts; i++) {
   5481 		reg = CSR_READ(sc, rw);
   5482 
   5483 		if (reg & EERD_DONE) {
   5484 			done = 0;
   5485 			break;
   5486 		}
   5487 		delay(5);
   5488 	}
   5489 
   5490 	return done;
   5491 }
   5492 
   5493 static int
   5494 wm_check_alt_mac_addr(struct wm_softc *sc)
   5495 {
   5496 	uint16_t myea[ETHER_ADDR_LEN / 2];
   5497 	uint16_t offset = EEPROM_OFF_MACADDR;
   5498 
   5499 	/* Try to read alternative MAC address pointer */
   5500 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   5501 		return -1;
   5502 
   5503 	/* Check pointer */
   5504 	if (offset == 0xffff)
   5505 		return -1;
   5506 
   5507 	/*
   5508 	 * Check whether alternative MAC address is valid or not.
   5509 	 * Some cards have non 0xffff pointer but those don't use
   5510 	 * alternative MAC address in reality.
   5511 	 *
   5512 	 * Check whether the broadcast bit is set or not.
   5513 	 */
   5514 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
   5515 		if (((myea[0] & 0xff) & 0x01) == 0)
   5516 			return 0; /* found! */
   5517 
   5518 	/* not found */
   5519 	return -1;
   5520 }
   5521 
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint16_t offset = EEPROM_OFF_MACADDR;
	int do_invert = 0;	/* flip the address LSB for odd functions */

	/*
	 * Locate this function's MAC address in the EEPROM.  The layout
	 * differs per chip family:
	 *  - 82580/I350: one fixed per-LAN offset per PCI function.
	 *  - 82571/8257[56]/80003/I21[01]: an alternative MAC address
	 *    block located via EEPROM_ALT_MAC_ADDR_PTR, with per-function
	 *    offsets relative to that pointer.
	 *  - everything else: the single shared address at
	 *    EEPROM_OFF_MACADDR, with the LSB toggled on odd functions.
	 */
	switch (sc->sc_type) {
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		switch (sc->sc_funcid) {
		case 0:
			/* default value (== EEPROM_OFF_MACADDR) */
			break;
		case 1:
			offset = EEPROM_OFF_LAN1;
			break;
		case 2:
			offset = EEPROM_OFF_LAN2;
			break;
		case 3:
			offset = EEPROM_OFF_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	case WM_T_82571:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_80003:
	case WM_T_I210:
	case WM_T_I211:
		if (wm_check_alt_mac_addr(sc) != 0) {
			/* reset the offset to LAN0 */
			offset = EEPROM_OFF_MACADDR;
			if ((sc->sc_funcid & 0x01) == 1)
				do_invert = 1;
			goto do_read;
		}
		/*
		 * Alternative MAC address block exists; re-read the
		 * pointer and add the per-function displacement.
		 */
		switch (sc->sc_funcid) {
		case 0:
			/*
			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
			 * itself.
			 */
			break;
		case 1:
			offset += EEPROM_OFF_MACADDR_LAN1;
			break;
		case 2:
			offset += EEPROM_OFF_MACADDR_LAN2;
			break;
		case 3:
			offset += EEPROM_OFF_MACADDR_LAN3;
			break;
		default:
			goto bad;
			/* NOTREACHED */
			break;
		}
		break;
	default:
		if ((sc->sc_funcid & 0x01) == 1)
			do_invert = 1;
		break;
	}

	/*
	 * NOTE(review): in the alt-MAC-address case above, `offset` still
	 * holds EEPROM_OFF_MACADDR rather than the pointer value read by
	 * wm_check_alt_mac_addr() — confirm the displacements are meant to
	 * be relative to EEPROM_OFF_MACADDR here.
	 */
 do_read:
	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
		myea) != 0) {
		goto bad;
	}

	/* EEPROM words are little-endian byte pairs. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of some dual port cards.
	 */
	if (do_invert != 0)
		enaddr[5] ^= 1;

	return 0;

 bad:
	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");

	return -1;
}
   5620 
   5621 /*
   5622  * wm_add_rxbuf:
   5623  *
 *	Add a receive buffer to the indicated descriptor.
   5625  */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header plus a cluster for the packet data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Replace any mbuf previously attached to this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster for device reads. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * On NEWQUEUE hardware, only hand the descriptor to the chip once
	 * the receiver is enabled; older parts initialize unconditionally.
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		if ((sc->sc_rctl & RCTL_EN) != 0)
			WM_INIT_RXDESC(sc, idx);
	} else
		WM_INIT_RXDESC(sc, idx);

	return 0;
}
   5670 
   5671 /*
   5672  * wm_set_ral:
   5673  *
 *	Set an entry in the receive address list.
   5675  */
   5676 static void
   5677 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   5678 {
   5679 	uint32_t ral_lo, ral_hi;
   5680 
   5681 	if (enaddr != NULL) {
   5682 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   5683 		    (enaddr[3] << 24);
   5684 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   5685 		ral_hi |= RAL_AV;
   5686 	} else {
   5687 		ral_lo = 0;
   5688 		ral_hi = 0;
   5689 	}
   5690 
   5691 	if (sc->sc_type >= WM_T_82544) {
   5692 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   5693 		    ral_lo);
   5694 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   5695 		    ral_hi);
   5696 	} else {
   5697 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   5698 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   5699 	}
   5700 }
   5701 
   5702 /*
   5703  * wm_mchash:
   5704  *
   5705  *	Compute the hash of the multicast address for the 4096-bit
   5706  *	multicast filter.
   5707  */
   5708 static uint32_t
   5709 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   5710 {
   5711 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   5712 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   5713 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   5714 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   5715 	uint32_t hash;
   5716 
   5717 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5718 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5719 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   5720 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   5721 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   5722 		return (hash & 0x3ff);
   5723 	}
   5724 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   5725 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   5726 
   5727 	return (hash & 0xfff);
   5728 }
   5729 
   5730 /*
   5731  * wm_set_filter:
   5732  *
   5733  *	Set up the receive filter.
   5734  */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* 82544 and newer place the multicast table at the Cordova base. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous implies accepting all multicast too. */
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 -1;
	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT))
		size = WM_RAL_TABSIZE_ICH8;
	else if (sc->sc_type == WM_T_82575)
		size = WM_RAL_TABSIZE_82575;
	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
		size = WM_RAL_TABSIZE_82576;
	else if (sc->sc_type == WM_T_I350)
		size = WM_RAL_TABSIZE_I350;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	/* ICH/PCH parts have a smaller multicast table. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	/* Hash each multicast group address into the filter table. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into a table-word index and a bit number. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		    || (sc->sc_type == WM_T_PCH2)
		    || (sc->sc_type == WM_T_PCH_LPT))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??
		 * On 82544, writing certain MTA words can clobber the
		 * previous word, so re-write it afterwards.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   5842 
   5843 /*
   5844  * wm_tbi_mediainit:
   5845  *
   5846  *	Initialize media for use on 1000BASE-X devices.
   5847  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";	/* separator for the media list printout */

	/* Pick the transmit inter-packet gap for this generation. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Ticks between autonegotiation retries in wm_tbi_check_link(). */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/*
	 * ADD registers one media type and prints its name; `sep`
	 * makes the console output a comma-separated list.
	 */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	/* Default to autonegotiation. */
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
}
   5896 
   5897 /*
   5898  * wm_tbi_mediastatus:	[ifmedia interface function]
   5899  *
   5900  *	Get the current interface media status on a 1000BASE-X device.
   5901  */
   5902 static void
   5903 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   5904 {
   5905 	struct wm_softc *sc = ifp->if_softc;
   5906 	uint32_t ctrl, status;
   5907 
   5908 	ifmr->ifm_status = IFM_AVALID;
   5909 	ifmr->ifm_active = IFM_ETHER;
   5910 
   5911 	status = CSR_READ(sc, WMREG_STATUS);
   5912 	if ((status & STATUS_LU) == 0) {
   5913 		ifmr->ifm_active |= IFM_NONE;
   5914 		return;
   5915 	}
   5916 
   5917 	ifmr->ifm_status |= IFM_ACTIVE;
   5918 	ifmr->ifm_active |= IFM_1000_SX;
   5919 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   5920 		ifmr->ifm_active |= IFM_FDX;
   5921 	ctrl = CSR_READ(sc, WMREG_CTRL);
   5922 	if (ctrl & CTRL_RFCE)
   5923 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   5924 	if (ctrl & CTRL_TFCE)
   5925 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   5926 }
   5927 
   5928 /*
   5929  * wm_tbi_mediachange:	[ifmedia interface function]
   5930  *
   5931  *	Set hardware to newly-selected media on a 1000BASE-X device.
   5932  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Build the transmit configuration word for the selected media. */
	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the Loss Of Signal input (SWD pin 1). */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 *
	 * NOTE(review): the comment says "82544 and later" but the test
	 * below uses '>' (strictly newer than 82544) — confirm which is
	 * intended.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per attempt. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Set collision distance and XON threshold by duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 parts have FCRTL at the old offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return 0;
}
   6045 
   6046 /*
   6047  * wm_tbi_set_linkled:
   6048  *
   6049  *	Update the link LED on 1000BASE-X devices.
   6050  */
   6051 static void
   6052 wm_tbi_set_linkled(struct wm_softc *sc)
   6053 {
   6054 
   6055 	if (sc->sc_tbi_linkup)
   6056 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   6057 	else
   6058 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   6059 
   6060 	/* 82540 or newer devices are active low */
   6061 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   6062 
   6063 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6064 }
   6065 
   6066 /*
   6067  * wm_tbi_check_link:
   6068  *
   6069  *	Check the link on 1000BASE-X devices.
   6070  */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	/* NOTE(review): rxcw and ctrl are read but never used below. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	/* If the interface is up but the link is down, try to recover. */
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			/* Reinitialize the interface to escape the storm. */
			wm_init(ifp);
			ifp->if_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				/* Pulse ANE low, then restore TXCW. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
   6128 
   6129 /*
   6130  * wm_gmii_reset:
   6131  *
   6132  *	Reset the PHY.
   6133  */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int rv;

	/* get phy semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		 /* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_80003:
		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/* Perform the chip-specific PHY reset. */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Assert reset (pin low), wait, then deassert. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore (mirrors the acquire switch above) */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		 /* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/*
		 * nothing to do
		 * NOTE(review): this assignment is a dead store; rv is not
		 * read again after this point.
		 */
		rv = 0;
		break;
	}

	/* get_cfg_done */
	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure actively LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH)
			wm_hv_phy_workaround_ich8lan(sc);

		if (sc->sc_type == WM_T_PCH2)
			wm_lv_phy_workaround_ich8lan(sc);

		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
			/*
			 * dummy read to clear the phy wakeup bit after lcd
			 * reset
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
		}

		/*
		 * XXX Configure the LCD with the extended configuration region
		 * in NVM
		 */

		/* Configure the LCD with the OEM bits in NVM */
		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
		    || (sc->sc_type == WM_T_PCH_LPT)) {
			/*
			 * Disable LPLU.
			 * XXX It seems that 82567 has LPLU, too.
			 */
			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
			reg |= HV_OEM_BITS_ANEGNOW;
			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		}
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
   6380 
   6381 /*
   6382  * wm_gmii_mediainit:
   6383  *
   6384  *	Initialize media for use on 1000BASE-T devices.
   6385  */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	/* 80003 uses its own inter-packet gap value. */
	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	mii->mii_ifp = ifp;

	/*
	 * Determine the PHY access method.
	 *
	 *  For SGMII, use SGMII specific method.
	 *
	 *  For some devices, we can determine the PHY access method
	 * from sc_type.
	 *
	 *  For ICH8 variants, it's difficult to determine the PHY access
	 * method by sc_type, so use the PCI product ID for some devices.
	 * For other ICH8 variants, try to use igp's method. If the PHY
	 * can't detect, then use bm's method.
	 */
	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
		/* 82577 */
		sc->sc_phytype = WMPHY_82577;
		mii->mii_readreg = wm_gmii_hv_readreg;
		mii->mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82578 */
		sc->sc_phytype = WMPHY_82578;
		mii->mii_readreg = wm_gmii_hv_readreg;
		mii->mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
	case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
		sc->sc_phytype = WMPHY_82579;
		mii->mii_readreg = wm_gmii_hv_readreg;
		mii->mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_I217_LM:
	case PCI_PRODUCT_INTEL_I217_V:
	case PCI_PRODUCT_INTEL_I218_LM:
	case PCI_PRODUCT_INTEL_I218_V:
		/* I21[78]; note: sc_phytype is deliberately left unset here */
		mii->mii_readreg = wm_gmii_hv_readreg;
		mii->mii_writereg = wm_gmii_hv_writereg;
		break;
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* 82567 */
		sc->sc_phytype = WMPHY_BM;
		mii->mii_readreg = wm_gmii_bm_readreg;
		mii->mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		/* Fall back to choosing the access method by MAC type. */
		if ((sc->sc_flags & WM_F_SGMII) != 0) {
			mii->mii_readreg = wm_sgmii_readreg;
			mii->mii_writereg = wm_sgmii_writereg;
		} else if (sc->sc_type >= WM_T_80003) {
			mii->mii_readreg = wm_gmii_i80003_readreg;
			mii->mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_I210) {
			mii->mii_readreg = wm_gmii_i82544_readreg;
			mii->mii_writereg = wm_gmii_i82544_writereg;
		} else if (sc->sc_type >= WM_T_82580) {
			sc->sc_phytype = WMPHY_82580;
			mii->mii_readreg = wm_gmii_82580_readreg;
			mii->mii_writereg = wm_gmii_82580_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			mii->mii_readreg = wm_gmii_i82544_readreg;
			mii->mii_writereg = wm_gmii_i82544_writereg;
		} else {
			/* Pre-82544 parts bit-bang MDIO via the CTRL pins. */
			mii->mii_readreg = wm_gmii_i82543_readreg;
			mii->mii_writereg = wm_gmii_i82543_writereg;
		}
		break;
	}
	mii->mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
		if ((sc->sc_flags & WM_F_SGMII) == 0) {
			/* Attach only one port */
			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
			    MII_OFFSET_ANY, MIIF_DOPAUSE);
		} else {
			int i;
			uint32_t ctrl_ext;

			/* Power on sgmii phy if it is disabled */
			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
			CSR_WRITE(sc, WMREG_CTRL_EXT,
			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
			CSR_WRITE_FLUSH(sc);
			delay(300*1000); /* XXX too long */

			/* from 1 to 8 */
			for (i = 1; i < 8; i++)
				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);

			/* restore previous sfp cage power state */
			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
		}
	} else {
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
	 */
	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
		wm_set_mdio_slow_mode_hv(sc);
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * (For ICH8 variants)
	 * If PHY detection failed, use BM's r/w function and retry.
	 */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		mii->mii_readreg = wm_gmii_bm_readreg;
		mii->mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found; expose only "none" media. */
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
		sc->sc_phytype = WMPHY_NONE;
	} else {
		/*
		 * PHY Found!
		 * Check PHY type.
		 */
		uint32_t model;
		struct mii_softc *child;

		child = LIST_FIRST(&mii->mii_phys);
		if (device_is_a(child->mii_dev, "igphy")) {
			struct igphy_softc *isc = (struct igphy_softc *)child;

			model = isc->sc_mii.mii_mpd_model;
			if (model == MII_MODEL_yyINTEL_I82566)
				sc->sc_phytype = WMPHY_IGP_3;
		}

		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}
}
   6578 
   6579 /*
   6580  * wm_gmii_mediastatus:	[ifmedia interface function]
   6581  *
   6582  *	Get the current interface media status on a 1000BASE-T device.
   6583  */
   6584 static void
   6585 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6586 {
   6587 	struct wm_softc *sc = ifp->if_softc;
   6588 
   6589 	ether_mediastatus(ifp, ifmr);
   6590 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6591 	    | sc->sc_flowflags;
   6592 }
   6593 
   6594 /*
   6595  * wm_gmii_mediachange:	[ifmedia interface function]
   6596  *
   6597  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6598  */
   6599 static int
   6600 wm_gmii_mediachange(struct ifnet *ifp)
   6601 {
   6602 	struct wm_softc *sc = ifp->if_softc;
   6603 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6604 	int rc;
   6605 
   6606 	if ((ifp->if_flags & IFF_UP) == 0)
   6607 		return 0;
   6608 
   6609 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6610 	sc->sc_ctrl |= CTRL_SLU;
   6611 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6612 	    || (sc->sc_type > WM_T_82543)) {
   6613 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6614 	} else {
   6615 		sc->sc_ctrl &= ~CTRL_ASDE;
   6616 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6617 		if (ife->ifm_media & IFM_FDX)
   6618 			sc->sc_ctrl |= CTRL_FD;
   6619 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6620 		case IFM_10_T:
   6621 			sc->sc_ctrl |= CTRL_SPEED_10;
   6622 			break;
   6623 		case IFM_100_TX:
   6624 			sc->sc_ctrl |= CTRL_SPEED_100;
   6625 			break;
   6626 		case IFM_1000_T:
   6627 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6628 			break;
   6629 		default:
   6630 			panic("wm_gmii_mediachange: bad media 0x%x",
   6631 			    ife->ifm_media);
   6632 		}
   6633 	}
   6634 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6635 	if (sc->sc_type <= WM_T_82543)
   6636 		wm_gmii_reset(sc);
   6637 
   6638 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6639 		return 0;
   6640 	return rc;
   6641 }
   6642 
   6643 #define	MDI_IO		CTRL_SWDPIN(2)
   6644 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6645 #define	MDI_CLK		CTRL_SWDPIN(3)
   6646 
   6647 static void
   6648 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6649 {
   6650 	uint32_t i, v;
   6651 
   6652 	v = CSR_READ(sc, WMREG_CTRL);
   6653 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6654 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6655 
   6656 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6657 		if (data & i)
   6658 			v |= MDI_IO;
   6659 		else
   6660 			v &= ~MDI_IO;
   6661 		CSR_WRITE(sc, WMREG_CTRL, v);
   6662 		delay(10);
   6663 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6664 		delay(10);
   6665 		CSR_WRITE(sc, WMREG_CTRL, v);
   6666 		delay(10);
   6667 	}
   6668 }
   6669 
   6670 static uint32_t
   6671 i82543_mii_recvbits(struct wm_softc *sc)
   6672 {
   6673 	uint32_t v, i, data = 0;
   6674 
   6675 	v = CSR_READ(sc, WMREG_CTRL);
   6676 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6677 	v |= CTRL_SWDPIO(3);
   6678 
   6679 	CSR_WRITE(sc, WMREG_CTRL, v);
   6680 	delay(10);
   6681 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6682 	delay(10);
   6683 	CSR_WRITE(sc, WMREG_CTRL, v);
   6684 	delay(10);
   6685 
   6686 	for (i = 0; i < 16; i++) {
   6687 		data <<= 1;
   6688 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6689 		delay(10);
   6690 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6691 			data |= 1;
   6692 		CSR_WRITE(sc, WMREG_CTRL, v);
   6693 		delay(10);
   6694 	}
   6695 
   6696 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6697 	delay(10);
   6698 	CSR_WRITE(sc, WMREG_CTRL, v);
   6699 	delay(10);
   6700 
   6701 	return data;
   6702 }
   6703 
   6704 #undef MDI_IO
   6705 #undef MDI_DIR
   6706 #undef MDI_CLK
   6707 
   6708 /*
   6709  * wm_gmii_i82543_readreg:	[mii interface function]
   6710  *
   6711  *	Read a PHY register on the GMII (i82543 version).
   6712  */
   6713 static int
   6714 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6715 {
   6716 	struct wm_softc *sc = device_private(self);
   6717 	int rv;
   6718 
   6719 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6720 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   6721 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6722 	rv = i82543_mii_recvbits(sc) & 0xffff;
   6723 
   6724 	DPRINTF(WM_DEBUG_GMII,
   6725 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6726 	    device_xname(sc->sc_dev), phy, reg, rv));
   6727 
   6728 	return rv;
   6729 }
   6730 
   6731 /*
   6732  * wm_gmii_i82543_writereg:	[mii interface function]
   6733  *
   6734  *	Write a PHY register on the GMII (i82543 version).
   6735  */
   6736 static void
   6737 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6738 {
   6739 	struct wm_softc *sc = device_private(self);
   6740 
   6741 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6742 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6743 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6744 	    (MII_COMMAND_START << 30), 32);
   6745 }
   6746 
   6747 /*
   6748  * wm_gmii_i82544_readreg:	[mii interface function]
   6749  *
   6750  *	Read a PHY register on the GMII.
   6751  */
   6752 static int
   6753 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6754 {
   6755 	struct wm_softc *sc = device_private(self);
   6756 	uint32_t mdic = 0;
   6757 	int i, rv;
   6758 
   6759 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6760 	    MDIC_REGADD(reg));
   6761 
   6762 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6763 		mdic = CSR_READ(sc, WMREG_MDIC);
   6764 		if (mdic & MDIC_READY)
   6765 			break;
   6766 		delay(50);
   6767 	}
   6768 
   6769 	if ((mdic & MDIC_READY) == 0) {
   6770 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6771 		    device_xname(sc->sc_dev), phy, reg);
   6772 		rv = 0;
   6773 	} else if (mdic & MDIC_E) {
   6774 #if 0 /* This is normal if no PHY is present. */
   6775 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6776 		    device_xname(sc->sc_dev), phy, reg);
   6777 #endif
   6778 		rv = 0;
   6779 	} else {
   6780 		rv = MDIC_DATA(mdic);
   6781 		if (rv == 0xffff)
   6782 			rv = 0;
   6783 	}
   6784 
   6785 	return rv;
   6786 }
   6787 
   6788 /*
   6789  * wm_gmii_i82544_writereg:	[mii interface function]
   6790  *
   6791  *	Write a PHY register on the GMII.
   6792  */
   6793 static void
   6794 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   6795 {
   6796 	struct wm_softc *sc = device_private(self);
   6797 	uint32_t mdic = 0;
   6798 	int i;
   6799 
   6800 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   6801 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   6802 
   6803 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6804 		mdic = CSR_READ(sc, WMREG_MDIC);
   6805 		if (mdic & MDIC_READY)
   6806 			break;
   6807 		delay(50);
   6808 	}
   6809 
   6810 	if ((mdic & MDIC_READY) == 0)
   6811 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   6812 		    device_xname(sc->sc_dev), phy, reg);
   6813 	else if (mdic & MDIC_E)
   6814 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   6815 		    device_xname(sc->sc_dev), phy, reg);
   6816 }
   6817 
   6818 /*
   6819  * wm_gmii_i80003_readreg:	[mii interface function]
   6820  *
   6821  *	Read a PHY register on the kumeran
   6822  * This could be handled by the PHY layer if we didn't have to lock the
   6823  * ressource ...
   6824  */
   6825 static int
   6826 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   6827 {
   6828 	struct wm_softc *sc = device_private(self);
   6829 	int sem;
   6830 	int rv;
   6831 
   6832 	if (phy != 1) /* only one PHY on kumeran bus */
   6833 		return 0;
   6834 
   6835 	sem = swfwphysem[sc->sc_funcid];
   6836 	if (wm_get_swfw_semaphore(sc, sem)) {
   6837 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6838 		    __func__);
   6839 		return 0;
   6840 	}
   6841 
   6842 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6843 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6844 		    reg >> GG82563_PAGE_SHIFT);
   6845 	} else {
   6846 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6847 		    reg >> GG82563_PAGE_SHIFT);
   6848 	}
   6849 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   6850 	delay(200);
   6851 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6852 	delay(200);
   6853 
   6854 	wm_put_swfw_semaphore(sc, sem);
   6855 	return rv;
   6856 }
   6857 
   6858 /*
   6859  * wm_gmii_i80003_writereg:	[mii interface function]
   6860  *
   6861  *	Write a PHY register on the kumeran.
   6862  * This could be handled by the PHY layer if we didn't have to lock the
   6863  * ressource ...
   6864  */
   6865 static void
   6866 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   6867 {
   6868 	struct wm_softc *sc = device_private(self);
   6869 	int sem;
   6870 
   6871 	if (phy != 1) /* only one PHY on kumeran bus */
   6872 		return;
   6873 
   6874 	sem = swfwphysem[sc->sc_funcid];
   6875 	if (wm_get_swfw_semaphore(sc, sem)) {
   6876 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6877 		    __func__);
   6878 		return;
   6879 	}
   6880 
   6881 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6882 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6883 		    reg >> GG82563_PAGE_SHIFT);
   6884 	} else {
   6885 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6886 		    reg >> GG82563_PAGE_SHIFT);
   6887 	}
   6888 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   6889 	delay(200);
   6890 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6891 	delay(200);
   6892 
   6893 	wm_put_swfw_semaphore(sc, sem);
   6894 }
   6895 
   6896 /*
   6897  * wm_gmii_bm_readreg:	[mii interface function]
   6898  *
 *	Read a PHY register on a BM PHY (82567 family).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6902  */
   6903 static int
   6904 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   6905 {
   6906 	struct wm_softc *sc = device_private(self);
   6907 	int sem;
   6908 	int rv;
   6909 
   6910 	sem = swfwphysem[sc->sc_funcid];
   6911 	if (wm_get_swfw_semaphore(sc, sem)) {
   6912 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6913 		    __func__);
   6914 		return 0;
   6915 	}
   6916 
   6917 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6918 		if (phy == 1)
   6919 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6920 			    reg);
   6921 		else
   6922 			wm_gmii_i82544_writereg(self, phy,
   6923 			    GG82563_PHY_PAGE_SELECT,
   6924 			    reg >> GG82563_PAGE_SHIFT);
   6925 	}
   6926 
   6927 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6928 	wm_put_swfw_semaphore(sc, sem);
   6929 	return rv;
   6930 }
   6931 
   6932 /*
   6933  * wm_gmii_bm_writereg:	[mii interface function]
   6934  *
 *	Write a PHY register on a BM PHY (82567 family).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6938  */
   6939 static void
   6940 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   6941 {
   6942 	struct wm_softc *sc = device_private(self);
   6943 	int sem;
   6944 
   6945 	sem = swfwphysem[sc->sc_funcid];
   6946 	if (wm_get_swfw_semaphore(sc, sem)) {
   6947 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6948 		    __func__);
   6949 		return;
   6950 	}
   6951 
   6952 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6953 		if (phy == 1)
   6954 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6955 			    reg);
   6956 		else
   6957 			wm_gmii_i82544_writereg(self, phy,
   6958 			    GG82563_PHY_PAGE_SELECT,
   6959 			    reg >> GG82563_PAGE_SHIFT);
   6960 	}
   6961 
   6962 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6963 	wm_put_swfw_semaphore(sc, sem);
   6964 }
   6965 
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Access a BM PHY wakeup register (page 800).  Page 800 registers
 *	are reached indirectly: enable access via the enable register on
 *	page 769, write the target register number to the address-opcode
 *	register, then read or write the data-opcode register.  The
 *	original enable-register value is restored before returning.
 *	If 'rd' is non-zero the register is read into *val, otherwise
 *	*val is written to it.  The caller must hold the PHY semaphore.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
	struct wm_softc *sc = device_private(self);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t wuce;

	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if (sc->sc_type == WM_T_PCH) {
		/* XXX e1000 driver do nothing... why? */
	}

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Save the current enable-register value so we can restore it. */
	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

	/* Enable wakeup-register access (host wakeup bit must be clear). */
	wuce &= ~BM_WUC_HOST_WU_BIT;
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
	    wuce | BM_WUC_ENABLE_BIT);

	/* Select page 800 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);

	/* Write page 800 */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	if (rd)
		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
	else
		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

	/* Set page 769 */
	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

	/* Restore the saved enable-register value. */
	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
   7006 
   7007 /*
   7008  * wm_gmii_hv_readreg:	[mii interface function]
   7009  *
 *	Read a PHY register on an HV PHY (PCH family: 82577/82578/82579).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7013  */
   7014 static int
   7015 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   7016 {
   7017 	struct wm_softc *sc = device_private(self);
   7018 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7019 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7020 	uint16_t val;
   7021 	int rv;
   7022 
   7023 	if (wm_get_swfwhw_semaphore(sc)) {
   7024 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7025 		    __func__);
   7026 		return 0;
   7027 	}
   7028 
   7029 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7030 	if (sc->sc_phytype == WMPHY_82577) {
   7031 		/* XXX must write */
   7032 	}
   7033 
   7034 	/* Page 800 works differently than the rest so it has its own func */
   7035 	if (page == BM_WUC_PAGE) {
   7036 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   7037 		return val;
   7038 	}
   7039 
   7040 	/*
   7041 	 * Lower than page 768 works differently than the rest so it has its
   7042 	 * own func
   7043 	 */
   7044 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   7045 		printf("gmii_hv_readreg!!!\n");
   7046 		return 0;
   7047 	}
   7048 
   7049 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7050 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7051 		    page << BME1000_PAGE_SHIFT);
   7052 	}
   7053 
   7054 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   7055 	wm_put_swfwhw_semaphore(sc);
   7056 	return rv;
   7057 }
   7058 
   7059 /*
   7060  * wm_gmii_hv_writereg:	[mii interface function]
   7061  *
 *	Write a PHY register on an HV PHY (PCH family: 82577/82578/82579).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7065  */
   7066 static void
   7067 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   7068 {
   7069 	struct wm_softc *sc = device_private(self);
   7070 	uint16_t page = BM_PHY_REG_PAGE(reg);
   7071 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   7072 
   7073 	if (wm_get_swfwhw_semaphore(sc)) {
   7074 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7075 		    __func__);
   7076 		return;
   7077 	}
   7078 
   7079 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   7080 
   7081 	/* Page 800 works differently than the rest so it has its own func */
   7082 	if (page == BM_WUC_PAGE) {
   7083 		uint16_t tmp;
   7084 
   7085 		tmp = val;
   7086 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   7087 		return;
   7088 	}
   7089 
   7090 	/*
   7091 	 * Lower than page 768 works differently than the rest so it has its
   7092 	 * own func
   7093 	 */
   7094 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   7095 		printf("gmii_hv_writereg!!!\n");
   7096 		return;
   7097 	}
   7098 
   7099 	/*
   7100 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   7101 	 * Power Down (whenever bit 11 of the PHY control register is set)
   7102 	 */
   7103 
   7104 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   7105 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   7106 		    page << BME1000_PAGE_SHIFT);
   7107 	}
   7108 
   7109 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   7110 	wm_put_swfwhw_semaphore(sc);
   7111 }
   7112 
   7113 /*
   7114  * wm_sgmii_readreg:	[mii interface function]
   7115  *
   7116  *	Read a PHY register on the SGMII
   7117  * This could be handled by the PHY layer if we didn't have to lock the
   7118  * ressource ...
   7119  */
   7120 static int
   7121 wm_sgmii_readreg(device_t self, int phy, int reg)
   7122 {
   7123 	struct wm_softc *sc = device_private(self);
   7124 	uint32_t i2ccmd;
   7125 	int i, rv;
   7126 
   7127 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7128 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7129 		    __func__);
   7130 		return 0;
   7131 	}
   7132 
   7133 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7134 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7135 	    | I2CCMD_OPCODE_READ;
   7136 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7137 
   7138 	/* Poll the ready bit */
   7139 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7140 		delay(50);
   7141 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7142 		if (i2ccmd & I2CCMD_READY)
   7143 			break;
   7144 	}
   7145 	if ((i2ccmd & I2CCMD_READY) == 0)
   7146 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7147 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7148 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7149 
   7150 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7151 
   7152 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7153 	return rv;
   7154 }
   7155 
   7156 /*
   7157  * wm_sgmii_writereg:	[mii interface function]
   7158  *
   7159  *	Write a PHY register on the SGMII.
   7160  * This could be handled by the PHY layer if we didn't have to lock the
   7161  * ressource ...
   7162  */
   7163 static void
   7164 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7165 {
   7166 	struct wm_softc *sc = device_private(self);
   7167 	uint32_t i2ccmd;
   7168 	int i;
   7169 
   7170 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7171 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7172 		    __func__);
   7173 		return;
   7174 	}
   7175 
   7176 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7177 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7178 	    | I2CCMD_OPCODE_WRITE;
   7179 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7180 
   7181 	/* Poll the ready bit */
   7182 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7183 		delay(50);
   7184 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7185 		if (i2ccmd & I2CCMD_READY)
   7186 			break;
   7187 	}
   7188 	if ((i2ccmd & I2CCMD_READY) == 0)
   7189 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7190 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7191 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7192 
   7193 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
   7194 }
   7195 
   7196 /*
   7197  * wm_gmii_82580_readreg:	[mii interface function]
   7198  *
   7199  *	Read a PHY register on the 82580 and I350.
   7200  * This could be handled by the PHY layer if we didn't have to lock the
   7201  * ressource ...
   7202  */
   7203 static int
   7204 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   7205 {
   7206 	struct wm_softc *sc = device_private(self);
   7207 	int sem;
   7208 	int rv;
   7209 
   7210 	sem = swfwphysem[sc->sc_funcid];
   7211 	if (wm_get_swfw_semaphore(sc, sem)) {
   7212 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7213 		    __func__);
   7214 		return 0;
   7215 	}
   7216 
   7217 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   7218 
   7219 	wm_put_swfw_semaphore(sc, sem);
   7220 	return rv;
   7221 }
   7222 
   7223 /*
   7224  * wm_gmii_82580_writereg:	[mii interface function]
   7225  *
   7226  *	Write a PHY register on the 82580 and I350.
   7227  * This could be handled by the PHY layer if we didn't have to lock the
   7228  * ressource ...
   7229  */
   7230 static void
   7231 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   7232 {
   7233 	struct wm_softc *sc = device_private(self);
   7234 	int sem;
   7235 
   7236 	sem = swfwphysem[sc->sc_funcid];
   7237 	if (wm_get_swfw_semaphore(sc, sem)) {
   7238 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7239 		    __func__);
   7240 		return;
   7241 	}
   7242 
   7243 	wm_gmii_i82544_writereg(self, phy, reg, val);
   7244 
   7245 	wm_put_swfw_semaphore(sc, sem);
   7246 }
   7247 
   7248 /*
   7249  * wm_gmii_statchg:	[mii interface function]
   7250  *
   7251  *	Callback from MII layer when media changes.
   7252  */
static void
wm_gmii_statchg(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Clear the bits we are about to recompute from the new link state. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Program TX/RX pause enables from the negotiated flags. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on duplex. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	/* Push the recomputed values to the hardware. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
						 : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type == WM_T_80003) {
		/* 80003: adjust kumeran half-duplex control and TIPG by speed. */
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
   7311 
   7312 /*
   7313  * wm_kmrn_readreg:
   7314  *
   7315  *	Read a kumeran register
   7316  */
   7317 static int
   7318 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   7319 {
   7320 	int rv;
   7321 
   7322 	if (sc->sc_flags == WM_F_SWFW_SYNC) {
   7323 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7324 			aprint_error_dev(sc->sc_dev,
   7325 			    "%s: failed to get semaphore\n", __func__);
   7326 			return 0;
   7327 		}
   7328 	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
   7329 		if (wm_get_swfwhw_semaphore(sc)) {
   7330 			aprint_error_dev(sc->sc_dev,
   7331 			    "%s: failed to get semaphore\n", __func__);
   7332 			return 0;
   7333 		}
   7334 	}
   7335 
   7336 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7337 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7338 	    KUMCTRLSTA_REN);
   7339 	delay(2);
   7340 
   7341 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   7342 
   7343 	if (sc->sc_flags == WM_F_SWFW_SYNC)
   7344 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7345 	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
   7346 		wm_put_swfwhw_semaphore(sc);
   7347 
   7348 	return rv;
   7349 }
   7350 
   7351 /*
   7352  * wm_kmrn_writereg:
   7353  *
   7354  *	Write a kumeran register
   7355  */
   7356 static void
   7357 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   7358 {
   7359 
   7360 	if (sc->sc_flags == WM_F_SWFW_SYNC) {
   7361 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7362 			aprint_error_dev(sc->sc_dev,
   7363 			    "%s: failed to get semaphore\n", __func__);
   7364 			return;
   7365 		}
   7366 	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
   7367 		if (wm_get_swfwhw_semaphore(sc)) {
   7368 			aprint_error_dev(sc->sc_dev,
   7369 			    "%s: failed to get semaphore\n", __func__);
   7370 			return;
   7371 		}
   7372 	}
   7373 
   7374 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7375 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7376 	    (val & KUMCTRLSTA_MASK));
   7377 
   7378 	if (sc->sc_flags == WM_F_SWFW_SYNC)
   7379 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   7380 	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
   7381 		wm_put_swfwhw_semaphore(sc);
   7382 }
   7383 
   7384 static int
   7385 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
   7386 {
   7387 	uint32_t eecd = 0;
   7388 
   7389 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   7390 	    || sc->sc_type == WM_T_82583) {
   7391 		eecd = CSR_READ(sc, WMREG_EECD);
   7392 
   7393 		/* Isolate bits 15 & 16 */
   7394 		eecd = ((eecd >> 15) & 0x03);
   7395 
   7396 		/* If both bits are set, device is Flash type */
   7397 		if (eecd == 0x03)
   7398 			return 0;
   7399 	}
   7400 	return 1;
   7401 }
   7402 
   7403 static int
   7404 wm_get_swsm_semaphore(struct wm_softc *sc)
   7405 {
   7406 	int32_t timeout;
   7407 	uint32_t swsm;
   7408 
   7409 	/* Get the FW semaphore. */
   7410 	timeout = 1000 + 1; /* XXX */
   7411 	while (timeout) {
   7412 		swsm = CSR_READ(sc, WMREG_SWSM);
   7413 		swsm |= SWSM_SWESMBI;
   7414 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   7415 		/* if we managed to set the bit we got the semaphore. */
   7416 		swsm = CSR_READ(sc, WMREG_SWSM);
   7417 		if (swsm & SWSM_SWESMBI)
   7418 			break;
   7419 
   7420 		delay(50);
   7421 		timeout--;
   7422 	}
   7423 
   7424 	if (timeout == 0) {
   7425 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
   7426 		/* Release semaphores */
   7427 		wm_put_swsm_semaphore(sc);
   7428 		return 1;
   7429 	}
   7430 	return 0;
   7431 }
   7432 
   7433 static void
   7434 wm_put_swsm_semaphore(struct wm_softc *sc)
   7435 {
   7436 	uint32_t swsm;
   7437 
   7438 	swsm = CSR_READ(sc, WMREG_SWSM);
   7439 	swsm &= ~(SWSM_SWESMBI);
   7440 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   7441 }
   7442 
   7443 static int
   7444 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   7445 {
   7446 	uint32_t swfw_sync;
   7447 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   7448 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   7449 	int timeout = 200;
   7450 
   7451 	for (timeout = 0; timeout < 200; timeout++) {
   7452 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
   7453 			if (wm_get_swsm_semaphore(sc)) {
   7454 				aprint_error_dev(sc->sc_dev,
   7455 				    "%s: failed to get semaphore\n",
   7456 				    __func__);
   7457 				return 1;
   7458 			}
   7459 		}
   7460 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   7461 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   7462 			swfw_sync |= swmask;
   7463 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   7464 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   7465 				wm_put_swsm_semaphore(sc);
   7466 			return 0;
   7467 		}
   7468 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   7469 			wm_put_swsm_semaphore(sc);
   7470 		delay(5000);
   7471 	}
   7472 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   7473 	    device_xname(sc->sc_dev), mask, swfw_sync);
   7474 	return 1;
   7475 }
   7476 
   7477 static void
   7478 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   7479 {
   7480 	uint32_t swfw_sync;
   7481 
   7482 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
   7483 		while (wm_get_swsm_semaphore(sc) != 0)
   7484 			continue;
   7485 	}
   7486 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   7487 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   7488 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   7489 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   7490 		wm_put_swsm_semaphore(sc);
   7491 }
   7492 
   7493 static int
   7494 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   7495 {
   7496 	uint32_t ext_ctrl;
   7497 	int timeout = 200;
   7498 
   7499 	for (timeout = 0; timeout < 200; timeout++) {
   7500 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7501 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   7502 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   7503 
   7504 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7505 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   7506 			return 0;
   7507 		delay(5000);
   7508 	}
   7509 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   7510 	    device_xname(sc->sc_dev), ext_ctrl);
   7511 	return 1;
   7512 }
   7513 
   7514 static void
   7515 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   7516 {
   7517 	uint32_t ext_ctrl;
   7518 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   7519 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   7520 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   7521 }
   7522 
   7523 static int
   7524 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   7525 {
   7526 	int i = 0;
   7527 	uint32_t reg;
   7528 
   7529 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7530 	do {
   7531 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   7532 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   7533 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7534 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   7535 			break;
   7536 		delay(2*1000);
   7537 		i++;
   7538 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   7539 
   7540 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   7541 		wm_put_hw_semaphore_82573(sc);
   7542 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   7543 		    device_xname(sc->sc_dev));
   7544 		return -1;
   7545 	}
   7546 
   7547 	return 0;
   7548 }
   7549 
   7550 static void
   7551 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   7552 {
   7553 	uint32_t reg;
   7554 
   7555 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   7556 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   7557 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   7558 }
   7559 
/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM flash banks holds the valid
 *	image.  On success returns 0 with *bank set to 0 or 1; returns
 *	-1 if neither bank carries a valid signature.
 */
static int
wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
{
	uint32_t eecd;
	/* Byte address of the signature byte (high byte of the sig word). */
	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
	/* Distance in bytes from bank 0 to bank 1. */
	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
	uint8_t sig_byte = 0;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* ICH8/ICH9 may report the valid bank directly in EECD. */
		eecd = CSR_READ(sc, WMREG_EECD);
		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
			return 0;
		}
		/* FALLTHROUGH */
	default:
		/* Default to 0 */
		*bank = 0;

		/*
		 * Check bank 0.
		 * NOTE(review): the return value of wm_read_ich8_byte()
		 * is ignored here; on read failure sig_byte stays 0 and
		 * the signature test below simply fails.
		 */
		wm_read_ich8_byte(sc, act_offset, &sig_byte);
		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		wm_read_ich8_byte(sc, act_offset + bank1_offset,
		    &sig_byte);
		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}
	}

	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
		device_xname(sc->sc_dev)));
	return -1;
}
   7601 
   7602 /******************************************************************************
   7603  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   7604  * register.
   7605  *
   7606  * sc - Struct containing variables accessed by shared code
   7607  * offset - offset of word in the EEPROM to read
   7608  * data - word read from the EEPROM
   7609  * words - number of words to read
   7610  *****************************************************************************/
   7611 static int
   7612 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   7613 {
   7614 	int32_t  error = 0;
   7615 	uint32_t flash_bank = 0;
   7616 	uint32_t act_offset = 0;
   7617 	uint32_t bank_offset = 0;
   7618 	uint16_t word = 0;
   7619 	uint16_t i = 0;
   7620 
   7621 	/* We need to know which is the valid flash bank.  In the event
   7622 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   7623 	 * managing flash_bank.  So it cannot be trusted and needs
   7624 	 * to be updated with each read.
   7625 	 */
   7626 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
   7627 	if (error) {
   7628 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
   7629 		    __func__);
   7630 		flash_bank = 0;
   7631 	}
   7632 
   7633 	/*
   7634 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   7635 	 * size
   7636 	 */
   7637 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   7638 
   7639 	error = wm_get_swfwhw_semaphore(sc);
   7640 	if (error) {
   7641 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7642 		    __func__);
   7643 		return error;
   7644 	}
   7645 
   7646 	for (i = 0; i < words; i++) {
   7647 		/* The NVM part needs a byte offset, hence * 2 */
   7648 		act_offset = bank_offset + ((offset + i) * 2);
   7649 		error = wm_read_ich8_word(sc, act_offset, &word);
   7650 		if (error) {
   7651 			aprint_error_dev(sc->sc_dev,
   7652 			    "%s: failed to read NVM\n", __func__);
   7653 			break;
   7654 		}
   7655 		data[i] = word;
   7656 	}
   7657 
   7658 	wm_put_swfwhw_semaphore(sc);
   7659 	return error;
   7660 }
   7661 
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.  Returns 0 when the controller is ready for a new cycle,
 * non-zero on failure.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i     = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after harware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads dont start the cycle at the same time
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */

		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
   7733 
/******************************************************************************
 * This function starts a flash cycle and waits for its completion.
 * Returns 0 when the cycle completed without error, 1 otherwise.
 *
 * sc - The pointer to the hw structure
 * timeout - maximum number of 1us polls to wait for FDONE
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	/*
	 * NOTE(review): the "== 1" works only if HSFSTS_DONE is bit 0 of
	 * the register (so the masked value is exactly 1); "!= 0" would
	 * be more robust — confirm against the HSFSTS bit definitions.
	 */
	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;

	return error;
}
   7765 
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 * Returns 0 on success, non-zero on error or invalid arguments.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	/* Validate arguments before touching the hardware. */
	if (size < 1  || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	/* Retry the whole read sequence a bounded number of times. */
	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
   7847 
/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 * On failure *data is set to 0.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	uint16_t word = 0;
	int32_t rv;

	rv = wm_read_ich8_data(sc, index, 1, &word);
	*data = (rv == 0) ? (uint8_t)word : 0;

	return rv;
}
   7869 
/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{

	/* A word read is simply a two-byte flash access. */
	return wm_read_ich8_data(sc, index, 2, data);
}
   7885 
   7886 static int
   7887 wm_check_mng_mode(struct wm_softc *sc)
   7888 {
   7889 	int rv;
   7890 
   7891 	switch (sc->sc_type) {
   7892 	case WM_T_ICH8:
   7893 	case WM_T_ICH9:
   7894 	case WM_T_ICH10:
   7895 	case WM_T_PCH:
   7896 	case WM_T_PCH2:
   7897 	case WM_T_PCH_LPT:
   7898 		rv = wm_check_mng_mode_ich8lan(sc);
   7899 		break;
   7900 	case WM_T_82574:
   7901 	case WM_T_82583:
   7902 		rv = wm_check_mng_mode_82574(sc);
   7903 		break;
   7904 	case WM_T_82571:
   7905 	case WM_T_82572:
   7906 	case WM_T_82573:
   7907 	case WM_T_80003:
   7908 		rv = wm_check_mng_mode_generic(sc);
   7909 		break;
   7910 	default:
   7911 		/* noting to do */
   7912 		rv = 0;
   7913 		break;
   7914 	}
   7915 
   7916 	return rv;
   7917 }
   7918 
   7919 static int
   7920 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   7921 {
   7922 	uint32_t fwsm;
   7923 
   7924 	fwsm = CSR_READ(sc, WMREG_FWSM);
   7925 
   7926 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   7927 		return 1;
   7928 
   7929 	return 0;
   7930 }
   7931 
   7932 static int
   7933 wm_check_mng_mode_82574(struct wm_softc *sc)
   7934 {
   7935 	uint16_t data;
   7936 
   7937 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
   7938 
   7939 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
   7940 		return 1;
   7941 
   7942 	return 0;
   7943 }
   7944 
   7945 static int
   7946 wm_check_mng_mode_generic(struct wm_softc *sc)
   7947 {
   7948 	uint32_t fwsm;
   7949 
   7950 	fwsm = CSR_READ(sc, WMREG_FWSM);
   7951 
   7952 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   7953 		return 1;
   7954 
   7955 	return 0;
   7956 }
   7957 
/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through should be enabled, i.e. the
 *	firmware is present, TCO reception is on, and the chip-specific
 *	manageability configuration allows it; 0 otherwise.
 */
static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	/* No ASF firmware -> nothing to pass traffic to. */
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	/* Firmware must be receiving TCO traffic for pass-through. */
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		/* ARC subsystem: check FWSM mode and FACTPS clock gating. */
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
		/* 82574/82583: manageability mode lives in NVM CFG2. */
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & EEPROM_CFG2_MNGM_MASK)
			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		/* SMBus-based management without ASF. */
		return 1;

	return 0;
}
   7997 
   7998 static int
   7999 wm_check_reset_block(struct wm_softc *sc)
   8000 {
   8001 	uint32_t reg;
   8002 
   8003 	switch (sc->sc_type) {
   8004 	case WM_T_ICH8:
   8005 	case WM_T_ICH9:
   8006 	case WM_T_ICH10:
   8007 	case WM_T_PCH:
   8008 	case WM_T_PCH2:
   8009 	case WM_T_PCH_LPT:
   8010 		reg = CSR_READ(sc, WMREG_FWSM);
   8011 		if ((reg & FWSM_RSPCIPHY) != 0)
   8012 			return 0;
   8013 		else
   8014 			return -1;
   8015 		break;
   8016 	case WM_T_82571:
   8017 	case WM_T_82572:
   8018 	case WM_T_82573:
   8019 	case WM_T_82574:
   8020 	case WM_T_82583:
   8021 	case WM_T_80003:
   8022 		reg = CSR_READ(sc, WMREG_MANC);
   8023 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8024 			return -1;
   8025 		else
   8026 			return 0;
   8027 		break;
   8028 	default:
   8029 		/* no problem */
   8030 		break;
   8031 	}
   8032 
   8033 	return 0;
   8034 }
   8035 
   8036 static void
   8037 wm_get_hw_control(struct wm_softc *sc)
   8038 {
   8039 	uint32_t reg;
   8040 
   8041 	switch (sc->sc_type) {
   8042 	case WM_T_82573:
   8043 		reg = CSR_READ(sc, WMREG_SWSM);
   8044 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8045 		break;
   8046 	case WM_T_82571:
   8047 	case WM_T_82572:
   8048 	case WM_T_82574:
   8049 	case WM_T_82583:
   8050 	case WM_T_80003:
   8051 	case WM_T_ICH8:
   8052 	case WM_T_ICH9:
   8053 	case WM_T_ICH10:
   8054 	case WM_T_PCH:
   8055 	case WM_T_PCH2:
   8056 	case WM_T_PCH_LPT:
   8057 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8058 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8059 		break;
   8060 	default:
   8061 		break;
   8062 	}
   8063 }
   8064 
   8065 static void
   8066 wm_release_hw_control(struct wm_softc *sc)
   8067 {
   8068 	uint32_t reg;
   8069 
   8070 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8071 		return;
   8072 
   8073 	if (sc->sc_type == WM_T_82573) {
   8074 		reg = CSR_READ(sc, WMREG_SWSM);
   8075 		reg &= ~SWSM_DRV_LOAD;
   8076 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   8077 	} else {
   8078 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8079 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8080 	}
   8081 }
   8082 
/* XXX Currently TBI only */
/*
 * wm_check_for_link:
 *
 *	Examine the fiber (TBI) link state from CTRL/STATUS/RXCW and
 *	either force link up (no /C/ ordered sets seen) or return to
 *	autonegotiation.  Always returns 0.
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* Signal-detect pin polarity differs on older chips. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonego and force linkup
	 *      1    0    1	got /C/ but not linkup yet
	 *      1    1    0	(linkup)
	 *      1    1    1	If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present, no link, no /C/: force the link up. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link and /C/ present: resume autonegotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* /C/ received but link not up yet; just note it. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
   8153 
   8154 /* Work-around for 82566 Kumeran PCS lock loss */
   8155 static void
   8156 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   8157 {
   8158 	int miistatus, active, i;
   8159 	int reg;
   8160 
   8161 	miistatus = sc->sc_mii.mii_media_status;
   8162 
   8163 	/* If the link is not up, do nothing */
   8164 	if ((miistatus & IFM_ACTIVE) != 0)
   8165 		return;
   8166 
   8167 	active = sc->sc_mii.mii_media_active;
   8168 
   8169 	/* Nothing to do if the link is other than 1Gbps */
   8170 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   8171 		return;
   8172 
   8173 	for (i = 0; i < 10; i++) {
   8174 		/* read twice */
   8175 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   8176 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   8177 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
   8178 			goto out;	/* GOOD! */
   8179 
   8180 		/* Reset the PHY */
   8181 		wm_gmii_reset(sc);
   8182 		delay(5*1000);
   8183 	}
   8184 
   8185 	/* Disable GigE link negotiation */
   8186 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8187 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   8188 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8189 
   8190 	/*
   8191 	 * Call gig speed drop workaround on Gig disable before accessing
   8192 	 * any PHY registers.
   8193 	 */
   8194 	wm_gig_downshift_workaround_ich8lan(sc);
   8195 
   8196 out:
   8197 	return;
   8198 }
   8199 
   8200 /* WOL from S5 stops working */
   8201 static void
   8202 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   8203 {
   8204 	uint16_t kmrn_reg;
   8205 
   8206 	/* Only for igp3 */
   8207 	if (sc->sc_phytype == WMPHY_IGP_3) {
   8208 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   8209 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   8210 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   8211 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   8212 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   8213 	}
   8214 }
   8215 
   8216 #ifdef WM_WOL
/* Power down workaround on D3 */
/*
 *	Disable gigabit and force the IGP3 voltage regulator into
 *	shutdown mode, verifying the setting took; retry once with a
 *	PHY reset in between if it did not.
 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
   8253 #endif /* WM_WOL */
   8254 
/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	/* 82577 needs slow MDIO before anything else. */
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}
   8295 
/*
 * Workaround for pch2's PHY: it also requires slow MDIO access mode.
 * NOTE(review): "lv" presumably refers to the 82579 PHY (used with
 * WM_T_PCH2 elsewhere in this file) — confirm against Intel docs.
 */
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}
   8302 
   8303 static void
   8304 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   8305 {
   8306 	int k1_enable = sc->sc_nvm_k1_enabled;
   8307 
   8308 	/* XXX acquire semaphore */
   8309 
   8310 	if (link) {
   8311 		k1_enable = 0;
   8312 
   8313 		/* Link stall fix for link up */
   8314 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   8315 	} else {
   8316 		/* Link stall fix for link down */
   8317 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   8318 	}
   8319 
   8320 	wm_configure_k1_ich8lan(sc, k1_enable);
   8321 
   8322 	/* XXX release semaphore */
   8323 }
   8324 
   8325 static void
   8326 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   8327 {
   8328 	uint32_t reg;
   8329 
   8330 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   8331 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   8332 	    reg | HV_KMRN_MDIO_SLOW);
   8333 }
   8334 
/*
 * Enable or disable K1 (a KUMERAN-interface power saving state).
 *
 * After updating the K1 bit in the KMRN K1_CONFIG register, the MAC
 * is briefly put into a forced-speed state (speed-select bits cleared,
 * force-speed set, speed bypass enabled) so the new setting takes
 * effect, then the original CTRL/CTRL_EXT values are restored.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	/* Update the K1 enable bit in the KMRN K1 config register */
	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	/* Save CTRL/CTRL_EXT so they can be restored afterwards */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	/* Clear the speed-select bits and force the speed */
	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	/* Restore the original register values */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}
   8366 
/*
 * Switch the PHY from SMBus to PCIe mode.
 *
 * Only done when no firmware is managing the PHY (FWSM_FW_VALID clear)
 * and the PHY is not reset-blocked: the LANPHYPC value/override bits in
 * CTRL are toggled to force the PHY's power state, with the delays the
 * hardware requires for the transition to settle.
 */
static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && ((wm_check_reset_block(sc) == 0))) {
		/* Drive LANPHYPC low with software in control */
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		/* Release the override and give the PHY time to come up */
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on non-managed
		 * 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}
   8391 
   8392 static void
   8393 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   8394 {
   8395 	uint32_t gcr;
   8396 	pcireg_t ctrl2;
   8397 
   8398 	gcr = CSR_READ(sc, WMREG_GCR);
   8399 
   8400 	/* Only take action if timeout value is defaulted to 0 */
   8401 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   8402 		goto out;
   8403 
   8404 	if ((gcr & GCR_CAP_VER2) == 0) {
   8405 		gcr |= GCR_CMPL_TMOUT_10MS;
   8406 		goto out;
   8407 	}
   8408 
   8409 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   8410 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   8411 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   8412 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   8413 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   8414 
   8415 out:
   8416 	/* Disable completion timeout resend */
   8417 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   8418 
   8419 	CSR_WRITE(sc, WMREG_GCR, gcr);
   8420 }
   8421 
/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 * The offset/value pairs below are undocumented magic numbers
	 * taken from that shared code.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
   8452 
   8453 static void
   8454 wm_init_manageability(struct wm_softc *sc)
   8455 {
   8456 
   8457 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8458 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8459 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8460 
   8461 		/* disabl hardware interception of ARP */
   8462 		manc &= ~MANC_ARP_EN;
   8463 
   8464 		/* enable receiving management packets to the host */
   8465 		if (sc->sc_type >= WM_T_82571) {
   8466 			manc |= MANC_EN_MNG2HOST;
   8467 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   8468 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   8469 
   8470 		}
   8471 
   8472 		CSR_WRITE(sc, WMREG_MANC, manc);
   8473 	}
   8474 }
   8475 
   8476 static void
   8477 wm_release_manageability(struct wm_softc *sc)
   8478 {
   8479 
   8480 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8481 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8482 
   8483 		manc |= MANC_ARP_EN;
   8484 		if (sc->sc_type >= WM_T_82571)
   8485 			manc &= ~MANC_EN_MNG2HOST;
   8486 
   8487 		CSR_WRITE(sc, WMREG_MANC, manc);
   8488 	}
   8489 }
   8490 
/*
 * Determine the chip's management/wakeup capabilities and record them
 * in sc->sc_flags (WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 * WM_F_ASF_FIRMWARE_PRES, WM_F_HAS_MANAGE).  The WOL flag itself is
 * set later, after the EEPROM handling is reset (see note at bottom).
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		/* A nonzero FWSM mode means the ARC subsystem is valid */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Both flags are set unconditionally for ICH/PCH types */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags are set after the resetting of the eeprom
	 * stuff
	 */
}
   8550 
   8551 #ifdef WM_WOL
   8552 /* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
	/*
	 * XXX Not implemented yet.  On PCH-class devices the wakeup
	 * configuration lives in the PHY, so the steps outlined in the
	 * disabled block below (copying the MAC's RAR/MTA tables into
	 * the PHY and enabling wakeup there) would have to be done here.
	 */
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
   8574 
   8575 static void
   8576 wm_enable_wakeup(struct wm_softc *sc)
   8577 {
   8578 	uint32_t reg, pmreg;
   8579 	pcireg_t pmode;
   8580 
   8581 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   8582 		&pmreg, NULL) == 0)
   8583 		return;
   8584 
   8585 	/* Advertise the wakeup capability */
   8586 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   8587 	    | CTRL_SWDPIN(3));
   8588 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   8589 
   8590 	/* ICH workaround */
   8591 	switch (sc->sc_type) {
   8592 	case WM_T_ICH8:
   8593 	case WM_T_ICH9:
   8594 	case WM_T_ICH10:
   8595 	case WM_T_PCH:
   8596 	case WM_T_PCH2:
   8597 	case WM_T_PCH_LPT:
   8598 		/* Disable gig during WOL */
   8599 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8600 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   8601 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8602 		if (sc->sc_type == WM_T_PCH)
   8603 			wm_gmii_reset(sc);
   8604 
   8605 		/* Power down workaround */
   8606 		if (sc->sc_phytype == WMPHY_82577) {
   8607 			struct mii_softc *child;
   8608 
   8609 			/* Assume that the PHY is copper */
   8610 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   8611 			if (child->mii_mpd_rev <= 2)
   8612 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   8613 				    (768 << 5) | 25, 0x0444); /* magic num */
   8614 		}
   8615 		break;
   8616 	default:
   8617 		break;
   8618 	}
   8619 
   8620 	/* Keep the laser running on fiber adapters */
   8621 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
   8622 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
   8623 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8624 		reg |= CTRL_EXT_SWDPIN(3);
   8625 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8626 	}
   8627 
   8628 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   8629 #if 0	/* for the multicast packet */
   8630 	reg |= WUFC_MC;
   8631 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   8632 #endif
   8633 
   8634 	if (sc->sc_type == WM_T_PCH) {
   8635 		wm_enable_phy_wakeup(sc);
   8636 	} else {
   8637 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   8638 		CSR_WRITE(sc, WMREG_WUFC, reg);
   8639 	}
   8640 
   8641 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8642 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8643 		|| (sc->sc_type == WM_T_PCH2))
   8644 		    && (sc->sc_phytype == WMPHY_IGP_3))
   8645 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   8646 
   8647 	/* Request PME */
   8648 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   8649 #if 0
   8650 	/* Disable WOL */
   8651 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   8652 #else
   8653 	/* For WOL */
   8654 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   8655 #endif
   8656 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   8657 }
   8658 #endif /* WM_WOL */
   8659 
/*
 * pmf suspend handler: release management and host control of the
 * device and (when built with WM_WOL) arm it for wake-on-LAN.
 */
static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}
   8673 
/*
 * pmf resume handler: re-establish manageability after wakeup.
 */
static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_init_manageability(sc);

	return true;
}
   8683 
   8684 static void
   8685 wm_set_eee_i350(struct wm_softc * sc)
   8686 {
   8687 	uint32_t ipcnfg, eeer;
   8688 
   8689 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   8690 	eeer = CSR_READ(sc, WMREG_EEER);
   8691 
   8692 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   8693 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   8694 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   8695 		    | EEER_LPI_FC);
   8696 	} else {
   8697 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   8698 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   8699 		    | EEER_LPI_FC);
   8700 	}
   8701 
   8702 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   8703 	CSR_WRITE(sc, WMREG_EEER, eeer);
   8704 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   8705 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   8706 }
   8707