/*	$NetBSD: if_wm.c,v 1.282 2014/07/25 18:28:03 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.282 2014/07/25 18:28:03 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
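
/*
 * Usage sketch for DPRINTF() (illustration only, not part of the
 * driver logic): the first argument selects a debug class, the second
 * is a fully parenthesized printf() argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 */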

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
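
/*
 * Illustration (not compiled): because the descriptor and job counts
 * are powers of two, the WM_NEXT*() macros wrap with a mask instead
 * of a modulo.  With WM_NTXDESC(sc) == 4096, for example:
 *
 *	WM_NEXTTX(sc, 4095) == (4095 + 1) & 4095 == 0
 */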

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
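
/*
 * Illustration (not compiled): WM_CDTXOFF(5) expands to
 * offsetof(struct wm_control_data_82544, wdc_u.wcdu_txdescs[5]),
 * i.e. the byte offset of Tx descriptor 5 within the control-data
 * clump; WM_CDTXADDR() below adds it to the clump's DMA base address.
 */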

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* actual number of control data segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_txrx_lock;		/* lock for tx/rx operations */
					/* XXX need separation? */
};

#define WM_LOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_enter((_sc)->sc_txrx_lock)
#define WM_UNLOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_exit((_sc)->sc_txrx_lock)
#define WM_LOCKED(_sc)	(!(_sc)->sc_txrx_lock || mutex_owned((_sc)->sc_txrx_lock))
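
/*
 * Usage sketch (illustration only): a typical critical section around
 * the Tx/Rx state, in the shape the locked start/stop entry points use:
 *
 *	WM_LOCK(sc);
 *	if (!sc->sc_stopping)
 *		wm_start_locked(ifp);
 *	WM_UNLOCK(sc);
 */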

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
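
/*
 * Usage sketch (illustration only): the Rx path appends each filled
 * buffer with WM_RXCHAIN_LINK() and, once a complete packet has been
 * assembled, takes the chain from sc_rxhead and resets:
 *
 *	WM_RXCHAIN_LINK(sc, m);
 *	...
 *	m = sc->sc_rxhead;
 *	WM_RXCHAIN_RESET(sc);
 */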

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
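
/*
 * Usage sketch (illustration only; CTRL_SLU is assumed here as an
 * example bit from if_wmreg.h): the common read-modify-write of a
 * CSR, with CSR_WRITE_FLUSH() forcing the posted write to the chip:
 *
 *	reg = CSR_READ(sc, WMREG_CTRL);
 *	reg |= CTRL_SLU;
 *	CSR_WRITE(sc, WMREG_CTRL, reg);
 *	CSR_WRITE_FLUSH(sc);
 */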
    426 
    427 #define ICH8_FLASH_READ32(sc, reg) \
    428 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    429 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    430 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    431 
    432 #define ICH8_FLASH_READ16(sc, reg) \
    433 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    434 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    435 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    436 
    437 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
    438 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    439 
    440 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
    441 #define	WM_CDTXADDR_HI(sc, x)						\
    442 	(sizeof(bus_addr_t) == 8 ?					\
    443 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
    444 
    445 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
    446 #define	WM_CDRXADDR_HI(sc, x)						\
    447 	(sizeof(bus_addr_t) == 8 ?					\
    448 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
    449 

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
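
/*
 * Usage sketch (illustration only): syncing all the descriptors of a
 * completed transmit job; WM_CDTXSYNC() transparently handles the
 * wrap at the end of the ring:
 *
 *	WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
 *	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 */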

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
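
/*
 * Illustration: with sc_align_tweak == 2, both m_data and the DMA
 * address start 2 bytes into the buffer, so the 14-byte Ethernet
 * header ends at offset 16 and the IP header that follows is 4-byte
 * aligned.
 */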

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
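
/*
 * Illustration: the I/O BAR is a two-register indirect window.
 * Offset 0 latches the CSR address and offset 4 accesses its data,
 * so wm_io_write(sc, WMREG_CTRL, v) is the I/O-space equivalent of
 * CSR_WRITE(sc, WMREG_CTRL, v).
 */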

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
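
/*
 * Illustration: on a platform with a 64-bit bus_addr_t, an address
 * such as 0x123456780 is split into wa_low = 0x23456780 and
 * wa_high = 0x1, each stored little-endian as the hardware expects;
 * on 32-bit platforms wa_high is simply 0.
 */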

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
   1359 			/*
    1360 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1361 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1362 			 * That's not a problem here, because the newer
    1363 			 * chips don't have this bug.
    1364 			 *
    1365 			 * The i8254x apparently doesn't respond when the
    1366 			 * I/O BAR is 0, which looks as if it has not
    1367 			 * been configured.
   1368 			 */
   1369 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1370 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1371 				aprint_error_dev(sc->sc_dev,
   1372 				    "WARNING: I/O BAR at zero.\n");
   1373 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1374 					0, &sc->sc_iot, &sc->sc_ioh,
   1375 					NULL, &sc->sc_ios) == 0) {
   1376 				sc->sc_flags |= WM_F_IOH_VALID;
   1377 			} else {
   1378 				aprint_error_dev(sc->sc_dev,
   1379 				    "WARNING: unable to map I/O space\n");
   1380 			}
   1381 		}
   1382 
   1383 	}
   1384 
   1385 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1386 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1387 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1388 	if (sc->sc_type < WM_T_82542_2_1)
   1389 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1390 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1391 
   1392 	/* power up chip */
   1393 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1394 	    NULL)) && error != EOPNOTSUPP) {
   1395 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1396 		return;
   1397 	}
   1398 
   1399 	/*
   1400 	 * Map and establish our interrupt.
   1401 	 */
   1402 	if (pci_intr_map(pa, &ih)) {
   1403 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1404 		return;
   1405 	}
   1406 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1407 #ifdef WM_MPSAFE
   1408 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1409 #endif
   1410 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1411 	if (sc->sc_ih == NULL) {
   1412 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1413 		if (intrstr != NULL)
   1414 			aprint_error(" at %s", intrstr);
   1415 		aprint_error("\n");
   1416 		return;
   1417 	}
   1418 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1419 
   1420 	/*
   1421 	 * Check the function ID (unit number of the chip).
   1422 	 */
   1423 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1424 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1425 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1426 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1427 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1428 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1429 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1430 	else
   1431 		sc->sc_funcid = 0;
   1432 
   1433 	/*
   1434 	 * Determine a few things about the bus we're connected to.
   1435 	 */
   1436 	if (sc->sc_type < WM_T_82543) {
   1437 		/* We don't really know the bus characteristics here. */
   1438 		sc->sc_bus_speed = 33;
   1439 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1440 		/*
    1441 		 * CSA (Communication Streaming Architecture) is about as
    1442 		 * fast as a 32-bit 66MHz PCI bus.
   1443 		 */
   1444 		sc->sc_flags |= WM_F_CSA;
   1445 		sc->sc_bus_speed = 66;
   1446 		aprint_verbose_dev(sc->sc_dev,
   1447 		    "Communication Streaming Architecture\n");
   1448 		if (sc->sc_type == WM_T_82547) {
   1449 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1450 			callout_setfunc(&sc->sc_txfifo_ch,
   1451 					wm_82547_txfifo_stall, sc);
   1452 			aprint_verbose_dev(sc->sc_dev,
   1453 			    "using 82547 Tx FIFO stall work-around\n");
   1454 		}
   1455 	} else if (sc->sc_type >= WM_T_82571) {
   1456 		sc->sc_flags |= WM_F_PCIE;
   1457 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1458 		    && (sc->sc_type != WM_T_ICH10)
   1459 		    && (sc->sc_type != WM_T_PCH)
   1460 		    && (sc->sc_type != WM_T_PCH2)
   1461 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1462 			/* ICH* and PCH* have no PCIe capability registers */
   1463 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1464 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1465 				NULL) == 0)
   1466 				aprint_error_dev(sc->sc_dev,
   1467 				    "unable to find PCIe capability\n");
   1468 		}
   1469 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1470 	} else {
   1471 		reg = CSR_READ(sc, WMREG_STATUS);
   1472 		if (reg & STATUS_BUS64)
   1473 			sc->sc_flags |= WM_F_BUS64;
   1474 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1475 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1476 
   1477 			sc->sc_flags |= WM_F_PCIX;
   1478 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1479 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1480 				aprint_error_dev(sc->sc_dev,
   1481 				    "unable to find PCIX capability\n");
   1482 			else if (sc->sc_type != WM_T_82545_3 &&
   1483 				 sc->sc_type != WM_T_82546_3) {
   1484 				/*
   1485 				 * Work around a problem caused by the BIOS
   1486 				 * setting the max memory read byte count
   1487 				 * incorrectly.
   1488 				 */
   1489 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1490 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1491 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1492 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1493 
   1494 				bytecnt =
   1495 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1496 				    PCIX_CMD_BYTECNT_SHIFT;
   1497 				maxb =
   1498 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1499 				    PCIX_STATUS_MAXB_SHIFT;
   1500 				if (bytecnt > maxb) {
   1501 					aprint_verbose_dev(sc->sc_dev,
   1502 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1503 					    512 << bytecnt, 512 << maxb);
   1504 					pcix_cmd = (pcix_cmd &
   1505 					    ~PCIX_CMD_BYTECNT_MASK) |
   1506 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1507 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1508 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1509 					    pcix_cmd);
   1510 				}
   1511 			}
   1512 		}
   1513 		/*
   1514 		 * The quad port adapter is special; it has a PCIX-PCIX
   1515 		 * bridge on the board, and can run the secondary bus at
   1516 		 * a higher speed.
   1517 		 */
   1518 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1519 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1520 								      : 66;
   1521 		} else if (sc->sc_flags & WM_F_PCIX) {
   1522 			switch (reg & STATUS_PCIXSPD_MASK) {
   1523 			case STATUS_PCIXSPD_50_66:
   1524 				sc->sc_bus_speed = 66;
   1525 				break;
   1526 			case STATUS_PCIXSPD_66_100:
   1527 				sc->sc_bus_speed = 100;
   1528 				break;
   1529 			case STATUS_PCIXSPD_100_133:
   1530 				sc->sc_bus_speed = 133;
   1531 				break;
   1532 			default:
   1533 				aprint_error_dev(sc->sc_dev,
   1534 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1535 				    reg & STATUS_PCIXSPD_MASK);
   1536 				sc->sc_bus_speed = 66;
   1537 				break;
   1538 			}
   1539 		} else
   1540 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1541 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1542 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1543 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1544 	}
   1545 
   1546 	/*
   1547 	 * Allocate the control data structures, and create and load the
   1548 	 * DMA map for it.
   1549 	 *
   1550 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1551 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1552 	 * both sets within the same 4G segment.
   1553 	 */
   1554 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1555 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1556 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1557 	    sizeof(struct wm_control_data_82542) :
   1558 	    sizeof(struct wm_control_data_82544);
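	/*
	 * The 4GB boundary argument to bus_dmamem_alloc() below is what
	 * enforces the single-4G-segment requirement described above.
	 */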
   1559 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1560 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1561 		    &sc->sc_cd_rseg, 0)) != 0) {
   1562 		aprint_error_dev(sc->sc_dev,
   1563 		    "unable to allocate control data, error = %d\n",
   1564 		    error);
   1565 		goto fail_0;
   1566 	}
   1567 
   1568 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1569 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1570 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1571 		aprint_error_dev(sc->sc_dev,
   1572 		    "unable to map control data, error = %d\n", error);
   1573 		goto fail_1;
   1574 	}
   1575 
   1576 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1577 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1578 		aprint_error_dev(sc->sc_dev,
   1579 		    "unable to create control data DMA map, error = %d\n",
   1580 		    error);
   1581 		goto fail_2;
   1582 	}
   1583 
   1584 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1585 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1586 		aprint_error_dev(sc->sc_dev,
   1587 		    "unable to load control data DMA map, error = %d\n",
   1588 		    error);
   1589 		goto fail_3;
   1590 	}
   1591 
   1592 	/* Create the transmit buffer DMA maps. */
   1593 	WM_TXQUEUELEN(sc) =
   1594 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1595 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1596 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1597 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1598 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1599 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1600 			aprint_error_dev(sc->sc_dev,
   1601 			    "unable to create Tx DMA map %d, error = %d\n",
   1602 			    i, error);
   1603 			goto fail_4;
   1604 		}
   1605 	}
   1606 
   1607 	/* Create the receive buffer DMA maps. */
   1608 	for (i = 0; i < WM_NRXDESC; i++) {
   1609 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1610 			    MCLBYTES, 0, 0,
   1611 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1612 			aprint_error_dev(sc->sc_dev,
   1613 			    "unable to create Rx DMA map %d error = %d\n",
   1614 			    i, error);
   1615 			goto fail_5;
   1616 		}
   1617 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1618 	}
   1619 
   1620 	/* clear interesting stat counters */
   1621 	CSR_READ(sc, WMREG_COLC);
   1622 	CSR_READ(sc, WMREG_RXERRC);
   1623 
    1624 	/* Switch PHY control from SMBus to PCIe */
   1625 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1626 	    || (sc->sc_type == WM_T_PCH_LPT))
   1627 		wm_smbustopci(sc);
   1628 
   1629 	/* Reset the chip to a known state. */
   1630 	wm_reset(sc);
   1631 
   1632 	/* Get some information about the EEPROM. */
   1633 	switch (sc->sc_type) {
   1634 	case WM_T_82542_2_0:
   1635 	case WM_T_82542_2_1:
   1636 	case WM_T_82543:
   1637 	case WM_T_82544:
   1638 		/* Microwire */
   1639 		sc->sc_ee_addrbits = 6;
   1640 		break;
   1641 	case WM_T_82540:
   1642 	case WM_T_82545:
   1643 	case WM_T_82545_3:
   1644 	case WM_T_82546:
   1645 	case WM_T_82546_3:
   1646 		/* Microwire */
   1647 		reg = CSR_READ(sc, WMREG_EECD);
   1648 		if (reg & EECD_EE_SIZE)
   1649 			sc->sc_ee_addrbits = 8;
   1650 		else
   1651 			sc->sc_ee_addrbits = 6;
   1652 		sc->sc_flags |= WM_F_LOCK_EECD;
   1653 		break;
   1654 	case WM_T_82541:
   1655 	case WM_T_82541_2:
   1656 	case WM_T_82547:
   1657 	case WM_T_82547_2:
   1658 		reg = CSR_READ(sc, WMREG_EECD);
   1659 		if (reg & EECD_EE_TYPE) {
   1660 			/* SPI */
   1661 			wm_set_spiaddrbits(sc);
   1662 		} else
   1663 			/* Microwire */
   1664 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1665 		sc->sc_flags |= WM_F_LOCK_EECD;
   1666 		break;
   1667 	case WM_T_82571:
   1668 	case WM_T_82572:
   1669 		/* SPI */
   1670 		wm_set_spiaddrbits(sc);
   1671 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1672 		break;
   1673 	case WM_T_82573:
   1674 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1675 		/* FALLTHROUGH */
   1676 	case WM_T_82574:
   1677 	case WM_T_82583:
   1678 		if (wm_nvm_is_onboard_eeprom(sc) == 0)
   1679 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1680 		else {
   1681 			/* SPI */
   1682 			wm_set_spiaddrbits(sc);
   1683 		}
   1684 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1685 		break;
   1686 	case WM_T_82575:
   1687 	case WM_T_82576:
   1688 	case WM_T_82580:
   1689 	case WM_T_82580ER:
   1690 	case WM_T_I350:
   1691 	case WM_T_I354:
   1692 	case WM_T_80003:
   1693 		/* SPI */
   1694 		wm_set_spiaddrbits(sc);
   1695 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1696 		    | WM_F_LOCK_SWSM;
   1697 		break;
   1698 	case WM_T_ICH8:
   1699 	case WM_T_ICH9:
   1700 	case WM_T_ICH10:
   1701 	case WM_T_PCH:
   1702 	case WM_T_PCH2:
   1703 	case WM_T_PCH_LPT:
   1704 		/* FLASH */
   1705 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1706 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1707 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1708 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1709 			aprint_error_dev(sc->sc_dev,
   1710 			    "can't map FLASH registers\n");
   1711 			return;
   1712 		}
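		/*
		 * GFPREG describes the flash region reserved for the NIC:
		 * the low bits hold the first sector and the same-width
		 * field at bit 16 holds the last one.  Convert the base
		 * to a byte offset and the sector span to a per-bank
		 * size in 16-bit words (the region holds two NVM banks).
		 */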
   1713 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1714 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1715 						ICH_FLASH_SECTOR_SIZE;
   1716 		sc->sc_ich8_flash_bank_size =
   1717 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1718 		sc->sc_ich8_flash_bank_size -=
   1719 		    (reg & ICH_GFPREG_BASE_MASK);
   1720 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1721 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1722 		break;
   1723 	case WM_T_I210:
   1724 	case WM_T_I211:
   1725 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1726 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1727 		break;
   1728 	default:
   1729 		break;
   1730 	}
   1731 
   1732 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1733 	switch (sc->sc_type) {
   1734 	case WM_T_82571:
   1735 	case WM_T_82572:
   1736 		reg = CSR_READ(sc, WMREG_SWSM2);
   1737 		if ((reg & SWSM2_LOCK) != 0) {
   1738 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1739 			force_clear_smbi = true;
   1740 		} else
   1741 			force_clear_smbi = false;
   1742 		break;
   1743 	default:
   1744 		force_clear_smbi = true;
   1745 		break;
   1746 	}
   1747 	if (force_clear_smbi) {
   1748 		reg = CSR_READ(sc, WMREG_SWSM);
   1749 		if ((reg & ~SWSM_SMBI) != 0)
   1750 			aprint_error_dev(sc->sc_dev,
   1751 			    "Please update the Bootagent\n");
   1752 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1753 	}
   1754 
   1755 	/*
    1756 	 * Defer printing the EEPROM type until after verifying the checksum.
   1757 	 * This allows the EEPROM type to be printed correctly in the case
   1758 	 * that no EEPROM is attached.
   1759 	 */
   1760 	/*
   1761 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1762 	 * this for later, so we can fail future reads from the EEPROM.
   1763 	 */
   1764 	if (wm_nvm_validate_checksum(sc)) {
   1765 		/*
    1766 		 * Validate it a second time; some PCI-e parts fail the
    1767 		 * first check due to the link being in a sleep state.
   1768 		 */
   1769 		if (wm_nvm_validate_checksum(sc))
   1770 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1771 	}
   1772 
   1773 	/* Set device properties (macflags) */
   1774 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1775 
   1776 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1777 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1778 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1779 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
   1780 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1781 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
   1782 	} else {
   1783 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1784 			eetype = "SPI";
   1785 		else
   1786 			eetype = "MicroWire";
   1787 		aprint_verbose_dev(sc->sc_dev,
   1788 		    "%u word (%d address bits) %s EEPROM\n",
   1789 		    1U << sc->sc_ee_addrbits,
   1790 		    sc->sc_ee_addrbits, eetype);
   1791 	}
   1792 
   1793 	switch (sc->sc_type) {
   1794 	case WM_T_82571:
   1795 	case WM_T_82572:
   1796 	case WM_T_82573:
   1797 	case WM_T_82574:
   1798 	case WM_T_82583:
   1799 	case WM_T_80003:
   1800 	case WM_T_ICH8:
   1801 	case WM_T_ICH9:
   1802 	case WM_T_ICH10:
   1803 	case WM_T_PCH:
   1804 	case WM_T_PCH2:
   1805 	case WM_T_PCH_LPT:
   1806 		if (wm_check_mng_mode(sc) != 0)
   1807 			wm_get_hw_control(sc);
   1808 		break;
   1809 	default:
   1810 		break;
   1811 	}
   1812 	wm_get_wakeup(sc);
   1813 	/*
    1814 	 * Read the Ethernet address from the device properties first;
    1815 	 * if it is not set there, read it from the EEPROM.
   1816 	 */
   1817 	ea = prop_dictionary_get(dict, "mac-address");
   1818 	if (ea != NULL) {
   1819 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1820 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1821 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1822 	} else {
   1823 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1824 			aprint_error_dev(sc->sc_dev,
   1825 			    "unable to read Ethernet address\n");
   1826 			return;
   1827 		}
   1828 	}
   1829 
   1830 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1831 	    ether_sprintf(enaddr));
   1832 
   1833 	/*
   1834 	 * Read the config info from the EEPROM, and set up various
   1835 	 * bits in the control registers based on their contents.
   1836 	 */
   1837 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1838 	if (pn != NULL) {
   1839 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1840 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1841 	} else {
   1842 		if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1843 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1844 			return;
   1845 		}
   1846 	}
   1847 
   1848 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1849 	if (pn != NULL) {
   1850 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1851 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1852 	} else {
   1853 		if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1854 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1855 			return;
   1856 		}
   1857 	}
   1858 
   1859 	/* check for WM_F_WOL */
   1860 	switch (sc->sc_type) {
   1861 	case WM_T_82542_2_0:
   1862 	case WM_T_82542_2_1:
   1863 	case WM_T_82543:
   1864 		/* dummy? */
   1865 		eeprom_data = 0;
   1866 		apme_mask = EEPROM_CFG3_APME;
   1867 		break;
   1868 	case WM_T_82544:
   1869 		apme_mask = EEPROM_CFG2_82544_APM_EN;
   1870 		eeprom_data = cfg2;
   1871 		break;
   1872 	case WM_T_82546:
   1873 	case WM_T_82546_3:
   1874 	case WM_T_82571:
   1875 	case WM_T_82572:
   1876 	case WM_T_82573:
   1877 	case WM_T_82574:
   1878 	case WM_T_82583:
   1879 	case WM_T_80003:
   1880 	default:
   1881 		apme_mask = EEPROM_CFG3_APME;
   1882 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
   1883 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1884 		break;
   1885 	case WM_T_82575:
   1886 	case WM_T_82576:
   1887 	case WM_T_82580:
   1888 	case WM_T_82580ER:
   1889 	case WM_T_I350:
   1890 	case WM_T_I354: /* XXX ok? */
   1891 	case WM_T_ICH8:
   1892 	case WM_T_ICH9:
   1893 	case WM_T_ICH10:
   1894 	case WM_T_PCH:
   1895 	case WM_T_PCH2:
   1896 	case WM_T_PCH_LPT:
   1897 		/* XXX The funcid should be checked on some devices */
   1898 		apme_mask = WUC_APME;
   1899 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1900 		break;
   1901 	}
   1902 
   1903 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   1904 	if ((eeprom_data & apme_mask) != 0)
   1905 		sc->sc_flags |= WM_F_WOL;
   1906 #ifdef WM_DEBUG
   1907 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1908 		printf("WOL\n");
   1909 #endif
   1910 
   1911 	/*
   1912 	 * XXX need special handling for some multiple port cards
    1913 	 * to disable a particular port.
   1914 	 */
   1915 
   1916 	if (sc->sc_type >= WM_T_82544) {
   1917 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1918 		if (pn != NULL) {
   1919 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1920 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1921 		} else {
   1922 			if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1923 				aprint_error_dev(sc->sc_dev,
   1924 				    "unable to read SWDPIN\n");
   1925 				return;
   1926 			}
   1927 		}
   1928 	}
   1929 
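	/*
	 * Seed the CTRL register from the EEPROM configuration words:
	 * ILOS (invert loss-of-signal) and the software-definable pin
	 * direction and level bits.
	 */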
   1930 	if (cfg1 & EEPROM_CFG1_ILOS)
   1931 		sc->sc_ctrl |= CTRL_ILOS;
   1932 	if (sc->sc_type >= WM_T_82544) {
   1933 		sc->sc_ctrl |=
   1934 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1935 		    CTRL_SWDPIO_SHIFT;
   1936 		sc->sc_ctrl |=
   1937 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1938 		    CTRL_SWDPINS_SHIFT;
   1939 	} else {
   1940 		sc->sc_ctrl |=
   1941 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1942 		    CTRL_SWDPIO_SHIFT;
   1943 	}
   1944 
   1945 #if 0
   1946 	if (sc->sc_type >= WM_T_82544) {
   1947 		if (cfg1 & EEPROM_CFG1_IPS0)
   1948 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1949 		if (cfg1 & EEPROM_CFG1_IPS1)
   1950 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1951 		sc->sc_ctrl_ext |=
   1952 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1953 		    CTRL_EXT_SWDPIO_SHIFT;
   1954 		sc->sc_ctrl_ext |=
   1955 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1956 		    CTRL_EXT_SWDPINS_SHIFT;
   1957 	} else {
   1958 		sc->sc_ctrl_ext |=
   1959 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1960 		    CTRL_EXT_SWDPIO_SHIFT;
   1961 	}
   1962 #endif
   1963 
   1964 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1965 #if 0
   1966 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1967 #endif
   1968 
   1969 	/*
   1970 	 * Set up some register offsets that are different between
   1971 	 * the i82542 and the i82543 and later chips.
   1972 	 */
   1973 	if (sc->sc_type < WM_T_82543) {
   1974 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1975 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1976 	} else {
   1977 		sc->sc_rdt_reg = WMREG_RDT;
   1978 		sc->sc_tdt_reg = WMREG_TDT;
   1979 	}
   1980 
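	/*
	 * On PCH, record whether the NVM enables K1 (a PHY power
	 * state); later PHY configuration consults this flag.
	 */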
   1981 	if (sc->sc_type == WM_T_PCH) {
   1982 		uint16_t val;
   1983 
   1984 		/* Save the NVM K1 bit setting */
   1985 		wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
   1986 
   1987 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
   1988 			sc->sc_nvm_k1_enabled = 1;
   1989 		else
   1990 			sc->sc_nvm_k1_enabled = 0;
   1991 	}
   1992 
   1993 	/*
    1994 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    1995 	 * initialize the media structures accordingly.
   1996 	 */
   1997 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   1998 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   1999 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2000 	    || sc->sc_type == WM_T_82573
   2001 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2002 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2003 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2004 	} else if (sc->sc_type < WM_T_82543 ||
   2005 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2006 		if (wmp->wmp_flags & WMP_F_1000T)
   2007 			aprint_error_dev(sc->sc_dev,
   2008 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2009 		wm_tbi_mediainit(sc);
   2010 	} else {
   2011 		switch (sc->sc_type) {
   2012 		case WM_T_82575:
   2013 		case WM_T_82576:
   2014 		case WM_T_82580:
   2015 		case WM_T_82580ER:
   2016 		case WM_T_I350:
   2017 		case WM_T_I354:
   2018 		case WM_T_I210:
   2019 		case WM_T_I211:
   2020 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2021 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
   2022 			case CTRL_EXT_LINK_MODE_1000KX:
   2023 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2024 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2025 				    reg | CTRL_EXT_I2C_ENA);
   2026 				panic("not supported yet\n");
   2027 				break;
   2028 			case CTRL_EXT_LINK_MODE_SGMII:
   2029 				if (wm_sgmii_uses_mdio(sc)) {
   2030 					aprint_verbose_dev(sc->sc_dev,
   2031 					    "SGMII(MDIO)\n");
   2032 					sc->sc_flags |= WM_F_SGMII;
   2033 					wm_gmii_mediainit(sc,
   2034 					    wmp->wmp_product);
   2035 					break;
   2036 				}
   2037 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2038 				/*FALLTHROUGH*/
   2039 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2040 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2041 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2042 				    reg | CTRL_EXT_I2C_ENA);
   2043 				panic("not supported yet\n");
   2044 				break;
   2045 			case CTRL_EXT_LINK_MODE_GMII:
   2046 			default:
   2047 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2048 				    reg & ~CTRL_EXT_I2C_ENA);
   2049 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2050 				break;
   2051 			}
   2052 			break;
   2053 		default:
   2054 			if (wmp->wmp_flags & WMP_F_1000X)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2057 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2058 		}
   2059 	}
   2060 
   2061 	ifp = &sc->sc_ethercom.ec_if;
   2062 	xname = device_xname(sc->sc_dev);
   2063 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2064 	ifp->if_softc = sc;
   2065 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2066 	ifp->if_ioctl = wm_ioctl;
   2067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2068 		ifp->if_start = wm_nq_start;
   2069 	else
   2070 		ifp->if_start = wm_start;
   2071 	ifp->if_watchdog = wm_watchdog;
   2072 	ifp->if_init = wm_init;
   2073 	ifp->if_stop = wm_stop;
   2074 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2075 	IFQ_SET_READY(&ifp->if_snd);
   2076 
   2077 	/* Check for jumbo frame */
   2078 	switch (sc->sc_type) {
   2079 	case WM_T_82573:
   2080 		/* XXX limited to 9234 if ASPM is disabled */
   2081 		wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
   2082 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
   2083 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2084 		break;
   2085 	case WM_T_82571:
   2086 	case WM_T_82572:
   2087 	case WM_T_82574:
   2088 	case WM_T_82575:
   2089 	case WM_T_82576:
   2090 	case WM_T_82580:
   2091 	case WM_T_82580ER:
   2092 	case WM_T_I350:
   2093 	case WM_T_I354: /* XXXX ok? */
   2094 	case WM_T_I210:
   2095 	case WM_T_I211:
   2096 	case WM_T_80003:
   2097 	case WM_T_ICH9:
   2098 	case WM_T_ICH10:
   2099 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2100 	case WM_T_PCH_LPT:
   2101 		/* XXX limited to 9234 */
   2102 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2103 		break;
   2104 	case WM_T_PCH:
   2105 		/* XXX limited to 4096 */
   2106 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2107 		break;
   2108 	case WM_T_82542_2_0:
   2109 	case WM_T_82542_2_1:
   2110 	case WM_T_82583:
   2111 	case WM_T_ICH8:
   2112 		/* No support for jumbo frame */
   2113 		break;
   2114 	default:
   2115 		/* ETHER_MAX_LEN_JUMBO */
   2116 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2117 		break;
   2118 	}
   2119 
    2120 	/* If we're an i82543 or greater, we can support VLANs. */
   2121 	if (sc->sc_type >= WM_T_82543)
   2122 		sc->sc_ethercom.ec_capabilities |=
   2123 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2124 
   2125 	/*
    2126 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2127 	 * on i82543 and later.
   2128 	 */
   2129 	if (sc->sc_type >= WM_T_82543) {
   2130 		ifp->if_capabilities |=
   2131 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2132 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2133 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2134 		    IFCAP_CSUM_TCPv6_Tx |
   2135 		    IFCAP_CSUM_UDPv6_Tx;
   2136 	}
   2137 
   2138 	/*
   2139 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2140 	 *
   2141 	 *	82541GI (8086:1076) ... no
   2142 	 *	82572EI (8086:10b9) ... yes
   2143 	 */
   2144 	if (sc->sc_type >= WM_T_82571) {
   2145 		ifp->if_capabilities |=
   2146 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2147 	}
   2148 
   2149 	/*
    2150 	 * If we're an i82544 or greater (except the i82547), we can do
   2151 	 * TCP segmentation offload.
   2152 	 */
   2153 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2154 		ifp->if_capabilities |= IFCAP_TSOv4;
   2155 	}
   2156 
   2157 	if (sc->sc_type >= WM_T_82571) {
   2158 		ifp->if_capabilities |= IFCAP_TSOv6;
   2159 	}
   2160 
   2161 #ifdef WM_MPSAFE
   2162 	sc->sc_txrx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2163 #else
   2164 	sc->sc_txrx_lock = NULL;
   2165 #endif
   2166 
   2167 	/* Attach the interface. */
   2168 	if_attach(ifp);
   2169 	ether_ifattach(ifp, enaddr);
   2170 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2171 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
   2172 
   2173 #ifdef WM_EVENT_COUNTERS
   2174 	/* Attach event counters. */
   2175 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2176 	    NULL, xname, "txsstall");
   2177 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2178 	    NULL, xname, "txdstall");
   2179 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2180 	    NULL, xname, "txfifo_stall");
   2181 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2182 	    NULL, xname, "txdw");
   2183 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2184 	    NULL, xname, "txqe");
   2185 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2186 	    NULL, xname, "rxintr");
   2187 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2188 	    NULL, xname, "linkintr");
   2189 
   2190 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2191 	    NULL, xname, "rxipsum");
   2192 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2193 	    NULL, xname, "rxtusum");
   2194 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2195 	    NULL, xname, "txipsum");
   2196 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2197 	    NULL, xname, "txtusum");
   2198 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2199 	    NULL, xname, "txtusum6");
   2200 
   2201 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2202 	    NULL, xname, "txtso");
   2203 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2204 	    NULL, xname, "txtso6");
   2205 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2206 	    NULL, xname, "txtsopain");
   2207 
   2208 	for (i = 0; i < WM_NTXSEGS; i++) {
   2209 		snprintf(wm_txseg_evcnt_names[i],
   2210 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2211 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2212 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2213 	}
   2214 
   2215 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2216 	    NULL, xname, "txdrop");
   2217 
   2218 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2219 	    NULL, xname, "tu");
   2220 
   2221 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2222 	    NULL, xname, "tx_xoff");
   2223 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2224 	    NULL, xname, "tx_xon");
   2225 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2226 	    NULL, xname, "rx_xoff");
   2227 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2228 	    NULL, xname, "rx_xon");
   2229 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2230 	    NULL, xname, "rx_macctl");
   2231 #endif /* WM_EVENT_COUNTERS */
   2232 
   2233 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2234 		pmf_class_network_register(self, ifp);
   2235 	else
   2236 		aprint_error_dev(self, "couldn't establish power handler\n");
   2237 
   2238 	return;
   2239 
   2240 	/*
   2241 	 * Free any resources we've allocated during the failed attach
   2242 	 * attempt.  Do this in reverse order and fall through.
   2243 	 */
   2244  fail_5:
   2245 	for (i = 0; i < WM_NRXDESC; i++) {
   2246 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2247 			bus_dmamap_destroy(sc->sc_dmat,
   2248 			    sc->sc_rxsoft[i].rxs_dmamap);
   2249 	}
   2250  fail_4:
   2251 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2252 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2253 			bus_dmamap_destroy(sc->sc_dmat,
   2254 			    sc->sc_txsoft[i].txs_dmamap);
   2255 	}
   2256 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2257  fail_3:
   2258 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2259  fail_2:
   2260 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2261 	    sc->sc_cd_size);
   2262  fail_1:
   2263 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2264  fail_0:
   2265 	return;
   2266 }
   2267 
   2268 /* The detach function (ca_detach) */
   2269 static int
   2270 wm_detach(device_t self, int flags __unused)
   2271 {
   2272 	struct wm_softc *sc = device_private(self);
   2273 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2274 	int i;
   2275 #ifndef WM_MPSAFE
   2276 	int s;
   2277 
   2278 	s = splnet();
   2279 #endif
    2280 	/* Stop the interface.  Its callouts are stopped in wm_stop(). */
   2281 	wm_stop(ifp, 1);
   2282 
   2283 #ifndef WM_MPSAFE
   2284 	splx(s);
   2285 #endif
   2286 
   2287 	pmf_device_deregister(self);
   2288 
   2289 	/* Tell the firmware about the release */
   2290 	WM_LOCK(sc);
   2291 	wm_release_manageability(sc);
   2292 	wm_release_hw_control(sc);
   2293 	WM_UNLOCK(sc);
   2294 
   2295 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2296 
   2297 	/* Delete all remaining media. */
   2298 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2299 
   2300 	ether_ifdetach(ifp);
   2301 	if_detach(ifp);
   2302 
   2303 
   2304 	/* Unload RX dmamaps and free mbufs */
   2305 	WM_LOCK(sc);
   2306 	wm_rxdrain(sc);
   2307 	WM_UNLOCK(sc);
   2308 	/* Must unlock here */
   2309 
   2310 	/* Free dmamap. It's the same as the end of the wm_attach() function */
   2311 	for (i = 0; i < WM_NRXDESC; i++) {
   2312 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2313 			bus_dmamap_destroy(sc->sc_dmat,
   2314 			    sc->sc_rxsoft[i].rxs_dmamap);
   2315 	}
   2316 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2317 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2318 			bus_dmamap_destroy(sc->sc_dmat,
   2319 			    sc->sc_txsoft[i].txs_dmamap);
   2320 	}
   2321 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2322 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2323 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2324 	    sc->sc_cd_size);
   2325 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2326 
   2327 	/* Disestablish the interrupt handler */
   2328 	if (sc->sc_ih != NULL) {
   2329 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2330 		sc->sc_ih = NULL;
   2331 	}
   2332 
   2333 	/* Unmap the registers */
   2334 	if (sc->sc_ss) {
   2335 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2336 		sc->sc_ss = 0;
   2337 	}
   2338 
   2339 	if (sc->sc_ios) {
   2340 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2341 		sc->sc_ios = 0;
   2342 	}
   2343 
   2344 	if (sc->sc_txrx_lock)
   2345 		mutex_obj_free(sc->sc_txrx_lock);
   2346 
   2347 	return 0;
   2348 }
   2349 
   2350 static bool
   2351 wm_suspend(device_t self, const pmf_qual_t *qual)
   2352 {
   2353 	struct wm_softc *sc = device_private(self);
   2354 
   2355 	wm_release_manageability(sc);
   2356 	wm_release_hw_control(sc);
   2357 #ifdef WM_WOL
   2358 	wm_enable_wakeup(sc);
   2359 #endif
   2360 
   2361 	return true;
   2362 }
   2363 
   2364 static bool
   2365 wm_resume(device_t self, const pmf_qual_t *qual)
   2366 {
   2367 	struct wm_softc *sc = device_private(self);
   2368 
   2369 	wm_init_manageability(sc);
   2370 
   2371 	return true;
   2372 }
   2373 
   2374 /*
   2375  * wm_watchdog:		[ifnet interface function]
   2376  *
   2377  *	Watchdog timer handler.
   2378  */
   2379 static void
   2380 wm_watchdog(struct ifnet *ifp)
   2381 {
   2382 	struct wm_softc *sc = ifp->if_softc;
   2383 
   2384 	/*
   2385 	 * Since we're using delayed interrupts, sweep up
   2386 	 * before we report an error.
   2387 	 */
   2388 	WM_LOCK(sc);
   2389 	wm_txintr(sc);
   2390 	WM_UNLOCK(sc);
   2391 
   2392 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2393 #ifdef WM_DEBUG
   2394 		int i, j;
   2395 		struct wm_txsoft *txs;
   2396 #endif
   2397 		log(LOG_ERR,
   2398 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2399 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2400 		    sc->sc_txnext);
   2401 		ifp->if_oerrors++;
   2402 #ifdef WM_DEBUG
   2403 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
   2404 		    i = WM_NEXTTXS(sc, i)) {
   2405 		    txs = &sc->sc_txsoft[i];
   2406 		    printf("txs %d tx %d -> %d\n",
   2407 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2408 		    for (j = txs->txs_firstdesc; ;
   2409 			j = WM_NEXTTX(sc, j)) {
   2410 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2411 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
   2412 			printf("\t %#08x%08x\n",
   2413 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
   2414 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
   2415 			if (j == txs->txs_lastdesc)
   2416 				break;
   2417 			}
   2418 		}
   2419 #endif
   2420 		/* Reset the interface. */
   2421 		(void) wm_init(ifp);
   2422 	}
   2423 
   2424 	/* Try to get more packets going. */
   2425 	ifp->if_start(ifp);
   2426 }
   2427 
   2428 /*
   2429  * wm_tick:
   2430  *
   2431  *	One second timer, used to check link status, sweep up
   2432  *	completed transmit jobs, etc.
   2433  */
   2434 static void
   2435 wm_tick(void *arg)
   2436 {
   2437 	struct wm_softc *sc = arg;
   2438 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2439 #ifndef WM_MPSAFE
   2440 	int s;
   2441 
   2442 	s = splnet();
   2443 #endif
   2444 
   2445 	WM_LOCK(sc);
   2446 
   2447 	if (sc->sc_stopping)
   2448 		goto out;
   2449 
   2450 	if (sc->sc_type >= WM_T_82542_2_1) {
   2451 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2452 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2453 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2454 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2455 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2456 	}
   2457 
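	/*
	 * Fold the hardware statistics registers into the interface
	 * counters: the collision count, the error counters (CRC,
	 * alignment, symbol, Rx, sequence, carrier-extension and
	 * length errors) into if_ierrors, and the missed-packet and
	 * no-buffer counts into if_iqdrops.
	 */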
   2458 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2459 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2460 	    + CSR_READ(sc, WMREG_CRCERRS)
   2461 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2462 	    + CSR_READ(sc, WMREG_SYMERRC)
   2463 	    + CSR_READ(sc, WMREG_RXERRC)
   2464 	    + CSR_READ(sc, WMREG_SEC)
   2465 	    + CSR_READ(sc, WMREG_CEXTERR)
   2466 	    + CSR_READ(sc, WMREG_RLEC);
   2467 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2468 
   2469 	if (sc->sc_flags & WM_F_HAS_MII)
   2470 		mii_tick(&sc->sc_mii);
   2471 	else
   2472 		wm_tbi_check_link(sc);
   2473 
   2474 out:
   2475 	WM_UNLOCK(sc);
   2476 #ifndef WM_MPSAFE
   2477 	splx(s);
   2478 #endif
   2479 
   2480 	if (!sc->sc_stopping)
   2481 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2482 }
   2483 
   2484 static int
   2485 wm_ifflags_cb(struct ethercom *ec)
   2486 {
   2487 	struct ifnet *ifp = &ec->ec_if;
   2488 	struct wm_softc *sc = ifp->if_softc;
   2489 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2490 	int rc = 0;
   2491 
   2492 	WM_LOCK(sc);
   2493 
   2494 	if (change != 0)
   2495 		sc->sc_if_flags = ifp->if_flags;
   2496 
   2497 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2498 		rc = ENETRESET;
   2499 		goto out;
   2500 	}
   2501 
   2502 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2503 		wm_set_filter(sc);
   2504 
   2505 	wm_set_vlan(sc);
   2506 
   2507 out:
   2508 	WM_UNLOCK(sc);
   2509 
   2510 	return rc;
   2511 }
   2512 
   2513 /*
   2514  * wm_ioctl:		[ifnet interface function]
   2515  *
   2516  *	Handle control requests from the operator.
   2517  */
   2518 static int
   2519 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2520 {
   2521 	struct wm_softc *sc = ifp->if_softc;
   2522 	struct ifreq *ifr = (struct ifreq *) data;
   2523 	struct ifaddr *ifa = (struct ifaddr *)data;
   2524 	struct sockaddr_dl *sdl;
   2525 	int s, error;
   2526 
   2527 #ifndef WM_MPSAFE
   2528 	s = splnet();
   2529 #endif
   2530 	WM_LOCK(sc);
   2531 
   2532 	switch (cmd) {
   2533 	case SIOCSIFMEDIA:
   2534 	case SIOCGIFMEDIA:
   2535 		/* Flow control requires full-duplex mode. */
   2536 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2537 		    (ifr->ifr_media & IFM_FDX) == 0)
   2538 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2539 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2540 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2541 				/* We can do both TXPAUSE and RXPAUSE. */
   2542 				ifr->ifr_media |=
   2543 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2544 			}
   2545 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2546 		}
   2547 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2548 		break;
   2549 	case SIOCINITIFADDR:
   2550 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2551 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2552 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2553 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2554 			/* unicast address is first multicast entry */
   2555 			wm_set_filter(sc);
   2556 			error = 0;
   2557 			break;
   2558 		}
   2559 		/*FALLTHROUGH*/
   2560 	default:
   2561 		WM_UNLOCK(sc);
   2562 #ifdef WM_MPSAFE
   2563 		s = splnet();
   2564 #endif
   2565 		/* It may call wm_start, so unlock here */
   2566 		error = ether_ioctl(ifp, cmd, data);
   2567 #ifdef WM_MPSAFE
   2568 		splx(s);
   2569 #endif
   2570 		WM_LOCK(sc);
   2571 
   2572 		if (error != ENETRESET)
   2573 			break;
   2574 
   2575 		error = 0;
   2576 
   2577 		if (cmd == SIOCSIFCAP) {
   2578 			WM_UNLOCK(sc);
   2579 			error = (*ifp->if_init)(ifp);
   2580 			WM_LOCK(sc);
   2581 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2582 			;
   2583 		else if (ifp->if_flags & IFF_RUNNING) {
   2584 			/*
   2585 			 * Multicast list has changed; set the hardware filter
   2586 			 * accordingly.
   2587 			 */
   2588 			wm_set_filter(sc);
   2589 		}
   2590 		break;
   2591 	}
   2592 
   2593 	WM_UNLOCK(sc);
   2594 
   2595 	/* Try to get more packets going. */
   2596 	ifp->if_start(ifp);
   2597 
   2598 #ifndef WM_MPSAFE
   2599 	splx(s);
   2600 #endif
   2601 	return error;
   2602 }
   2603 
   2604 /* MAC address related */
   2605 
   2606 static int
   2607 wm_check_alt_mac_addr(struct wm_softc *sc)
   2608 {
   2609 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2610 	uint16_t offset = EEPROM_OFF_MACADDR;
   2611 
   2612 	/* Try to read alternative MAC address pointer */
   2613 	if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2614 		return -1;
   2615 
   2616 	/* Check pointer */
   2617 	if (offset == 0xffff)
   2618 		return -1;
   2619 
   2620 	/*
    2621 	 * Check whether the alternative MAC address is valid.  Some
    2622 	 * cards have a non-0xffff pointer but don't actually use an
    2623 	 * alternative MAC address.
    2624 	 *
    2625 	 * A valid address must have the multicast (group) bit clear.
   2626 	 */
   2627 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2628 		if (((myea[0] & 0xff) & 0x01) == 0)
   2629 			return 0; /* found! */
   2630 
   2631 	/* not found */
   2632 	return -1;
   2633 }
   2634 
   2635 static int
   2636 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2637 {
   2638 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2639 	uint16_t offset = EEPROM_OFF_MACADDR;
   2640 	int do_invert = 0;
   2641 
   2642 	switch (sc->sc_type) {
   2643 	case WM_T_82580:
   2644 	case WM_T_82580ER:
   2645 	case WM_T_I350:
   2646 	case WM_T_I354:
   2647 		switch (sc->sc_funcid) {
   2648 		case 0:
   2649 			/* default value (== EEPROM_OFF_MACADDR) */
   2650 			break;
   2651 		case 1:
   2652 			offset = EEPROM_OFF_LAN1;
   2653 			break;
   2654 		case 2:
   2655 			offset = EEPROM_OFF_LAN2;
   2656 			break;
   2657 		case 3:
   2658 			offset = EEPROM_OFF_LAN3;
   2659 			break;
   2660 		default:
   2661 			goto bad;
   2662 			/* NOTREACHED */
   2663 			break;
   2664 		}
   2665 		break;
   2666 	case WM_T_82571:
   2667 	case WM_T_82575:
   2668 	case WM_T_82576:
   2669 	case WM_T_80003:
   2670 	case WM_T_I210:
   2671 	case WM_T_I211:
   2672 		if (wm_check_alt_mac_addr(sc) != 0) {
   2673 			/* reset the offset to LAN0 */
   2674 			offset = EEPROM_OFF_MACADDR;
   2675 			if ((sc->sc_funcid & 0x01) == 1)
   2676 				do_invert = 1;
   2677 			goto do_read;
   2678 		}
   2679 		switch (sc->sc_funcid) {
   2680 		case 0:
   2681 			/*
   2682 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
   2683 			 * itself.
   2684 			 */
   2685 			break;
   2686 		case 1:
   2687 			offset += EEPROM_OFF_MACADDR_LAN1;
   2688 			break;
   2689 		case 2:
   2690 			offset += EEPROM_OFF_MACADDR_LAN2;
   2691 			break;
   2692 		case 3:
   2693 			offset += EEPROM_OFF_MACADDR_LAN3;
   2694 			break;
   2695 		default:
   2696 			goto bad;
   2697 			/* NOTREACHED */
   2698 			break;
   2699 		}
   2700 		break;
   2701 	default:
   2702 		if ((sc->sc_funcid & 0x01) == 1)
   2703 			do_invert = 1;
   2704 		break;
   2705 	}
   2706 
   2707  do_read:
   2708 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2709 		myea) != 0) {
   2710 		goto bad;
   2711 	}
   2712 
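	/*
	 * The address is stored in the NVM as three little-endian
	 * 16-bit words; unpack it into individual bytes.
	 */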
   2713 	enaddr[0] = myea[0] & 0xff;
   2714 	enaddr[1] = myea[0] >> 8;
   2715 	enaddr[2] = myea[1] & 0xff;
   2716 	enaddr[3] = myea[1] >> 8;
   2717 	enaddr[4] = myea[2] & 0xff;
   2718 	enaddr[5] = myea[2] >> 8;
   2719 
   2720 	/*
   2721 	 * Toggle the LSB of the MAC address on the second port
   2722 	 * of some dual port cards.
   2723 	 */
   2724 	if (do_invert != 0)
   2725 		enaddr[5] ^= 1;
   2726 
   2727 	return 0;
   2728 
   2729  bad:
   2730 	return -1;
   2731 }
   2732 
   2733 /*
   2734  * wm_set_ral:
   2735  *
    2736  *	Set an entry in the receive address list.
   2737  */
   2738 static void
   2739 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2740 {
   2741 	uint32_t ral_lo, ral_hi;
   2742 
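	/*
	 * Pack bytes 0-3 of the address into RAL_LO and bytes 4-5,
	 * plus the Address Valid bit, into RAL_HI.  An all-zero pair
	 * marks the slot as unused.
	 */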
   2743 	if (enaddr != NULL) {
   2744 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2745 		    (enaddr[3] << 24);
   2746 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2747 		ral_hi |= RAL_AV;
   2748 	} else {
   2749 		ral_lo = 0;
   2750 		ral_hi = 0;
   2751 	}
   2752 
   2753 	if (sc->sc_type >= WM_T_82544) {
   2754 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2755 		    ral_lo);
   2756 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2757 		    ral_hi);
   2758 	} else {
   2759 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2760 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2761 	}
   2762 }
   2763 
   2764 /*
   2765  * wm_mchash:
   2766  *
   2767  *	Compute the hash of the multicast address for the 4096-bit
   2768  *	multicast filter.
   2769  */
   2770 static uint32_t
   2771 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2772 {
   2773 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2774 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2775 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2776 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2777 	uint32_t hash;
   2778 
   2779 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
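	/*
	 * The shift tables select which bits of address bytes 4 and 5
	 * form the bucket index, according to the configured hash type
	 * (sc_mchash_type).
	 */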
   2780 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2781 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2782 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   2783 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   2784 		return (hash & 0x3ff);
   2785 	}
   2786 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2787 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2788 
   2789 	return (hash & 0xfff);
   2790 }
   2791 
   2792 /*
   2793  * wm_set_filter:
   2794  *
   2795  *	Set up the receive filter.
   2796  */
   2797 static void
   2798 wm_set_filter(struct wm_softc *sc)
   2799 {
   2800 	struct ethercom *ec = &sc->sc_ethercom;
   2801 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2802 	struct ether_multi *enm;
   2803 	struct ether_multistep step;
   2804 	bus_addr_t mta_reg;
   2805 	uint32_t hash, reg, bit;
   2806 	int i, size;
   2807 
   2808 	if (sc->sc_type >= WM_T_82544)
   2809 		mta_reg = WMREG_CORDOVA_MTA;
   2810 	else
   2811 		mta_reg = WMREG_MTA;
   2812 
   2813 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2814 
   2815 	if (ifp->if_flags & IFF_BROADCAST)
   2816 		sc->sc_rctl |= RCTL_BAM;
   2817 	if (ifp->if_flags & IFF_PROMISC) {
   2818 		sc->sc_rctl |= RCTL_UPE;
   2819 		goto allmulti;
   2820 	}
   2821 
   2822 	/*
   2823 	 * Set the station address in the first RAL slot, and
   2824 	 * clear the remaining slots.
   2825 	 */
   2826 	if (sc->sc_type == WM_T_ICH8)
    2827 		size = WM_RAL_TABSIZE_ICH8 - 1;
   2828 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   2829 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   2830 	    || (sc->sc_type == WM_T_PCH_LPT))
   2831 		size = WM_RAL_TABSIZE_ICH8;
   2832 	else if (sc->sc_type == WM_T_82575)
   2833 		size = WM_RAL_TABSIZE_82575;
   2834 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   2835 		size = WM_RAL_TABSIZE_82576;
   2836 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2837 		size = WM_RAL_TABSIZE_I350;
   2838 	else
   2839 		size = WM_RAL_TABSIZE;
   2840 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   2841 	for (i = 1; i < size; i++)
   2842 		wm_set_ral(sc, NULL, i);
   2843 
   2844 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2845 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2846 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   2847 		size = WM_ICH8_MC_TABSIZE;
   2848 	else
   2849 		size = WM_MC_TABSIZE;
   2850 	/* Clear out the multicast table. */
   2851 	for (i = 0; i < size; i++)
   2852 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2853 
   2854 	ETHER_FIRST_MULTI(step, ec, enm);
   2855 	while (enm != NULL) {
   2856 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2857 			/*
   2858 			 * We must listen to a range of multicast addresses.
   2859 			 * For now, just accept all multicasts, rather than
   2860 			 * trying to set only those filter bits needed to match
   2861 			 * the range.  (At this time, the only use of address
   2862 			 * ranges is for IP multicast routing, for which the
   2863 			 * range is big enough to require all bits set.)
   2864 			 */
   2865 			goto allmulti;
   2866 		}
   2867 
   2868 		hash = wm_mchash(sc, enm->enm_addrlo);
   2869 
   2870 		reg = (hash >> 5);
   2871 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2872 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2873 		    || (sc->sc_type == WM_T_PCH2)
   2874 		    || (sc->sc_type == WM_T_PCH_LPT))
   2875 			reg &= 0x1f;
   2876 		else
   2877 			reg &= 0x7f;
   2878 		bit = hash & 0x1f;
   2879 
   2880 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2881 		hash |= 1U << bit;
   2882 
   2883 		/* XXX Hardware bug?? */
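		/*
		 * On the 82544, updating one of these MTA entries seems
		 * to clobber the previous entry, so read the previous
		 * entry first and write it back afterwards.
		 */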
   2884 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   2885 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2886 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2887 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2888 		} else
   2889 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2890 
   2891 		ETHER_NEXT_MULTI(step, enm);
   2892 	}
   2893 
   2894 	ifp->if_flags &= ~IFF_ALLMULTI;
   2895 	goto setit;
   2896 
   2897  allmulti:
   2898 	ifp->if_flags |= IFF_ALLMULTI;
   2899 	sc->sc_rctl |= RCTL_MPE;
   2900 
   2901  setit:
   2902 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   2903 }
   2904 
   2905 /* Reset and init related */
   2906 
   2907 static void
   2908 wm_set_vlan(struct wm_softc *sc)
   2909 {
   2910 	/* Deal with VLAN enables. */
   2911 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   2912 		sc->sc_ctrl |= CTRL_VME;
   2913 	else
   2914 		sc->sc_ctrl &= ~CTRL_VME;
   2915 
   2916 	/* Write the control registers. */
   2917 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2918 }
   2919 
   2920 static void
   2921 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   2922 {
   2923 	uint32_t gcr;
   2924 	pcireg_t ctrl2;
   2925 
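	/*
	 * If the PCIe completion timeout is still at its default of 0,
	 * set it: via GCR on parts without the version-2 capability,
	 * otherwise via the Device Control 2 register in config space.
	 * In either case, completion timeout resend is disabled.
	 */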
   2926 	gcr = CSR_READ(sc, WMREG_GCR);
   2927 
   2928 	/* Only take action if timeout value is defaulted to 0 */
   2929 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   2930 		goto out;
   2931 
   2932 	if ((gcr & GCR_CAP_VER2) == 0) {
   2933 		gcr |= GCR_CMPL_TMOUT_10MS;
   2934 		goto out;
   2935 	}
   2936 
   2937 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   2938 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   2939 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   2940 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   2941 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   2942 
   2943 out:
   2944 	/* Disable completion timeout resend */
   2945 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   2946 
   2947 	CSR_WRITE(sc, WMREG_GCR, gcr);
   2948 }
   2949 
   2950 void
   2951 wm_get_auto_rd_done(struct wm_softc *sc)
   2952 {
   2953 	int i;
   2954 
   2955 	/* wait for eeprom to reload */
   2956 	switch (sc->sc_type) {
   2957 	case WM_T_82571:
   2958 	case WM_T_82572:
   2959 	case WM_T_82573:
   2960 	case WM_T_82574:
   2961 	case WM_T_82583:
   2962 	case WM_T_82575:
   2963 	case WM_T_82576:
   2964 	case WM_T_82580:
   2965 	case WM_T_82580ER:
   2966 	case WM_T_I350:
   2967 	case WM_T_I354:
   2968 	case WM_T_I210:
   2969 	case WM_T_I211:
   2970 	case WM_T_80003:
   2971 	case WM_T_ICH8:
   2972 	case WM_T_ICH9:
   2973 		for (i = 0; i < 10; i++) {
   2974 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   2975 				break;
   2976 			delay(1000);
   2977 		}
   2978 		if (i == 10) {
   2979 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   2980 			    "complete\n", device_xname(sc->sc_dev));
   2981 		}
   2982 		break;
   2983 	default:
   2984 		break;
   2985 	}
   2986 }
   2987 
   2988 void
   2989 wm_lan_init_done(struct wm_softc *sc)
   2990 {
   2991 	uint32_t reg = 0;
   2992 	int i;
   2993 
   2994 	/* wait for eeprom to reload */
   2995 	switch (sc->sc_type) {
   2996 	case WM_T_ICH10:
   2997 	case WM_T_PCH:
   2998 	case WM_T_PCH2:
   2999 	case WM_T_PCH_LPT:
   3000 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3001 			reg = CSR_READ(sc, WMREG_STATUS);
   3002 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3003 				break;
   3004 			delay(100);
   3005 		}
   3006 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3007 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3008 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3009 		}
   3010 		break;
   3011 	default:
   3012 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3013 		    __func__);
   3014 		break;
   3015 	}
   3016 
   3017 	reg &= ~STATUS_LAN_INIT_DONE;
   3018 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3019 }
   3020 
   3021 void
   3022 wm_get_cfg_done(struct wm_softc *sc)
   3023 {
   3024 	int mask;
   3025 	uint32_t reg;
   3026 	int i;
   3027 
   3028 	/* wait for eeprom to reload */
   3029 	switch (sc->sc_type) {
   3030 	case WM_T_82542_2_0:
   3031 	case WM_T_82542_2_1:
   3032 		/* null */
   3033 		break;
   3034 	case WM_T_82543:
   3035 	case WM_T_82544:
   3036 	case WM_T_82540:
   3037 	case WM_T_82545:
   3038 	case WM_T_82545_3:
   3039 	case WM_T_82546:
   3040 	case WM_T_82546_3:
   3041 	case WM_T_82541:
   3042 	case WM_T_82541_2:
   3043 	case WM_T_82547:
   3044 	case WM_T_82547_2:
   3045 	case WM_T_82573:
   3046 	case WM_T_82574:
   3047 	case WM_T_82583:
   3048 		/* generic */
   3049 		delay(10*1000);
   3050 		break;
   3051 	case WM_T_80003:
   3052 	case WM_T_82571:
   3053 	case WM_T_82572:
   3054 	case WM_T_82575:
   3055 	case WM_T_82576:
   3056 	case WM_T_82580:
   3057 	case WM_T_82580ER:
   3058 	case WM_T_I350:
   3059 	case WM_T_I354:
   3060 	case WM_T_I210:
   3061 	case WM_T_I211:
   3062 		if (sc->sc_type == WM_T_82571) {
   3063 			/* Only 82571 shares port 0 */
   3064 			mask = EEMNGCTL_CFGDONE_0;
   3065 		} else
   3066 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3067 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3068 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3069 				break;
   3070 			delay(1000);
   3071 		}
   3072 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3073 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3074 				device_xname(sc->sc_dev), __func__));
   3075 		}
   3076 		break;
   3077 	case WM_T_ICH8:
   3078 	case WM_T_ICH9:
   3079 	case WM_T_ICH10:
   3080 	case WM_T_PCH:
   3081 	case WM_T_PCH2:
   3082 	case WM_T_PCH_LPT:
   3083 		delay(10*1000);
   3084 		if (sc->sc_type >= WM_T_ICH10)
   3085 			wm_lan_init_done(sc);
   3086 		else
   3087 			wm_get_auto_rd_done(sc);
   3088 
   3089 		reg = CSR_READ(sc, WMREG_STATUS);
   3090 		if ((reg & STATUS_PHYRA) != 0)
   3091 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3092 		break;
   3093 	default:
   3094 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3095 		    __func__);
   3096 		break;
   3097 	}
   3098 }
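
/*
 * The wait routines above share one idiom: poll a register for a bit
 * with a bounded number of delay() iterations, then report failure if
 * the loop runs out.  A minimal sketch of that pattern, compiled out;
 * the helper name is hypothetical and nothing in the driver calls it.
 */
#if 0
static int
example_poll_bit(struct wm_softc *sc, int wmreg, uint32_t bitmask,
    int tries, int usec_per_try)
{
	int i;

	for (i = 0; i < tries; i++) {
		if ((CSR_READ(sc, wmreg) & bitmask) != 0)
			return 0;	/* bit set: success */
		delay(usec_per_try);
	}
	return ETIMEDOUT;		/* caller logs or panics */
}
#endif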
   3099 
   3100 /*
   3101  * wm_reset:
   3102  *
   3103  *	Reset the i82542 chip.
   3104  */
   3105 static void
   3106 wm_reset(struct wm_softc *sc)
   3107 {
   3108 	int phy_reset = 0;
   3109 	int error = 0;
   3110 	uint32_t reg, mask;
   3111 
   3112 	/*
   3113 	 * Allocate on-chip memory according to the MTU size.
   3114 	 * The Packet Buffer Allocation register must be written
   3115 	 * before the chip is reset.
   3116 	 */
   3117 	switch (sc->sc_type) {
   3118 	case WM_T_82547:
   3119 	case WM_T_82547_2:
   3120 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3121 		    PBA_22K : PBA_30K;
   3122 		sc->sc_txfifo_head = 0;
   3123 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3124 		sc->sc_txfifo_size =
   3125 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3126 		sc->sc_txfifo_stall = 0;
   3127 		break;
   3128 	case WM_T_82571:
   3129 	case WM_T_82572:
    3130 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3131 	case WM_T_I350:
   3132 	case WM_T_I354:
   3133 	case WM_T_80003:
   3134 		sc->sc_pba = PBA_32K;
   3135 		break;
   3136 	case WM_T_82580:
   3137 	case WM_T_82580ER:
   3138 		sc->sc_pba = PBA_35K;
   3139 		break;
   3140 	case WM_T_I210:
   3141 	case WM_T_I211:
   3142 		sc->sc_pba = PBA_34K;
   3143 		break;
   3144 	case WM_T_82576:
   3145 		sc->sc_pba = PBA_64K;
   3146 		break;
   3147 	case WM_T_82573:
   3148 		sc->sc_pba = PBA_12K;
   3149 		break;
   3150 	case WM_T_82574:
   3151 	case WM_T_82583:
   3152 		sc->sc_pba = PBA_20K;
   3153 		break;
   3154 	case WM_T_ICH8:
   3155 		sc->sc_pba = PBA_8K;
   3156 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3157 		break;
   3158 	case WM_T_ICH9:
   3159 	case WM_T_ICH10:
   3160 		sc->sc_pba = PBA_10K;
   3161 		break;
   3162 	case WM_T_PCH:
   3163 	case WM_T_PCH2:
   3164 	case WM_T_PCH_LPT:
   3165 		sc->sc_pba = PBA_26K;
   3166 		break;
   3167 	default:
   3168 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3169 		    PBA_40K : PBA_48K;
   3170 		break;
   3171 	}
   3172 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3173 
   3174 	/* Prevent the PCI-E bus from sticking */
   3175 	if (sc->sc_flags & WM_F_PCIE) {
   3176 		int timeout = 800;
   3177 
   3178 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3179 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3180 
   3181 		while (timeout--) {
   3182 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3183 			    == 0)
   3184 				break;
   3185 			delay(100);
   3186 		}
   3187 	}
   3188 
   3189 	/* Set the completion timeout for interface */
   3190 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3191 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3192 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3193 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3194 		wm_set_pcie_completion_timeout(sc);
   3195 
   3196 	/* Clear interrupt */
   3197 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3198 
   3199 	/* Stop the transmit and receive processes. */
   3200 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3201 	sc->sc_rctl &= ~RCTL_EN;
   3202 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3203 	CSR_WRITE_FLUSH(sc);
   3204 
   3205 	/* XXX set_tbi_sbp_82543() */
   3206 
   3207 	delay(10*1000);
   3208 
   3209 	/* Must acquire the MDIO ownership before MAC reset */
   3210 	switch (sc->sc_type) {
   3211 	case WM_T_82573:
   3212 	case WM_T_82574:
   3213 	case WM_T_82583:
   3214 		error = wm_get_hw_semaphore_82573(sc);
   3215 		break;
   3216 	default:
   3217 		break;
   3218 	}
   3219 
   3220 	/*
   3221 	 * 82541 Errata 29? & 82547 Errata 28?
   3222 	 * See also the description about PHY_RST bit in CTRL register
   3223 	 * in 8254x_GBe_SDM.pdf.
   3224 	 */
   3225 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3226 		CSR_WRITE(sc, WMREG_CTRL,
   3227 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3228 		CSR_WRITE_FLUSH(sc);
   3229 		delay(5000);
   3230 	}
   3231 
   3232 	switch (sc->sc_type) {
   3233 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3234 	case WM_T_82541:
   3235 	case WM_T_82541_2:
   3236 	case WM_T_82547:
   3237 	case WM_T_82547_2:
   3238 		/*
   3239 		 * On some chipsets, a reset through a memory-mapped write
   3240 		 * cycle can cause the chip to reset before completing the
   3241 		 * write cycle.  This causes major headache that can be
   3242 		 * avoided by issuing the reset via indirect register writes
   3243 		 * through I/O space.
   3244 		 *
   3245 		 * So, if we successfully mapped the I/O BAR at attach time,
   3246 		 * use that.  Otherwise, try our luck with a memory-mapped
   3247 		 * reset.
   3248 		 */
   3249 		if (sc->sc_flags & WM_F_IOH_VALID)
   3250 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3251 		else
   3252 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3253 		break;
   3254 	case WM_T_82545_3:
   3255 	case WM_T_82546_3:
   3256 		/* Use the shadow control register on these chips. */
   3257 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3258 		break;
   3259 	case WM_T_80003:
   3260 		mask = swfwphysem[sc->sc_funcid];
   3261 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3262 		wm_get_swfw_semaphore(sc, mask);
   3263 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3264 		wm_put_swfw_semaphore(sc, mask);
   3265 		break;
   3266 	case WM_T_ICH8:
   3267 	case WM_T_ICH9:
   3268 	case WM_T_ICH10:
   3269 	case WM_T_PCH:
   3270 	case WM_T_PCH2:
   3271 	case WM_T_PCH_LPT:
   3272 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3273 		if (wm_check_reset_block(sc) == 0) {
   3274 			/*
   3275 			 * Gate automatic PHY configuration by hardware on
   3276 			 * non-managed 82579
   3277 			 */
   3278 			if ((sc->sc_type == WM_T_PCH2)
   3279 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3280 				!= 0))
   3281 				wm_gate_hw_phy_config_ich8lan(sc, 1);
    3282 
   3284 			reg |= CTRL_PHY_RESET;
   3285 			phy_reset = 1;
   3286 		}
   3287 		wm_get_swfwhw_semaphore(sc);
   3288 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3289 		/* Don't insert a completion barrier (flush) after the reset write */
   3290 		delay(20*1000);
   3291 		wm_put_swfwhw_semaphore(sc);
   3292 		break;
   3293 	case WM_T_82542_2_0:
   3294 	case WM_T_82542_2_1:
   3295 	case WM_T_82543:
   3296 	case WM_T_82540:
   3297 	case WM_T_82545:
   3298 	case WM_T_82546:
   3299 	case WM_T_82571:
   3300 	case WM_T_82572:
   3301 	case WM_T_82573:
   3302 	case WM_T_82574:
   3303 	case WM_T_82575:
   3304 	case WM_T_82576:
   3305 	case WM_T_82580:
   3306 	case WM_T_82580ER:
   3307 	case WM_T_82583:
   3308 	case WM_T_I350:
   3309 	case WM_T_I354:
   3310 	case WM_T_I210:
   3311 	case WM_T_I211:
   3312 	default:
   3313 		/* Everything else can safely use the documented method. */
   3314 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3315 		break;
   3316 	}
   3317 
   3318 	/* Must release the MDIO ownership after MAC reset */
   3319 	switch (sc->sc_type) {
   3320 	case WM_T_82573:
   3321 	case WM_T_82574:
   3322 	case WM_T_82583:
   3323 		if (error == 0)
   3324 			wm_put_hw_semaphore_82573(sc);
   3325 		break;
   3326 	default:
   3327 		break;
   3328 	}
   3329 
   3330 	if (phy_reset != 0)
   3331 		wm_get_cfg_done(sc);
   3332 
   3333 	/* reload EEPROM */
   3334 	switch (sc->sc_type) {
   3335 	case WM_T_82542_2_0:
   3336 	case WM_T_82542_2_1:
   3337 	case WM_T_82543:
   3338 	case WM_T_82544:
   3339 		delay(10);
   3340 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3341 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3342 		CSR_WRITE_FLUSH(sc);
   3343 		delay(2000);
   3344 		break;
   3345 	case WM_T_82540:
   3346 	case WM_T_82545:
   3347 	case WM_T_82545_3:
   3348 	case WM_T_82546:
   3349 	case WM_T_82546_3:
   3350 		delay(5*1000);
   3351 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3352 		break;
   3353 	case WM_T_82541:
   3354 	case WM_T_82541_2:
   3355 	case WM_T_82547:
   3356 	case WM_T_82547_2:
   3357 		delay(20000);
   3358 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3359 		break;
   3360 	case WM_T_82571:
   3361 	case WM_T_82572:
   3362 	case WM_T_82573:
   3363 	case WM_T_82574:
   3364 	case WM_T_82583:
   3365 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3366 			delay(10);
   3367 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3368 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3369 			CSR_WRITE_FLUSH(sc);
   3370 		}
   3371 		/* check EECD_EE_AUTORD */
   3372 		wm_get_auto_rd_done(sc);
   3373 		/*
   3374 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3375 		 * is set.
   3376 		 */
   3377 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3378 		    || (sc->sc_type == WM_T_82583))
   3379 			delay(25*1000);
   3380 		break;
   3381 	case WM_T_82575:
   3382 	case WM_T_82576:
   3383 	case WM_T_82580:
   3384 	case WM_T_82580ER:
   3385 	case WM_T_I350:
   3386 	case WM_T_I354:
   3387 	case WM_T_I210:
   3388 	case WM_T_I211:
   3389 	case WM_T_80003:
   3390 		/* check EECD_EE_AUTORD */
   3391 		wm_get_auto_rd_done(sc);
   3392 		break;
   3393 	case WM_T_ICH8:
   3394 	case WM_T_ICH9:
   3395 	case WM_T_ICH10:
   3396 	case WM_T_PCH:
   3397 	case WM_T_PCH2:
   3398 	case WM_T_PCH_LPT:
   3399 		break;
   3400 	default:
   3401 		panic("%s: unknown type\n", __func__);
   3402 	}
   3403 
   3404 	/* Check whether EEPROM is present or not */
   3405 	switch (sc->sc_type) {
   3406 	case WM_T_82575:
   3407 	case WM_T_82576:
   3408 #if 0 /* XXX */
   3409 	case WM_T_82580:
   3410 	case WM_T_82580ER:
   3411 #endif
   3412 	case WM_T_I350:
   3413 	case WM_T_I354:
   3414 	case WM_T_ICH8:
   3415 	case WM_T_ICH9:
   3416 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3417 			/* Not found */
   3418 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3419 			if ((sc->sc_type == WM_T_82575)
   3420 			    || (sc->sc_type == WM_T_82576)
   3421 			    || (sc->sc_type == WM_T_82580)
   3422 			    || (sc->sc_type == WM_T_82580ER)
   3423 			    || (sc->sc_type == WM_T_I350)
   3424 			    || (sc->sc_type == WM_T_I354))
   3425 				wm_reset_init_script_82575(sc);
   3426 		}
   3427 		break;
   3428 	default:
   3429 		break;
   3430 	}
   3431 
   3432 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3433 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3434 		/* clear global device reset status bit */
   3435 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3436 	}
   3437 
   3438 	/* Clear any pending interrupt events. */
   3439 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3440 	reg = CSR_READ(sc, WMREG_ICR);
   3441 
   3442 	/* reload sc_ctrl */
   3443 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3444 
   3445 	if (sc->sc_type == WM_T_I350)
   3446 		wm_set_eee_i350(sc);
   3447 
   3448 	/* dummy read from WUC */
   3449 	if (sc->sc_type == WM_T_PCH)
   3450 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3451 	/*
   3452 	 * For PCH, this write will make sure that any noise will be detected
   3453 	 * as a CRC error and be dropped rather than show up as a bad packet
   3454 	 * to the DMA engine
   3455 	 */
   3456 	if (sc->sc_type == WM_T_PCH)
   3457 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3458 
   3459 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3460 		CSR_WRITE(sc, WMREG_WUC, 0);
   3461 
   3462 	/* XXX need special handling for 82580 */
   3463 }
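
/*
 * Worked example of the 82547 packet-buffer split in wm_reset() above,
 * assuming the PBA_* constants count kilobytes: with a standard MTU,
 * sc_pba = PBA_30K, so reception gets 30KB of the 40KB packet buffer
 * and the Tx FIFO gets the remaining (40 - 30) << PBA_BYTE_SHIFT =
 * 10KB starting right after it; a jumbo MTU flips the split toward
 * reception (22KB rx, 18KB tx).
 */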
   3464 
   3465 /*
   3466  * wm_add_rxbuf:
   3467  *
    3468  *	Add a receive buffer to the indicated descriptor.
   3469  */
   3470 static int
   3471 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3472 {
   3473 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3474 	struct mbuf *m;
   3475 	int error;
   3476 
   3477 	KASSERT(WM_LOCKED(sc));
   3478 
   3479 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3480 	if (m == NULL)
   3481 		return ENOBUFS;
   3482 
   3483 	MCLGET(m, M_DONTWAIT);
   3484 	if ((m->m_flags & M_EXT) == 0) {
   3485 		m_freem(m);
   3486 		return ENOBUFS;
   3487 	}
   3488 
   3489 	if (rxs->rxs_mbuf != NULL)
   3490 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3491 
   3492 	rxs->rxs_mbuf = m;
   3493 
   3494 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3495 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3496 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3497 	if (error) {
   3498 		/* XXX XXX XXX */
   3499 		aprint_error_dev(sc->sc_dev,
   3500 		    "unable to load rx DMA map %d, error = %d\n",
   3501 		    idx, error);
   3502 		panic("wm_add_rxbuf");
   3503 	}
   3504 
   3505 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3506 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3507 
   3508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3509 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3510 			WM_INIT_RXDESC(sc, idx);
   3511 	} else
   3512 		WM_INIT_RXDESC(sc, idx);
   3513 
   3514 	return 0;
   3515 }
   3516 
   3517 /*
   3518  * wm_rxdrain:
   3519  *
   3520  *	Drain the receive queue.
   3521  */
   3522 static void
   3523 wm_rxdrain(struct wm_softc *sc)
   3524 {
   3525 	struct wm_rxsoft *rxs;
   3526 	int i;
   3527 
   3528 	KASSERT(WM_LOCKED(sc));
   3529 
   3530 	for (i = 0; i < WM_NRXDESC; i++) {
   3531 		rxs = &sc->sc_rxsoft[i];
   3532 		if (rxs->rxs_mbuf != NULL) {
   3533 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3534 			m_freem(rxs->rxs_mbuf);
   3535 			rxs->rxs_mbuf = NULL;
   3536 		}
   3537 	}
   3538 }
   3539 
   3540 /*
   3541  * wm_init:		[ifnet interface function]
   3542  *
   3543  *	Initialize the interface.
   3544  */
   3545 static int
   3546 wm_init(struct ifnet *ifp)
   3547 {
   3548 	struct wm_softc *sc = ifp->if_softc;
   3549 	int ret;
   3550 
   3551 	WM_LOCK(sc);
   3552 	ret = wm_init_locked(ifp);
   3553 	WM_UNLOCK(sc);
   3554 
   3555 	return ret;
   3556 }
   3557 
   3558 static int
   3559 wm_init_locked(struct ifnet *ifp)
   3560 {
   3561 	struct wm_softc *sc = ifp->if_softc;
   3562 	struct wm_rxsoft *rxs;
   3563 	int i, j, trynum, error = 0;
   3564 	uint32_t reg;
   3565 
   3566 	KASSERT(WM_LOCKED(sc));
   3567 	/*
    3568 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3569 	 * There is a small but measurable benefit to avoiding the adjustment
   3570 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3571 	 * on such platforms.  One possibility is that the DMA itself is
   3572 	 * slightly more efficient if the front of the entire packet (instead
   3573 	 * of the front of the headers) is aligned.
   3574 	 *
   3575 	 * Note we must always set align_tweak to 0 if we are using
   3576 	 * jumbo frames.
   3577 	 */
   3578 #ifdef __NO_STRICT_ALIGNMENT
   3579 	sc->sc_align_tweak = 0;
   3580 #else
   3581 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   3582 		sc->sc_align_tweak = 0;
   3583 	else
   3584 		sc->sc_align_tweak = 2;
   3585 #endif /* __NO_STRICT_ALIGNMENT */
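	/*
	 * Worked example: with MCLBYTES = 2048 and a 1500-byte MTU,
	 * 1500 + 14 + 4 = 1518 <= 2046, so align_tweak = 2; offsetting
	 * the buffer by two bytes makes the IP header that follows the
	 * 14-byte Ethernet header land on a 4-byte boundary.
	 */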
   3586 
   3587 	/* Cancel any pending I/O. */
   3588 	wm_stop_locked(ifp, 0);
   3589 
   3590 	/* update statistics before reset */
   3591 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3592 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   3593 
   3594 	/* Reset the chip to a known state. */
   3595 	wm_reset(sc);
   3596 
   3597 	switch (sc->sc_type) {
   3598 	case WM_T_82571:
   3599 	case WM_T_82572:
   3600 	case WM_T_82573:
   3601 	case WM_T_82574:
   3602 	case WM_T_82583:
   3603 	case WM_T_80003:
   3604 	case WM_T_ICH8:
   3605 	case WM_T_ICH9:
   3606 	case WM_T_ICH10:
   3607 	case WM_T_PCH:
   3608 	case WM_T_PCH2:
   3609 	case WM_T_PCH_LPT:
   3610 		if (wm_check_mng_mode(sc) != 0)
   3611 			wm_get_hw_control(sc);
   3612 		break;
   3613 	default:
   3614 		break;
   3615 	}
   3616 
   3617 	/* Reset the PHY. */
   3618 	if (sc->sc_flags & WM_F_HAS_MII)
   3619 		wm_gmii_reset(sc);
   3620 
   3621 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3622 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
   3623 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3624 	    || (sc->sc_type == WM_T_PCH_LPT))
   3625 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
   3626 
   3627 	/* Initialize the transmit descriptor ring. */
   3628 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   3629 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   3630 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3631 	sc->sc_txfree = WM_NTXDESC(sc);
   3632 	sc->sc_txnext = 0;
   3633 
   3634 	if (sc->sc_type < WM_T_82543) {
   3635 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3636 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3637 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   3638 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   3639 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   3640 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   3641 	} else {
   3642 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3643 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3644 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   3645 		CSR_WRITE(sc, WMREG_TDH, 0);
   3646 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
   3647 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
   3648 
   3649 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3650 			/*
   3651 			 * Don't write TDT before TCTL.EN is set.
   3652 			 * See the document.
   3653 			 */
   3654 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
   3655 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   3656 			    | TXDCTL_WTHRESH(0));
   3657 		else {
   3658 			CSR_WRITE(sc, WMREG_TDT, 0);
   3659 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   3660 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   3661 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   3662 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   3663 		}
   3664 	}
   3665 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   3666 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   3667 
   3668 	/* Initialize the transmit job descriptors. */
   3669 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   3670 		sc->sc_txsoft[i].txs_mbuf = NULL;
   3671 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   3672 	sc->sc_txsnext = 0;
   3673 	sc->sc_txsdirty = 0;
   3674 
   3675 	/*
   3676 	 * Initialize the receive descriptor and receive job
   3677 	 * descriptor rings.
   3678 	 */
   3679 	if (sc->sc_type < WM_T_82543) {
   3680 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   3681 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   3682 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   3683 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   3684 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   3685 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   3686 
   3687 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   3688 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   3689 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   3690 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   3691 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   3692 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   3693 	} else {
   3694 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   3695 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   3696 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   3697 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3698 			CSR_WRITE(sc, WMREG_EITR(0), 450);
   3699 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    3700 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   3701 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   3702 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   3703 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   3704 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   3705 			    | RXDCTL_WTHRESH(1));
   3706 		} else {
   3707 			CSR_WRITE(sc, WMREG_RDH, 0);
   3708 			CSR_WRITE(sc, WMREG_RDT, 0);
   3709 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   3710 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   3711 		}
   3712 	}
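	/*
	 * Worked example for the SRRCTL setup above: the receive buffer
	 * size field is in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
	 * so with 2048-byte clusters and an assumed shift of 10 (1KB
	 * units), MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT programs 2, i.e.
	 * 2KB buffers; the panic above enforces that MCLBYTES is a
	 * whole multiple of that unit.
	 */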
   3713 	for (i = 0; i < WM_NRXDESC; i++) {
   3714 		rxs = &sc->sc_rxsoft[i];
   3715 		if (rxs->rxs_mbuf == NULL) {
   3716 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   3717 				log(LOG_ERR, "%s: unable to allocate or map "
   3718 				    "rx buffer %d, error = %d\n",
   3719 				    device_xname(sc->sc_dev), i, error);
   3720 				/*
   3721 				 * XXX Should attempt to run with fewer receive
   3722 				 * XXX buffers instead of just failing.
   3723 				 */
   3724 				wm_rxdrain(sc);
   3725 				goto out;
   3726 			}
   3727 		} else {
   3728 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3729 				WM_INIT_RXDESC(sc, i);
   3730 			/*
    3731 			 * For 82575 and newer devices, the RX descriptors
    3732 			 * must be initialized after RCTL.EN is set in
    3733 			 * wm_set_filter().
   3734 			 */
   3735 		}
   3736 	}
   3737 	sc->sc_rxptr = 0;
   3738 	sc->sc_rxdiscard = 0;
   3739 	WM_RXCHAIN_RESET(sc);
   3740 
   3741 	/*
   3742 	 * Clear out the VLAN table -- we don't use it (yet).
   3743 	 */
   3744 	CSR_WRITE(sc, WMREG_VET, 0);
   3745 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3746 		trynum = 10; /* Due to hw errata */
   3747 	else
   3748 		trynum = 1;
   3749 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   3750 		for (j = 0; j < trynum; j++)
   3751 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   3752 
   3753 	/*
   3754 	 * Set up flow-control parameters.
   3755 	 *
   3756 	 * XXX Values could probably stand some tuning.
   3757 	 */
   3758 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   3759 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   3760 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   3761 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   3762 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   3763 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   3764 	}
   3765 
   3766 	sc->sc_fcrtl = FCRTL_DFLT;
   3767 	if (sc->sc_type < WM_T_82543) {
   3768 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   3769 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   3770 	} else {
   3771 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   3772 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   3773 	}
   3774 
   3775 	if (sc->sc_type == WM_T_80003)
   3776 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   3777 	else
   3778 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   3779 
   3780 	/* Writes the control register. */
   3781 	wm_set_vlan(sc);
   3782 
   3783 	if (sc->sc_flags & WM_F_HAS_MII) {
   3784 		int val;
   3785 
   3786 		switch (sc->sc_type) {
   3787 		case WM_T_80003:
   3788 		case WM_T_ICH8:
   3789 		case WM_T_ICH9:
   3790 		case WM_T_ICH10:
   3791 		case WM_T_PCH:
   3792 		case WM_T_PCH2:
   3793 		case WM_T_PCH_LPT:
   3794 			/*
   3795 			 * Set the mac to wait the maximum time between each
   3796 			 * iteration and increase the max iterations when
   3797 			 * polling the phy; this fixes erroneous timeouts at
   3798 			 * 10Mbps.
   3799 			 */
   3800 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   3801 			    0xFFFF);
   3802 			val = wm_kmrn_readreg(sc,
   3803 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   3804 			val |= 0x3F;
   3805 			wm_kmrn_writereg(sc,
   3806 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   3807 			break;
   3808 		default:
   3809 			break;
   3810 		}
   3811 
   3812 		if (sc->sc_type == WM_T_80003) {
   3813 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   3814 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   3815 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   3816 
   3817 			/* Bypass RX and TX FIFO's */
   3818 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   3819 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   3820 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   3821 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   3822 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   3823 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   3824 		}
   3825 	}
   3826 #if 0
   3827 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   3828 #endif
   3829 
   3830 	/* Set up checksum offload parameters. */
   3831 	reg = CSR_READ(sc, WMREG_RXCSUM);
   3832 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   3833 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   3834 		reg |= RXCSUM_IPOFL;
   3835 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   3836 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   3837 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   3838 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   3839 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   3840 
   3841 	/* Reset TBI's RXCFG count */
   3842 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
   3843 
   3844 	/* Set up the interrupt registers. */
   3845 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3846 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   3847 	    ICR_RXO | ICR_RXT0;
   3848 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
   3849 		sc->sc_icr |= ICR_RXCFG;
   3850 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   3851 
   3852 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3853 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3854 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3855 		reg = CSR_READ(sc, WMREG_KABGTXD);
   3856 		reg |= KABGTXD_BGSQLBIAS;
   3857 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   3858 	}
   3859 
   3860 	/* Set up the inter-packet gap. */
   3861 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   3862 
   3863 	if (sc->sc_type >= WM_T_82543) {
   3864 		/*
   3865 		 * Set up the interrupt throttling register (units of 256ns)
   3866 		 * Note that a footnote in Intel's documentation says this
   3867 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   3868 		 * or 10Mbit mode.  Empirically, it appears to be the case
    3869 		 * or 10Mbit mode.  Empirically, this also appears to be
    3870 		 * true for the 1024ns units of the other
   3871 		 * to divide this value by 4 when the link speed is low.
   3872 		 *
   3873 		 * XXX implement this division at link speed change!
   3874 		 */
   3875 
   3876 		/*
   3877 		 * For N interrupts/sec, set this value to:
   3878 		 * 1000000000 / (N * 256).  Note that we set the
   3879 		 * absolute and packet timer values to this value
   3880 		 * divided by 4 to get "simple timer" behavior.
   3881 		 */
   3882 
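		/*
		 * Worked example: sc_itr = 1500 below gives
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
		 * the 1024ns "simple timer" registers (TIDV/TADV and
		 * RDTR/RADV earlier) were set to 1500 / 4 = 375.
		 */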
   3883 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   3884 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   3885 	}
   3886 
   3887 	/* Set the VLAN ethernetype. */
    3888 	/* Set the VLAN ethertype. */
   3889 
   3890 	/*
   3891 	 * Set up the transmit control register; we start out with
    3892 	 * a collision distance suitable for FDX, but update it when
   3893 	 * we resolve the media type.
   3894 	 */
   3895 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   3896 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   3897 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3898 	if (sc->sc_type >= WM_T_82571)
   3899 		sc->sc_tctl |= TCTL_MULR;
   3900 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3901 
   3902 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3903 		/* Write TDT after TCTL.EN is set. See the document. */
   3904 		CSR_WRITE(sc, WMREG_TDT, 0);
   3905 	}
   3906 
   3907 	if (sc->sc_type == WM_T_80003) {
   3908 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   3909 		reg &= ~TCTL_EXT_GCEX_MASK;
   3910 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   3911 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   3912 	}
   3913 
   3914 	/* Set the media. */
   3915 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   3916 		goto out;
   3917 
   3918 	/* Configure for OS presence */
   3919 	wm_init_manageability(sc);
   3920 
   3921 	/*
   3922 	 * Set up the receive control register; we actually program
   3923 	 * the register when we set the receive filter.  Use multicast
   3924 	 * address offset type 0.
   3925 	 *
   3926 	 * Only the i82544 has the ability to strip the incoming
   3927 	 * CRC, so we don't enable that feature.
   3928 	 */
   3929 	sc->sc_mchash_type = 0;
   3930 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   3931 	    | RCTL_MO(sc->sc_mchash_type);
   3932 
   3933 	/*
   3934 	 * The I350 has a bug where it always strips the CRC whether
    3935 	 * asked to or not, so ask for stripped CRC here and cope in wm_rxeof().
   3936 	 */
   3937 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3938 	    || (sc->sc_type == WM_T_I210))
   3939 		sc->sc_rctl |= RCTL_SECRC;
   3940 
   3941 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   3942 	    && (ifp->if_mtu > ETHERMTU)) {
   3943 		sc->sc_rctl |= RCTL_LPE;
   3944 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3945 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   3946 	}
   3947 
   3948 	if (MCLBYTES == 2048) {
   3949 		sc->sc_rctl |= RCTL_2k;
   3950 	} else {
   3951 		if (sc->sc_type >= WM_T_82543) {
   3952 			switch (MCLBYTES) {
   3953 			case 4096:
   3954 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   3955 				break;
   3956 			case 8192:
   3957 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   3958 				break;
   3959 			case 16384:
   3960 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   3961 				break;
   3962 			default:
   3963 				panic("wm_init: MCLBYTES %d unsupported",
   3964 				    MCLBYTES);
   3965 				break;
   3966 			}
    3967 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   3968 	}
   3969 
   3970 	/* Set the receive filter. */
   3971 	wm_set_filter(sc);
   3972 
   3973 	/* Enable ECC */
   3974 	switch (sc->sc_type) {
   3975 	case WM_T_82571:
   3976 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   3977 		reg |= PBA_ECC_CORR_EN;
   3978 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   3979 		break;
   3980 	case WM_T_PCH_LPT:
   3981 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   3982 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   3983 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   3984 
   3985 		reg = CSR_READ(sc, WMREG_CTRL);
   3986 		reg |= CTRL_MEHE;
   3987 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3988 		break;
   3989 	default:
   3990 		break;
   3991 	}
   3992 
   3993 	/* On 575 and later set RDT only if RX enabled */
   3994 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3995 		for (i = 0; i < WM_NRXDESC; i++)
   3996 			WM_INIT_RXDESC(sc, i);
   3997 
   3998 	sc->sc_stopping = false;
   3999 
   4000 	/* Start the one second link check clock. */
   4001 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4002 
   4003 	/* ...all done! */
   4004 	ifp->if_flags |= IFF_RUNNING;
   4005 	ifp->if_flags &= ~IFF_OACTIVE;
   4006 
   4007  out:
   4008 	sc->sc_if_flags = ifp->if_flags;
   4009 	if (error)
   4010 		log(LOG_ERR, "%s: interface not running\n",
   4011 		    device_xname(sc->sc_dev));
   4012 	return error;
   4013 }
   4014 
   4015 /*
   4016  * wm_stop:		[ifnet interface function]
   4017  *
   4018  *	Stop transmission on the interface.
   4019  */
   4020 static void
   4021 wm_stop(struct ifnet *ifp, int disable)
   4022 {
   4023 	struct wm_softc *sc = ifp->if_softc;
   4024 
   4025 	WM_LOCK(sc);
   4026 	wm_stop_locked(ifp, disable);
   4027 	WM_UNLOCK(sc);
   4028 }
   4029 
   4030 static void
   4031 wm_stop_locked(struct ifnet *ifp, int disable)
   4032 {
   4033 	struct wm_softc *sc = ifp->if_softc;
   4034 	struct wm_txsoft *txs;
   4035 	int i;
   4036 
   4037 	KASSERT(WM_LOCKED(sc));
   4038 
   4039 	sc->sc_stopping = true;
   4040 
   4041 	/* Stop the one second clock. */
   4042 	callout_stop(&sc->sc_tick_ch);
   4043 
   4044 	/* Stop the 82547 Tx FIFO stall check timer. */
   4045 	if (sc->sc_type == WM_T_82547)
   4046 		callout_stop(&sc->sc_txfifo_ch);
   4047 
   4048 	if (sc->sc_flags & WM_F_HAS_MII) {
   4049 		/* Down the MII. */
   4050 		mii_down(&sc->sc_mii);
   4051 	} else {
   4052 #if 0
   4053 		/* Should we clear PHY's status properly? */
   4054 		wm_reset(sc);
   4055 #endif
   4056 	}
   4057 
   4058 	/* Stop the transmit and receive processes. */
   4059 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4060 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4061 	sc->sc_rctl &= ~RCTL_EN;
   4062 
   4063 	/*
   4064 	 * Clear the interrupt mask to ensure the device cannot assert its
   4065 	 * interrupt line.
   4066 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4067 	 * any currently pending or shared interrupt.
   4068 	 */
   4069 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4070 	sc->sc_icr = 0;
   4071 
   4072 	/* Release any queued transmit buffers. */
   4073 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4074 		txs = &sc->sc_txsoft[i];
   4075 		if (txs->txs_mbuf != NULL) {
   4076 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4077 			m_freem(txs->txs_mbuf);
   4078 			txs->txs_mbuf = NULL;
   4079 		}
   4080 	}
   4081 
   4082 	/* Mark the interface as down and cancel the watchdog timer. */
   4083 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4084 	ifp->if_timer = 0;
   4085 
   4086 	if (disable)
   4087 		wm_rxdrain(sc);
   4088 
   4089 #if 0 /* notyet */
   4090 	if (sc->sc_type >= WM_T_82544)
   4091 		CSR_WRITE(sc, WMREG_WUC, 0);
   4092 #endif
   4093 }
   4094 
   4095 /*
   4096  * wm_tx_offload:
   4097  *
   4098  *	Set up TCP/IP checksumming parameters for the
   4099  *	specified packet.
   4100  */
   4101 static int
   4102 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4103     uint8_t *fieldsp)
   4104 {
   4105 	struct mbuf *m0 = txs->txs_mbuf;
   4106 	struct livengood_tcpip_ctxdesc *t;
   4107 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4108 	uint32_t ipcse;
   4109 	struct ether_header *eh;
   4110 	int offset, iphl;
   4111 	uint8_t fields;
   4112 
   4113 	/*
   4114 	 * XXX It would be nice if the mbuf pkthdr had offset
   4115 	 * fields for the protocol headers.
   4116 	 */
   4117 
   4118 	eh = mtod(m0, struct ether_header *);
   4119 	switch (htons(eh->ether_type)) {
   4120 	case ETHERTYPE_IP:
   4121 	case ETHERTYPE_IPV6:
   4122 		offset = ETHER_HDR_LEN;
   4123 		break;
   4124 
   4125 	case ETHERTYPE_VLAN:
   4126 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4127 		break;
   4128 
   4129 	default:
   4130 		/*
   4131 		 * Don't support this protocol or encapsulation.
   4132 		 */
   4133 		*fieldsp = 0;
   4134 		*cmdp = 0;
   4135 		return 0;
   4136 	}
   4137 
   4138 	if ((m0->m_pkthdr.csum_flags &
   4139 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4140 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4141 	} else {
   4142 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4143 	}
   4144 	ipcse = offset + iphl - 1;
   4145 
   4146 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4147 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4148 	seg = 0;
   4149 	fields = 0;
   4150 
   4151 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4152 		int hlen = offset + iphl;
   4153 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4154 
   4155 		if (__predict_false(m0->m_len <
   4156 				    (hlen + sizeof(struct tcphdr)))) {
   4157 			/*
   4158 			 * TCP/IP headers are not in the first mbuf; we need
   4159 			 * to do this the slow and painful way.  Let's just
   4160 			 * hope this doesn't happen very often.
   4161 			 */
   4162 			struct tcphdr th;
   4163 
   4164 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4165 
   4166 			m_copydata(m0, hlen, sizeof(th), &th);
   4167 			if (v4) {
   4168 				struct ip ip;
   4169 
   4170 				m_copydata(m0, offset, sizeof(ip), &ip);
   4171 				ip.ip_len = 0;
   4172 				m_copyback(m0,
   4173 				    offset + offsetof(struct ip, ip_len),
   4174 				    sizeof(ip.ip_len), &ip.ip_len);
   4175 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4176 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4177 			} else {
   4178 				struct ip6_hdr ip6;
   4179 
   4180 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4181 				ip6.ip6_plen = 0;
   4182 				m_copyback(m0,
   4183 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4184 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4185 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4186 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4187 			}
   4188 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4189 			    sizeof(th.th_sum), &th.th_sum);
   4190 
   4191 			hlen += th.th_off << 2;
   4192 		} else {
   4193 			/*
   4194 			 * TCP/IP headers are in the first mbuf; we can do
   4195 			 * this the easy way.
   4196 			 */
   4197 			struct tcphdr *th;
   4198 
   4199 			if (v4) {
   4200 				struct ip *ip =
   4201 				    (void *)(mtod(m0, char *) + offset);
   4202 				th = (void *)(mtod(m0, char *) + hlen);
   4203 
   4204 				ip->ip_len = 0;
   4205 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4206 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4207 			} else {
   4208 				struct ip6_hdr *ip6 =
   4209 				    (void *)(mtod(m0, char *) + offset);
   4210 				th = (void *)(mtod(m0, char *) + hlen);
   4211 
   4212 				ip6->ip6_plen = 0;
   4213 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4214 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4215 			}
   4216 			hlen += th->th_off << 2;
   4217 		}
   4218 
   4219 		if (v4) {
   4220 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4221 			cmdlen |= WTX_TCPIP_CMD_IP;
   4222 		} else {
   4223 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4224 			ipcse = 0;
   4225 		}
   4226 		cmd |= WTX_TCPIP_CMD_TSE;
   4227 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4228 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4229 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4230 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4231 	}
   4232 
   4233 	/*
   4234 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4235 	 * offload feature, if we load the context descriptor, we
   4236 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4237 	 */
   4238 
   4239 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4240 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4241 	    WTX_TCPIP_IPCSE(ipcse);
   4242 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4243 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4244 		fields |= WTX_IXSM;
   4245 	}
   4246 
   4247 	offset += iphl;
   4248 
   4249 	if (m0->m_pkthdr.csum_flags &
   4250 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4251 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4252 		fields |= WTX_TXSM;
   4253 		tucs = WTX_TCPIP_TUCSS(offset) |
   4254 		    WTX_TCPIP_TUCSO(offset +
   4255 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4256 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4257 	} else if ((m0->m_pkthdr.csum_flags &
   4258 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4259 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4260 		fields |= WTX_TXSM;
   4261 		tucs = WTX_TCPIP_TUCSS(offset) |
   4262 		    WTX_TCPIP_TUCSO(offset +
   4263 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4264 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4265 	} else {
   4266 		/* Just initialize it to a valid TCP context. */
   4267 		tucs = WTX_TCPIP_TUCSS(offset) |
   4268 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4269 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4270 	}
   4271 
   4272 	/* Fill in the context descriptor. */
   4273 	t = (struct livengood_tcpip_ctxdesc *)
   4274 	    &sc->sc_txdescs[sc->sc_txnext];
   4275 	t->tcpip_ipcs = htole32(ipcs);
   4276 	t->tcpip_tucs = htole32(tucs);
   4277 	t->tcpip_cmdlen = htole32(cmdlen);
   4278 	t->tcpip_seg = htole32(seg);
   4279 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4280 
   4281 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4282 	txs->txs_ndesc++;
   4283 
   4284 	*cmdp = cmd;
   4285 	*fieldsp = fields;
   4286 
   4287 	return 0;
   4288 }
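
/*
 * A minimal sketch of the TSO checksum seeding done in wm_tx_offload()
 * above, compiled out: for IPv4 TSO the total-length field is zeroed
 * and th_sum is preloaded with the pseudo-header sum (addresses and
 * protocol only, no length) so the hardware can finish the checksum
 * for each segment.  Assumes the headers are contiguous; the function
 * name is hypothetical.
 */
#if 0
static void
example_tso_seed_v4(struct ip *ip, struct tcphdr *th)
{

	ip->ip_len = 0;		/* hardware fills in per-segment length */
	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htons(IPPROTO_TCP));
}
#endif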
   4289 
   4290 static void
   4291 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4292 {
   4293 	struct mbuf *m;
   4294 	int i;
   4295 
   4296 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4297 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4298 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4299 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4300 		    m->m_data, m->m_len, m->m_flags);
   4301 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4302 	    i, i == 1 ? "" : "s");
   4303 }
   4304 
   4305 /*
   4306  * wm_82547_txfifo_stall:
   4307  *
   4308  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4309  *	reset the FIFO pointers, and restart packet transmission.
   4310  */
   4311 static void
   4312 wm_82547_txfifo_stall(void *arg)
   4313 {
   4314 	struct wm_softc *sc = arg;
   4315 #ifndef WM_MPSAFE
   4316 	int s;
   4317 
   4318 	s = splnet();
   4319 #endif
   4320 	WM_LOCK(sc);
   4321 
   4322 	if (sc->sc_stopping)
   4323 		goto out;
   4324 
   4325 	if (sc->sc_txfifo_stall) {
   4326 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4327 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4328 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4329 			/*
   4330 			 * Packets have drained.  Stop transmitter, reset
   4331 			 * FIFO pointers, restart transmitter, and kick
   4332 			 * the packet queue.
   4333 			 */
   4334 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4335 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4336 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4337 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4338 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4339 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4340 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4341 			CSR_WRITE_FLUSH(sc);
   4342 
   4343 			sc->sc_txfifo_head = 0;
   4344 			sc->sc_txfifo_stall = 0;
   4345 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4346 		} else {
   4347 			/*
   4348 			 * Still waiting for packets to drain; try again in
   4349 			 * another tick.
   4350 			 */
   4351 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4352 		}
   4353 	}
   4354 
   4355 out:
   4356 	WM_UNLOCK(sc);
   4357 #ifndef WM_MPSAFE
   4358 	splx(s);
   4359 #endif
   4360 }
   4361 
   4362 /*
   4363  * wm_82547_txfifo_bugchk:
   4364  *
   4365  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4366  *	prevent enqueueing a packet that would wrap around the end
    4367  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   4368  *
   4369  *	We do this by checking the amount of space before the end
   4370  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4371  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4372  *	the internal FIFO pointers to the beginning, and restart
   4373  *	transmission on the interface.
   4374  */
   4375 #define	WM_FIFO_HDR		0x10
   4376 #define	WM_82547_PAD_LEN	0x3e0
   4377 static int
   4378 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4379 {
   4380 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4381 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4382 
   4383 	/* Just return if already stalled. */
   4384 	if (sc->sc_txfifo_stall)
   4385 		return 1;
   4386 
   4387 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4388 		/* Stall only occurs in half-duplex mode. */
   4389 		goto send_packet;
   4390 	}
   4391 
   4392 	if (len >= WM_82547_PAD_LEN + space) {
   4393 		sc->sc_txfifo_stall = 1;
   4394 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4395 		return 1;
   4396 	}
   4397 
   4398  send_packet:
   4399 	sc->sc_txfifo_head += len;
   4400 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4401 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4402 
   4403 	return 0;
   4404 }
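
/*
 * Worked example of the check above, with WM_FIFO_HDR = 0x10 and
 * WM_82547_PAD_LEN = 0x3e0: a 1514-byte frame occupies
 * roundup(1514 + 0x10, 0x10) = 0x600 bytes of FIFO, so in half-duplex
 * mode the transmitter is stalled once 0x600 - 0x3e0 = 0x220 bytes or
 * fewer remain before the end of the FIFO, rather than letting the
 * packet wrap.
 */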
   4405 
   4406 /*
   4407  * wm_start:		[ifnet interface function]
   4408  *
   4409  *	Start packet transmission on the interface.
   4410  */
   4411 static void
   4412 wm_start(struct ifnet *ifp)
   4413 {
   4414 	struct wm_softc *sc = ifp->if_softc;
   4415 
   4416 	WM_LOCK(sc);
   4417 	if (!sc->sc_stopping)
   4418 		wm_start_locked(ifp);
   4419 	WM_UNLOCK(sc);
   4420 }
   4421 
   4422 static void
   4423 wm_start_locked(struct ifnet *ifp)
   4424 {
   4425 	struct wm_softc *sc = ifp->if_softc;
   4426 	struct mbuf *m0;
   4427 	struct m_tag *mtag;
   4428 	struct wm_txsoft *txs;
   4429 	bus_dmamap_t dmamap;
   4430 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4431 	bus_addr_t curaddr;
   4432 	bus_size_t seglen, curlen;
   4433 	uint32_t cksumcmd;
   4434 	uint8_t cksumfields;
   4435 
   4436 	KASSERT(WM_LOCKED(sc));
   4437 
   4438 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4439 		return;
   4440 
   4441 	/* Remember the previous number of free descriptors. */
   4442 	ofree = sc->sc_txfree;
   4443 
   4444 	/*
   4445 	 * Loop through the send queue, setting up transmit descriptors
   4446 	 * until we drain the queue, or use up all available transmit
   4447 	 * descriptors.
   4448 	 */
   4449 	for (;;) {
   4450 		m0 = NULL;
   4451 
   4452 		/* Get a work queue entry. */
   4453 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4454 			wm_txintr(sc);
   4455 			if (sc->sc_txsfree == 0) {
   4456 				DPRINTF(WM_DEBUG_TX,
   4457 				    ("%s: TX: no free job descriptors\n",
   4458 					device_xname(sc->sc_dev)));
   4459 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4460 				break;
   4461 			}
   4462 		}
   4463 
   4464 		/* Grab a packet off the queue. */
   4465 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4466 		if (m0 == NULL)
   4467 			break;
   4468 
   4469 		DPRINTF(WM_DEBUG_TX,
   4470 		    ("%s: TX: have packet to transmit: %p\n",
   4471 		    device_xname(sc->sc_dev), m0));
   4472 
   4473 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4474 		dmamap = txs->txs_dmamap;
   4475 
   4476 		use_tso = (m0->m_pkthdr.csum_flags &
   4477 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4478 
   4479 		/*
   4480 		 * So says the Linux driver:
   4481 		 * The controller does a simple calculation to make sure
   4482 		 * there is enough room in the FIFO before initiating the
   4483 		 * DMA for each buffer.  The calc is:
   4484 		 *	4 = ceil(buffer len / MSS)
   4485 		 * To make sure we don't overrun the FIFO, adjust the max
   4486 		 * buffer len if the MSS drops.
   4487 		 */
   4488 		dmamap->dm_maxsegsz =
   4489 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4490 		    ? m0->m_pkthdr.segsz << 2
   4491 		    : WTX_MAX_LEN;
   4492 
   4493 		/*
   4494 		 * Load the DMA map.  If this fails, the packet either
   4495 		 * didn't fit in the allotted number of segments, or we
   4496 		 * were short on resources.  For the too-many-segments
   4497 		 * case, we simply report an error and drop the packet,
   4498 		 * since we can't sanely copy a jumbo packet to a single
   4499 		 * buffer.
   4500 		 */
   4501 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4502 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4503 		if (error) {
   4504 			if (error == EFBIG) {
   4505 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4506 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4507 				    "DMA segments, dropping...\n",
   4508 				    device_xname(sc->sc_dev));
   4509 				wm_dump_mbuf_chain(sc, m0);
   4510 				m_freem(m0);
   4511 				continue;
   4512 			}
   4513 			/*  Short on resources, just stop for now. */
   4514 			DPRINTF(WM_DEBUG_TX,
   4515 			    ("%s: TX: dmamap load failed: %d\n",
   4516 			    device_xname(sc->sc_dev), error));
   4517 			break;
   4518 		}
   4519 
   4520 		segs_needed = dmamap->dm_nsegs;
   4521 		if (use_tso) {
   4522 			/* For sentinel descriptor; see below. */
   4523 			segs_needed++;
   4524 		}
   4525 
   4526 		/*
   4527 		 * Ensure we have enough descriptors free to describe
   4528 		 * the packet.  Note, we always reserve one descriptor
   4529 		 * at the end of the ring due to the semantics of the
   4530 		 * TDT register, plus one more in the event we need
   4531 		 * to load offload context.
   4532 		 */
   4533 		if (segs_needed > sc->sc_txfree - 2) {
   4534 			/*
   4535 			 * Not enough free descriptors to transmit this
   4536 			 * packet.  We haven't committed anything yet,
   4537 			 * so just unload the DMA map, put the packet
    4538 			 * back on the queue, and punt.  Notify the upper
   4539 			 * layer that there are no more slots left.
   4540 			 */
   4541 			DPRINTF(WM_DEBUG_TX,
   4542 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4543 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4544 			    segs_needed, sc->sc_txfree - 1));
   4545 			ifp->if_flags |= IFF_OACTIVE;
   4546 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4547 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4548 			break;
   4549 		}
   4550 
   4551 		/*
   4552 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4553 		 * once we know we can transmit the packet, since we
   4554 		 * do some internal FIFO space accounting here.
   4555 		 */
   4556 		if (sc->sc_type == WM_T_82547 &&
   4557 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4558 			DPRINTF(WM_DEBUG_TX,
   4559 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4560 			    device_xname(sc->sc_dev)));
   4561 			ifp->if_flags |= IFF_OACTIVE;
   4562 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4563 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   4564 			break;
   4565 		}
   4566 
   4567 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   4568 
   4569 		DPRINTF(WM_DEBUG_TX,
   4570 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   4571 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   4572 
   4573 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   4574 
   4575 		/*
   4576 		 * Store a pointer to the packet so that we can free it
   4577 		 * later.
   4578 		 *
   4579 		 * Initially, we consider the number of descriptors the
    4580 		 * packet uses to be the number of DMA segments.  This may be
   4581 		 * incremented by 1 if we do checksum offload (a descriptor
   4582 		 * is used to set the checksum context).
   4583 		 */
   4584 		txs->txs_mbuf = m0;
   4585 		txs->txs_firstdesc = sc->sc_txnext;
   4586 		txs->txs_ndesc = segs_needed;
   4587 
   4588 		/* Set up offload parameters for this packet. */
   4589 		if (m0->m_pkthdr.csum_flags &
   4590 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   4591 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   4592 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   4593 			if (wm_tx_offload(sc, txs, &cksumcmd,
   4594 					  &cksumfields) != 0) {
   4595 				/* Error message already displayed. */
   4596 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   4597 				continue;
   4598 			}
   4599 		} else {
   4600 			cksumcmd = 0;
   4601 			cksumfields = 0;
   4602 		}
   4603 
   4604 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   4605 
   4606 		/* Sync the DMA map. */
   4607 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   4608 		    BUS_DMASYNC_PREWRITE);
   4609 
   4610 		/* Initialize the transmit descriptor. */
   4611 		for (nexttx = sc->sc_txnext, seg = 0;
   4612 		     seg < dmamap->dm_nsegs; seg++) {
   4613 			for (seglen = dmamap->dm_segs[seg].ds_len,
   4614 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   4615 			     seglen != 0;
   4616 			     curaddr += curlen, seglen -= curlen,
   4617 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   4618 				curlen = seglen;
   4619 
   4620 				/*
   4621 				 * So says the Linux driver:
   4622 				 * Work around for premature descriptor
   4623 				 * write-backs in TSO mode.  Append a
   4624 				 * 4-byte sentinel descriptor.
   4625 				 */
   4626 				if (use_tso &&
   4627 				    seg == dmamap->dm_nsegs - 1 &&
   4628 				    curlen > 8)
   4629 					curlen -= 4;
   4630 
   4631 				wm_set_dma_addr(
   4632 				    &sc->sc_txdescs[nexttx].wtx_addr,
   4633 				    curaddr);
   4634 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   4635 				    htole32(cksumcmd | curlen);
   4636 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   4637 				    0;
   4638 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   4639 				    cksumfields;
   4640 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   4641 				lasttx = nexttx;
   4642 
   4643 				DPRINTF(WM_DEBUG_TX,
   4644 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   4645 				     "len %#04zx\n",
   4646 				    device_xname(sc->sc_dev), nexttx,
   4647 				    (uint64_t)curaddr, curlen));
   4648 			}
   4649 		}
   4650 
   4651 		KASSERT(lasttx != -1);
   4652 
   4653 		/*
   4654 		 * Set up the command byte on the last descriptor of
   4655 		 * the packet.  If we're in the interrupt delay window,
   4656 		 * delay the interrupt.
   4657 		 */
   4658 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4659 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   4660 
   4661 		/*
   4662 		 * If VLANs are enabled and the packet has a VLAN tag, set
   4663 		 * up the descriptor to encapsulate the packet for us.
   4664 		 *
   4665 		 * This is only valid on the last descriptor of the packet.
   4666 		 */
   4667 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4668 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4669 			    htole32(WTX_CMD_VLE);
   4670 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   4671 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   4672 		}
   4673 
   4674 		txs->txs_lastdesc = lasttx;
   4675 
   4676 		DPRINTF(WM_DEBUG_TX,
   4677 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   4678 		    device_xname(sc->sc_dev),
   4679 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   4680 
   4681 		/* Sync the descriptors we're using. */
   4682 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   4683 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4684 
   4685 		/* Give the packet to the chip. */
   4686 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   4687 
   4688 		DPRINTF(WM_DEBUG_TX,
   4689 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   4690 
   4691 		DPRINTF(WM_DEBUG_TX,
   4692 		    ("%s: TX: finished transmitting packet, job %d\n",
   4693 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   4694 
   4695 		/* Advance the tx pointer. */
   4696 		sc->sc_txfree -= txs->txs_ndesc;
   4697 		sc->sc_txnext = nexttx;
   4698 
   4699 		sc->sc_txsfree--;
   4700 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   4701 
   4702 		/* Pass the packet to any BPF listeners. */
   4703 		bpf_mtap(ifp, m0);
   4704 	}
   4705 
   4706 	if (m0 != NULL) {
   4707 		ifp->if_flags |= IFF_OACTIVE;
   4708 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4709 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   4710 		m_freem(m0);
   4711 	}
   4712 
   4713 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   4714 		/* No more slots; notify upper layer. */
   4715 		ifp->if_flags |= IFF_OACTIVE;
   4716 	}
   4717 
   4718 	if (sc->sc_txfree != ofree) {
   4719 		/* Set a watchdog timer in case the chip flakes out. */
   4720 		ifp->if_timer = 5;
   4721 	}
   4722 }
   4723 
   4724 /*
   4725  * wm_nq_tx_offload:
   4726  *
   4727  *	Set up TCP/IP checksumming parameters for the
   4728  *	specified packet, for NEWQUEUE devices
   4729  */
   4730 static int
   4731 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   4732     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   4733 {
   4734 	struct mbuf *m0 = txs->txs_mbuf;
   4735 	struct m_tag *mtag;
   4736 	uint32_t vl_len, mssidx, cmdc;
   4737 	struct ether_header *eh;
   4738 	int offset, iphl;
   4739 
   4740 	/*
   4741 	 * XXX It would be nice if the mbuf pkthdr had offset
   4742 	 * fields for the protocol headers.
   4743 	 */
   4744 	*cmdlenp = 0;
   4745 	*fieldsp = 0;
   4746 
   4747 	eh = mtod(m0, struct ether_header *);
    4748 	switch (ntohs(eh->ether_type)) {
   4749 	case ETHERTYPE_IP:
   4750 	case ETHERTYPE_IPV6:
   4751 		offset = ETHER_HDR_LEN;
   4752 		break;
   4753 
   4754 	case ETHERTYPE_VLAN:
   4755 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4756 		break;
   4757 
   4758 	default:
   4759 		/* Don't support this protocol or encapsulation. */
   4760 		*do_csum = false;
   4761 		return 0;
   4762 	}
   4763 	*do_csum = true;
   4764 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   4765 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   4766 
   4767 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   4768 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   4769 
   4770 	if ((m0->m_pkthdr.csum_flags &
   4771 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   4772 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4773 	} else {
   4774 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4775 	}
   4776 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   4777 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
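
	/*
	 * Illustrative sketch (not driver logic): for an untagged IPv4
	 * TCP packet with a 14-byte Ethernet header and a 20-byte IP
	 * header, the packing above yields
	 *
	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
	 *
	 * MACLEN carries the L2 header length and IPLEN the L3 header
	 * length; the VLAN field is filled in just below only when an
	 * outbound tag is present.
	 */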
   4778 
   4779 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4780 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   4781 		     << NQTXC_VLLEN_VLAN_SHIFT);
   4782 		*cmdlenp |= NQTX_CMD_VLE;
   4783 	}
   4784 
   4785 	mssidx = 0;
   4786 
   4787 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4788 		int hlen = offset + iphl;
   4789 		int tcp_hlen;
   4790 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4791 
   4792 		if (__predict_false(m0->m_len <
   4793 				    (hlen + sizeof(struct tcphdr)))) {
   4794 			/*
   4795 			 * TCP/IP headers are not in the first mbuf; we need
   4796 			 * to do this the slow and painful way.  Let's just
   4797 			 * hope this doesn't happen very often.
   4798 			 */
   4799 			struct tcphdr th;
   4800 
   4801 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4802 
   4803 			m_copydata(m0, hlen, sizeof(th), &th);
   4804 			if (v4) {
   4805 				struct ip ip;
   4806 
   4807 				m_copydata(m0, offset, sizeof(ip), &ip);
   4808 				ip.ip_len = 0;
   4809 				m_copyback(m0,
   4810 				    offset + offsetof(struct ip, ip_len),
   4811 				    sizeof(ip.ip_len), &ip.ip_len);
   4812 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4813 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4814 			} else {
   4815 				struct ip6_hdr ip6;
   4816 
   4817 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4818 				ip6.ip6_plen = 0;
   4819 				m_copyback(m0,
   4820 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4821 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4822 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4823 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4824 			}
   4825 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4826 			    sizeof(th.th_sum), &th.th_sum);
   4827 
   4828 			tcp_hlen = th.th_off << 2;
   4829 		} else {
   4830 			/*
   4831 			 * TCP/IP headers are in the first mbuf; we can do
   4832 			 * this the easy way.
   4833 			 */
   4834 			struct tcphdr *th;
   4835 
   4836 			if (v4) {
   4837 				struct ip *ip =
   4838 				    (void *)(mtod(m0, char *) + offset);
   4839 				th = (void *)(mtod(m0, char *) + hlen);
   4840 
   4841 				ip->ip_len = 0;
   4842 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4843 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4844 			} else {
   4845 				struct ip6_hdr *ip6 =
   4846 				    (void *)(mtod(m0, char *) + offset);
   4847 				th = (void *)(mtod(m0, char *) + hlen);
   4848 
   4849 				ip6->ip6_plen = 0;
   4850 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4851 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4852 			}
   4853 			tcp_hlen = th->th_off << 2;
   4854 		}
   4855 		hlen += tcp_hlen;
   4856 		*cmdlenp |= NQTX_CMD_TSE;
   4857 
   4858 		if (v4) {
   4859 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4860 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   4861 		} else {
   4862 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4863 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   4864 		}
   4865 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   4866 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4867 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   4868 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   4869 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   4870 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   4871 	} else {
   4872 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   4873 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4874 	}
   4875 
   4876 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   4877 		*fieldsp |= NQTXD_FIELDS_IXSM;
   4878 		cmdc |= NQTXC_CMD_IP4;
   4879 	}
   4880 
   4881 	if (m0->m_pkthdr.csum_flags &
   4882 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4883 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4884 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4885 			cmdc |= NQTXC_CMD_TCP;
   4886 		} else {
   4887 			cmdc |= NQTXC_CMD_UDP;
   4888 		}
   4889 		cmdc |= NQTXC_CMD_IP4;
   4890 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4891 	}
   4892 	if (m0->m_pkthdr.csum_flags &
   4893 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4894 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4895 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4896 			cmdc |= NQTXC_CMD_TCP;
   4897 		} else {
   4898 			cmdc |= NQTXC_CMD_UDP;
   4899 		}
   4900 		cmdc |= NQTXC_CMD_IP6;
   4901 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4902 	}
   4903 
   4904 	/* Fill in the context descriptor. */
   4905 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   4906 	    htole32(vl_len);
   4907 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   4908 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   4909 	    htole32(cmdc);
   4910 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   4911 	    htole32(mssidx);
   4912 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4913 	DPRINTF(WM_DEBUG_TX,
   4914 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   4915 	    sc->sc_txnext, 0, vl_len));
   4916 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   4917 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4918 	txs->txs_ndesc++;
   4919 	return 0;
   4920 }
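
/*
 * A sketch of the TSO header fixup wm_nq_tx_offload() performs above
 * (informal, for illustration): before a to-be-segmented packet is
 * handed to the chip, the IP length field is zeroed and the TCP
 * checksum is seeded with only the pseudo-header sum, e.g. for IPv4:
 *
 *	ip->ip_len = 0;
 *	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
 *	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 *
 * The hardware then fills in per-segment lengths and completes the
 * checksum for every segment it emits.
 */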
   4921 
   4922 /*
   4923  * wm_nq_start:		[ifnet interface function]
   4924  *
   4925  *	Start packet transmission on the interface for NEWQUEUE devices
   4926  */
   4927 static void
   4928 wm_nq_start(struct ifnet *ifp)
   4929 {
   4930 	struct wm_softc *sc = ifp->if_softc;
   4931 
   4932 	WM_LOCK(sc);
   4933 	if (!sc->sc_stopping)
   4934 		wm_nq_start_locked(ifp);
   4935 	WM_UNLOCK(sc);
   4936 }
   4937 
   4938 static void
   4939 wm_nq_start_locked(struct ifnet *ifp)
   4940 {
   4941 	struct wm_softc *sc = ifp->if_softc;
   4942 	struct mbuf *m0;
   4943 	struct m_tag *mtag;
   4944 	struct wm_txsoft *txs;
   4945 	bus_dmamap_t dmamap;
   4946 	int error, nexttx, lasttx = -1, seg, segs_needed;
   4947 	bool do_csum, sent;
   4948 
   4949 	KASSERT(WM_LOCKED(sc));
   4950 
   4951 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4952 		return;
   4953 
   4954 	sent = false;
   4955 
   4956 	/*
   4957 	 * Loop through the send queue, setting up transmit descriptors
   4958 	 * until we drain the queue, or use up all available transmit
   4959 	 * descriptors.
   4960 	 */
   4961 	for (;;) {
   4962 		m0 = NULL;
   4963 
   4964 		/* Get a work queue entry. */
   4965 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4966 			wm_txintr(sc);
   4967 			if (sc->sc_txsfree == 0) {
   4968 				DPRINTF(WM_DEBUG_TX,
   4969 				    ("%s: TX: no free job descriptors\n",
   4970 					device_xname(sc->sc_dev)));
   4971 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4972 				break;
   4973 			}
   4974 		}
   4975 
   4976 		/* Grab a packet off the queue. */
   4977 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4978 		if (m0 == NULL)
   4979 			break;
   4980 
   4981 		DPRINTF(WM_DEBUG_TX,
   4982 		    ("%s: TX: have packet to transmit: %p\n",
   4983 		    device_xname(sc->sc_dev), m0));
   4984 
   4985 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4986 		dmamap = txs->txs_dmamap;
   4987 
   4988 		/*
   4989 		 * Load the DMA map.  If this fails, the packet either
   4990 		 * didn't fit in the allotted number of segments, or we
   4991 		 * were short on resources.  For the too-many-segments
   4992 		 * case, we simply report an error and drop the packet,
   4993 		 * since we can't sanely copy a jumbo packet to a single
   4994 		 * buffer.
   4995 		 */
   4996 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4997 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4998 		if (error) {
   4999 			if (error == EFBIG) {
   5000 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5001 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5002 				    "DMA segments, dropping...\n",
   5003 				    device_xname(sc->sc_dev));
   5004 				wm_dump_mbuf_chain(sc, m0);
   5005 				m_freem(m0);
   5006 				continue;
   5007 			}
   5008 			/* Short on resources, just stop for now. */
   5009 			DPRINTF(WM_DEBUG_TX,
   5010 			    ("%s: TX: dmamap load failed: %d\n",
   5011 			    device_xname(sc->sc_dev), error));
   5012 			break;
   5013 		}
   5014 
   5015 		segs_needed = dmamap->dm_nsegs;
   5016 
   5017 		/*
   5018 		 * Ensure we have enough descriptors free to describe
   5019 		 * the packet.  Note, we always reserve one descriptor
   5020 		 * at the end of the ring due to the semantics of the
   5021 		 * TDT register, plus one more in the event we need
   5022 		 * to load offload context.
   5023 		 */
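		/*
		 * Worked example (illustrative): with sc_txfree == 5,
		 * a 3-segment packet passes (3 <= 5 - 2) while a
		 * 4-segment packet stalls below, keeping the two
		 * reserved descriptors free.
		 */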
   5024 		if (segs_needed > sc->sc_txfree - 2) {
   5025 			/*
   5026 			 * Not enough free descriptors to transmit this
   5027 			 * packet.  We haven't committed anything yet,
   5028 			 * so just unload the DMA map, put the packet
    5029 			 * back on the queue, and punt.  Notify the upper
   5030 			 * layer that there are no more slots left.
   5031 			 */
   5032 			DPRINTF(WM_DEBUG_TX,
   5033 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5034 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
    5035 			    segs_needed, sc->sc_txfree - 2));
   5036 			ifp->if_flags |= IFF_OACTIVE;
   5037 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5038 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5039 			break;
   5040 		}
   5041 
   5042 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5043 
   5044 		DPRINTF(WM_DEBUG_TX,
   5045 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5046 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5047 
   5048 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5049 
   5050 		/*
   5051 		 * Store a pointer to the packet so that we can free it
   5052 		 * later.
   5053 		 *
   5054 		 * Initially, we consider the number of descriptors the
    5055 		 * packet uses to be the number of DMA segments.  This may be
   5056 		 * incremented by 1 if we do checksum offload (a descriptor
   5057 		 * is used to set the checksum context).
   5058 		 */
   5059 		txs->txs_mbuf = m0;
   5060 		txs->txs_firstdesc = sc->sc_txnext;
   5061 		txs->txs_ndesc = segs_needed;
   5062 
   5063 		/* Set up offload parameters for this packet. */
   5064 		uint32_t cmdlen, fields, dcmdlen;
   5065 		if (m0->m_pkthdr.csum_flags &
   5066 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5067 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5068 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5069 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5070 			    &do_csum) != 0) {
   5071 				/* Error message already displayed. */
   5072 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5073 				continue;
   5074 			}
   5075 		} else {
   5076 			do_csum = false;
   5077 			cmdlen = 0;
   5078 			fields = 0;
   5079 		}
   5080 
   5081 		/* Sync the DMA map. */
   5082 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5083 		    BUS_DMASYNC_PREWRITE);
   5084 
   5085 		/* Initialize the first transmit descriptor. */
   5086 		nexttx = sc->sc_txnext;
   5087 		if (!do_csum) {
   5088 			/* setup a legacy descriptor */
   5089 			wm_set_dma_addr(
   5090 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5091 			    dmamap->dm_segs[0].ds_addr);
   5092 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5093 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5094 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5095 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5096 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5097 			    NULL) {
   5098 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5099 				    htole32(WTX_CMD_VLE);
   5100 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5101 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5102 			} else {
   5103 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5104 			}
   5105 			dcmdlen = 0;
   5106 		} else {
   5107 			/* setup an advanced data descriptor */
   5108 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5109 			    htole64(dmamap->dm_segs[0].ds_addr);
   5110 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5111 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
    5112 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5113 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5114 			    htole32(fields);
   5115 			DPRINTF(WM_DEBUG_TX,
   5116 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5117 			    device_xname(sc->sc_dev), nexttx,
   5118 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5119 			DPRINTF(WM_DEBUG_TX,
   5120 			    ("\t 0x%08x%08x\n", fields,
   5121 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5122 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5123 		}
   5124 
   5125 		lasttx = nexttx;
   5126 		nexttx = WM_NEXTTX(sc, nexttx);
   5127 		/*
    5128 		 * Fill in the next descriptors.  The legacy and advanced
    5129 		 * formats are the same from here on.
   5130 		 */
   5131 		for (seg = 1; seg < dmamap->dm_nsegs;
   5132 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5133 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5134 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5135 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5136 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5137 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5138 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5139 			lasttx = nexttx;
   5140 
   5141 			DPRINTF(WM_DEBUG_TX,
   5142 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5143 			     "len %#04zx\n",
   5144 			    device_xname(sc->sc_dev), nexttx,
   5145 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5146 			    dmamap->dm_segs[seg].ds_len));
   5147 		}
   5148 
   5149 		KASSERT(lasttx != -1);
   5150 
   5151 		/*
   5152 		 * Set up the command byte on the last descriptor of
   5153 		 * the packet.  If we're in the interrupt delay window,
   5154 		 * delay the interrupt.
   5155 		 */
   5156 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5157 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   5158 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5159 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5160 
   5161 		txs->txs_lastdesc = lasttx;
   5162 
   5163 		DPRINTF(WM_DEBUG_TX,
   5164 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5165 		    device_xname(sc->sc_dev),
   5166 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5167 
   5168 		/* Sync the descriptors we're using. */
   5169 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5170 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5171 
   5172 		/* Give the packet to the chip. */
   5173 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5174 		sent = true;
   5175 
   5176 		DPRINTF(WM_DEBUG_TX,
   5177 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5178 
   5179 		DPRINTF(WM_DEBUG_TX,
   5180 		    ("%s: TX: finished transmitting packet, job %d\n",
   5181 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5182 
   5183 		/* Advance the tx pointer. */
   5184 		sc->sc_txfree -= txs->txs_ndesc;
   5185 		sc->sc_txnext = nexttx;
   5186 
   5187 		sc->sc_txsfree--;
   5188 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5189 
   5190 		/* Pass the packet to any BPF listeners. */
   5191 		bpf_mtap(ifp, m0);
   5192 	}
   5193 
   5194 	if (m0 != NULL) {
   5195 		ifp->if_flags |= IFF_OACTIVE;
   5196 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5197 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5198 		m_freem(m0);
   5199 	}
   5200 
   5201 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5202 		/* No more slots; notify upper layer. */
   5203 		ifp->if_flags |= IFF_OACTIVE;
   5204 	}
   5205 
   5206 	if (sent) {
   5207 		/* Set a watchdog timer in case the chip flakes out. */
   5208 		ifp->if_timer = 5;
   5209 	}
   5210 }
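
/*
 * Informal invariant of the transmit paths above and wm_txintr() below
 * (a reading aid, not code): sc_txfree plus the sum of txs_ndesc over
 * all outstanding jobs always equals the total number of transmit
 * descriptors; the start routines subtract txs_ndesc when a packet is
 * committed and wm_txintr() adds it back when the job completes.
 */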
   5211 
   5212 /* Interrupt */
   5213 
   5214 /*
   5215  * wm_txintr:
   5216  *
   5217  *	Helper; handle transmit interrupts.
   5218  */
   5219 static void
   5220 wm_txintr(struct wm_softc *sc)
   5221 {
   5222 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5223 	struct wm_txsoft *txs;
   5224 	uint8_t status;
   5225 	int i;
   5226 
   5227 	if (sc->sc_stopping)
   5228 		return;
   5229 
   5230 	ifp->if_flags &= ~IFF_OACTIVE;
   5231 
   5232 	/*
   5233 	 * Go through the Tx list and free mbufs for those
   5234 	 * frames which have been transmitted.
   5235 	 */
   5236 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5237 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5238 		txs = &sc->sc_txsoft[i];
   5239 
   5240 		DPRINTF(WM_DEBUG_TX,
   5241 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5242 
   5243 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5244 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5245 
   5246 		status =
   5247 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   5248 		if ((status & WTX_ST_DD) == 0) {
   5249 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5250 			    BUS_DMASYNC_PREREAD);
   5251 			break;
   5252 		}
   5253 
   5254 		DPRINTF(WM_DEBUG_TX,
   5255 		    ("%s: TX: job %d done: descs %d..%d\n",
   5256 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5257 		    txs->txs_lastdesc));
   5258 
   5259 		/*
   5260 		 * XXX We should probably be using the statistics
   5261 		 * XXX registers, but I don't know if they exist
   5262 		 * XXX on chips before the i82544.
   5263 		 */
   5264 
   5265 #ifdef WM_EVENT_COUNTERS
   5266 		if (status & WTX_ST_TU)
   5267 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5268 #endif /* WM_EVENT_COUNTERS */
   5269 
   5270 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5271 			ifp->if_oerrors++;
   5272 			if (status & WTX_ST_LC)
   5273 				log(LOG_WARNING, "%s: late collision\n",
   5274 				    device_xname(sc->sc_dev));
   5275 			else if (status & WTX_ST_EC) {
   5276 				ifp->if_collisions += 16;
   5277 				log(LOG_WARNING, "%s: excessive collisions\n",
   5278 				    device_xname(sc->sc_dev));
   5279 			}
   5280 		} else
   5281 			ifp->if_opackets++;
   5282 
   5283 		sc->sc_txfree += txs->txs_ndesc;
   5284 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5285 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5286 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5287 		m_freem(txs->txs_mbuf);
   5288 		txs->txs_mbuf = NULL;
   5289 	}
   5290 
   5291 	/* Update the dirty transmit buffer pointer. */
   5292 	sc->sc_txsdirty = i;
   5293 	DPRINTF(WM_DEBUG_TX,
   5294 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5295 
   5296 	/*
   5297 	 * If there are no more pending transmissions, cancel the watchdog
   5298 	 * timer.
   5299 	 */
   5300 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5301 		ifp->if_timer = 0;
   5302 }
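
/*
 * Note on the completion scan above (informal): WTX_CMD_RS is set only
 * on the last descriptor of each packet, so only txs_lastdesc receives
 * a status write-back.  Seeing WTX_ST_DD there implies the whole job
 * has completed, which is why all of the job's descriptors can be
 * reclaimed at once.
 */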
   5303 
   5304 /*
   5305  * wm_rxintr:
   5306  *
   5307  *	Helper; handle receive interrupts.
   5308  */
   5309 static void
   5310 wm_rxintr(struct wm_softc *sc)
   5311 {
   5312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5313 	struct wm_rxsoft *rxs;
   5314 	struct mbuf *m;
   5315 	int i, len;
   5316 	uint8_t status, errors;
   5317 	uint16_t vlantag;
   5318 
   5319 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5320 		rxs = &sc->sc_rxsoft[i];
   5321 
   5322 		DPRINTF(WM_DEBUG_RX,
   5323 		    ("%s: RX: checking descriptor %d\n",
   5324 		    device_xname(sc->sc_dev), i));
   5325 
   5326 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5327 
   5328 		status = sc->sc_rxdescs[i].wrx_status;
   5329 		errors = sc->sc_rxdescs[i].wrx_errors;
   5330 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5331 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5332 
   5333 		if ((status & WRX_ST_DD) == 0) {
   5334 			/* We have processed all of the receive descriptors. */
   5335 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5336 			break;
   5337 		}
   5338 
   5339 		if (__predict_false(sc->sc_rxdiscard)) {
   5340 			DPRINTF(WM_DEBUG_RX,
   5341 			    ("%s: RX: discarding contents of descriptor %d\n",
   5342 			    device_xname(sc->sc_dev), i));
   5343 			WM_INIT_RXDESC(sc, i);
   5344 			if (status & WRX_ST_EOP) {
   5345 				/* Reset our state. */
   5346 				DPRINTF(WM_DEBUG_RX,
   5347 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5348 				    device_xname(sc->sc_dev)));
   5349 				sc->sc_rxdiscard = 0;
   5350 			}
   5351 			continue;
   5352 		}
   5353 
   5354 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5355 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5356 
   5357 		m = rxs->rxs_mbuf;
   5358 
   5359 		/*
   5360 		 * Add a new receive buffer to the ring, unless of
   5361 		 * course the length is zero. Treat the latter as a
   5362 		 * failed mapping.
   5363 		 */
   5364 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5365 			/*
   5366 			 * Failed, throw away what we've done so
   5367 			 * far, and discard the rest of the packet.
   5368 			 */
   5369 			ifp->if_ierrors++;
   5370 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5371 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5372 			WM_INIT_RXDESC(sc, i);
   5373 			if ((status & WRX_ST_EOP) == 0)
   5374 				sc->sc_rxdiscard = 1;
   5375 			if (sc->sc_rxhead != NULL)
   5376 				m_freem(sc->sc_rxhead);
   5377 			WM_RXCHAIN_RESET(sc);
   5378 			DPRINTF(WM_DEBUG_RX,
   5379 			    ("%s: RX: Rx buffer allocation failed, "
   5380 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5381 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5382 			continue;
   5383 		}
   5384 
   5385 		m->m_len = len;
   5386 		sc->sc_rxlen += len;
   5387 		DPRINTF(WM_DEBUG_RX,
   5388 		    ("%s: RX: buffer at %p len %d\n",
   5389 		    device_xname(sc->sc_dev), m->m_data, len));
   5390 
   5391 		/* If this is not the end of the packet, keep looking. */
   5392 		if ((status & WRX_ST_EOP) == 0) {
   5393 			WM_RXCHAIN_LINK(sc, m);
   5394 			DPRINTF(WM_DEBUG_RX,
   5395 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5396 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5397 			continue;
   5398 		}
   5399 
   5400 		/*
   5401 		 * Okay, we have the entire packet now.  The chip is
    5402 		 * configured to include the FCS except on the I350, I354
    5403 		 * and I21[01] (not all chips can be configured to strip
    5404 		 * it), so we need to trim it.
    5405 		 * We may need to adjust the length of the previous mbuf
    5406 		 * in the chain if the current mbuf is too short.
    5407 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    5408 		 * register is always set on the I350, so we don't trim there.
   5409 		 */
   5410 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5411 		    && (sc->sc_type != WM_T_I210)
   5412 		    && (sc->sc_type != WM_T_I211)) {
   5413 			if (m->m_len < ETHER_CRC_LEN) {
   5414 				sc->sc_rxtail->m_len
   5415 				    -= (ETHER_CRC_LEN - m->m_len);
   5416 				m->m_len = 0;
   5417 			} else
   5418 				m->m_len -= ETHER_CRC_LEN;
   5419 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5420 		} else
   5421 			len = sc->sc_rxlen;
   5422 
   5423 		WM_RXCHAIN_LINK(sc, m);
   5424 
   5425 		*sc->sc_rxtailp = NULL;
   5426 		m = sc->sc_rxhead;
   5427 
   5428 		WM_RXCHAIN_RESET(sc);
   5429 
   5430 		DPRINTF(WM_DEBUG_RX,
   5431 		    ("%s: RX: have entire packet, len -> %d\n",
   5432 		    device_xname(sc->sc_dev), len));
   5433 
   5434 		/* If an error occurred, update stats and drop the packet. */
   5435 		if (errors &
   5436 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5437 			if (errors & WRX_ER_SE)
   5438 				log(LOG_WARNING, "%s: symbol error\n",
   5439 				    device_xname(sc->sc_dev));
   5440 			else if (errors & WRX_ER_SEQ)
   5441 				log(LOG_WARNING, "%s: receive sequence error\n",
   5442 				    device_xname(sc->sc_dev));
   5443 			else if (errors & WRX_ER_CE)
   5444 				log(LOG_WARNING, "%s: CRC error\n",
   5445 				    device_xname(sc->sc_dev));
   5446 			m_freem(m);
   5447 			continue;
   5448 		}
   5449 
   5450 		/* No errors.  Receive the packet. */
   5451 		m->m_pkthdr.rcvif = ifp;
   5452 		m->m_pkthdr.len = len;
   5453 
   5454 		/*
   5455 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5456 		 * for us.  Associate the tag with the packet.
   5457 		 */
   5458 		/* XXXX should check for i350 and i354 */
   5459 		if ((status & WRX_ST_VP) != 0) {
   5460 			VLAN_INPUT_TAG(ifp, m,
   5461 			    le16toh(vlantag),
   5462 			    continue);
   5463 		}
   5464 
   5465 		/* Set up checksum info for this packet. */
   5466 		if ((status & WRX_ST_IXSM) == 0) {
   5467 			if (status & WRX_ST_IPCS) {
   5468 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5469 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5470 				if (errors & WRX_ER_IPE)
   5471 					m->m_pkthdr.csum_flags |=
   5472 					    M_CSUM_IPv4_BAD;
   5473 			}
   5474 			if (status & WRX_ST_TCPCS) {
   5475 				/*
   5476 				 * Note: we don't know if this was TCP or UDP,
   5477 				 * so we just set both bits, and expect the
   5478 				 * upper layers to deal.
   5479 				 */
   5480 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5481 				m->m_pkthdr.csum_flags |=
   5482 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5483 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5484 				if (errors & WRX_ER_TCPE)
   5485 					m->m_pkthdr.csum_flags |=
   5486 					    M_CSUM_TCP_UDP_BAD;
   5487 			}
   5488 		}
   5489 
   5490 		ifp->if_ipackets++;
   5491 
   5492 		WM_UNLOCK(sc);
   5493 
   5494 		/* Pass this up to any BPF listeners. */
   5495 		bpf_mtap(ifp, m);
   5496 
   5497 		/* Pass it on. */
   5498 		(*ifp->if_input)(ifp, m);
   5499 
   5500 		WM_LOCK(sc);
   5501 
   5502 		if (sc->sc_stopping)
   5503 			break;
   5504 	}
   5505 
   5506 	/* Update the receive pointer. */
   5507 	sc->sc_rxptr = i;
   5508 
   5509 	DPRINTF(WM_DEBUG_RX,
   5510 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5511 }
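
/*
 * Sketch of the receive chaining in wm_rxintr() above: a frame larger
 * than one buffer spans several descriptors, and only the last has
 * WRX_ST_EOP set.  Non-EOP buffers are appended with WM_RXCHAIN_LINK()
 * and their lengths accumulated in sc_rxlen; on EOP the chain is
 * terminated, the FCS trimmed where the chip left it in place, and the
 * assembled mbuf chain is handed to if_input.
 */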
   5512 
   5513 /*
   5514  * wm_linkintr_gmii:
   5515  *
   5516  *	Helper; handle link interrupts for GMII.
   5517  */
   5518 static void
   5519 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5520 {
   5521 
   5522 	KASSERT(WM_LOCKED(sc));
   5523 
   5524 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5525 		__func__));
   5526 
   5527 	if (icr & ICR_LSC) {
   5528 		DPRINTF(WM_DEBUG_LINK,
   5529 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5530 			device_xname(sc->sc_dev)));
   5531 		mii_pollstat(&sc->sc_mii);
   5532 		if (sc->sc_type == WM_T_82543) {
   5533 			int miistatus, active;
   5534 
   5535 			/*
   5536 			 * With 82543, we need to force speed and
   5537 			 * duplex on the MAC equal to what the PHY
   5538 			 * speed and duplex configuration is.
   5539 			 */
   5540 			miistatus = sc->sc_mii.mii_media_status;
   5541 
   5542 			if (miistatus & IFM_ACTIVE) {
   5543 				active = sc->sc_mii.mii_media_active;
   5544 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5545 				switch (IFM_SUBTYPE(active)) {
   5546 				case IFM_10_T:
   5547 					sc->sc_ctrl |= CTRL_SPEED_10;
   5548 					break;
   5549 				case IFM_100_TX:
   5550 					sc->sc_ctrl |= CTRL_SPEED_100;
   5551 					break;
   5552 				case IFM_1000_T:
   5553 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5554 					break;
   5555 				default:
   5556 					/*
    5557 					 * Fiber?
    5558 					 * Should not get here.
   5559 					 */
   5560 					printf("unknown media (%x)\n",
   5561 					    active);
   5562 					break;
   5563 				}
   5564 				if (active & IFM_FDX)
   5565 					sc->sc_ctrl |= CTRL_FD;
   5566 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5567 			}
   5568 		} else if ((sc->sc_type == WM_T_ICH8)
   5569 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   5570 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   5571 		} else if (sc->sc_type == WM_T_PCH) {
   5572 			wm_k1_gig_workaround_hv(sc,
   5573 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   5574 		}
   5575 
   5576 		if ((sc->sc_phytype == WMPHY_82578)
   5577 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   5578 			== IFM_1000_T)) {
   5579 
   5580 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   5581 				delay(200*1000); /* XXX too big */
   5582 
   5583 				/* Link stall fix for link up */
   5584 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5585 				    HV_MUX_DATA_CTRL,
   5586 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   5587 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   5588 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5589 				    HV_MUX_DATA_CTRL,
   5590 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   5591 			}
   5592 		}
   5593 	} else if (icr & ICR_RXSEQ) {
   5594 		DPRINTF(WM_DEBUG_LINK,
   5595 		    ("%s: LINK Receive sequence error\n",
   5596 			device_xname(sc->sc_dev)));
   5597 	}
   5598 }
   5599 
   5600 /*
   5601  * wm_linkintr_tbi:
   5602  *
   5603  *	Helper; handle link interrupts for TBI mode.
   5604  */
   5605 static void
   5606 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   5607 {
   5608 	uint32_t status;
   5609 
   5610 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5611 		__func__));
   5612 
   5613 	status = CSR_READ(sc, WMREG_STATUS);
   5614 	if (icr & ICR_LSC) {
   5615 		if (status & STATUS_LU) {
   5616 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   5617 			    device_xname(sc->sc_dev),
   5618 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   5619 			/*
   5620 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   5621 			 * so we should update sc->sc_ctrl
   5622 			 */
   5623 
   5624 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5625 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   5626 			sc->sc_fcrtl &= ~FCRTL_XONE;
   5627 			if (status & STATUS_FD)
   5628 				sc->sc_tctl |=
   5629 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5630 			else
   5631 				sc->sc_tctl |=
   5632 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   5633 			if (sc->sc_ctrl & CTRL_TFCE)
   5634 				sc->sc_fcrtl |= FCRTL_XONE;
   5635 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5636 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   5637 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   5638 				      sc->sc_fcrtl);
   5639 			sc->sc_tbi_linkup = 1;
   5640 		} else {
   5641 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   5642 			    device_xname(sc->sc_dev)));
   5643 			sc->sc_tbi_linkup = 0;
   5644 		}
   5645 		wm_tbi_set_linkled(sc);
   5646 	} else if (icr & ICR_RXCFG) {
   5647 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
   5648 		    device_xname(sc->sc_dev)));
   5649 		sc->sc_tbi_nrxcfg++;
   5650 		wm_check_for_link(sc);
   5651 	} else if (icr & ICR_RXSEQ) {
   5652 		DPRINTF(WM_DEBUG_LINK,
   5653 		    ("%s: LINK: Receive sequence error\n",
   5654 		    device_xname(sc->sc_dev)));
   5655 	}
   5656 }
   5657 
   5658 /*
   5659  * wm_linkintr:
   5660  *
   5661  *	Helper; handle link interrupts.
   5662  */
   5663 static void
   5664 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   5665 {
   5666 
   5667 	if (sc->sc_flags & WM_F_HAS_MII)
   5668 		wm_linkintr_gmii(sc, icr);
   5669 	else
   5670 		wm_linkintr_tbi(sc, icr);
   5671 }
   5672 
   5673 /*
   5674  * wm_intr:
   5675  *
   5676  *	Interrupt service routine.
   5677  */
   5678 static int
   5679 wm_intr(void *arg)
   5680 {
   5681 	struct wm_softc *sc = arg;
   5682 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5683 	uint32_t icr;
   5684 	int handled = 0;
   5685 
   5686 	while (1 /* CONSTCOND */) {
   5687 		icr = CSR_READ(sc, WMREG_ICR);
   5688 		if ((icr & sc->sc_icr) == 0)
   5689 			break;
   5690 		rnd_add_uint32(&sc->rnd_source, icr);
   5691 
   5692 		WM_LOCK(sc);
   5693 
   5694 		if (sc->sc_stopping) {
   5695 			WM_UNLOCK(sc);
   5696 			break;
   5697 		}
   5698 
   5699 		handled = 1;
   5700 
   5701 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5702 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   5703 			DPRINTF(WM_DEBUG_RX,
   5704 			    ("%s: RX: got Rx intr 0x%08x\n",
   5705 			    device_xname(sc->sc_dev),
   5706 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   5707 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   5708 		}
   5709 #endif
   5710 		wm_rxintr(sc);
   5711 
   5712 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5713 		if (icr & ICR_TXDW) {
   5714 			DPRINTF(WM_DEBUG_TX,
   5715 			    ("%s: TX: got TXDW interrupt\n",
   5716 			    device_xname(sc->sc_dev)));
   5717 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   5718 		}
   5719 #endif
   5720 		wm_txintr(sc);
   5721 
   5722 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   5723 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   5724 			wm_linkintr(sc, icr);
   5725 		}
   5726 
   5727 		WM_UNLOCK(sc);
   5728 
   5729 		if (icr & ICR_RXO) {
   5730 #if defined(WM_DEBUG)
   5731 			log(LOG_WARNING, "%s: Receive overrun\n",
   5732 			    device_xname(sc->sc_dev));
   5733 #endif /* defined(WM_DEBUG) */
   5734 		}
   5735 	}
   5736 
   5737 	if (handled) {
   5738 		/* Try to get more packets going. */
   5739 		ifp->if_start(ifp);
   5740 	}
   5741 
   5742 	return handled;
   5743 }
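
/*
 * Informal note on the loop above: ICR is read-to-clear on these
 * controllers, so each CSR_READ(sc, WMREG_ICR) both fetches and
 * acknowledges the pending causes; the loop exits once a read shows
 * none of the causes enabled in sc_icr.
 */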
   5744 
   5745 /*
   5746  * Media related.
   5747  * GMII, SGMII, TBI (and SERDES)
   5748  */
   5749 
   5750 /* GMII related */
   5751 
   5752 /*
   5753  * wm_gmii_reset:
   5754  *
   5755  *	Reset the PHY.
   5756  */
   5757 static void
   5758 wm_gmii_reset(struct wm_softc *sc)
   5759 {
   5760 	uint32_t reg;
   5761 	int rv;
   5762 
   5763 	/* get phy semaphore */
   5764 	switch (sc->sc_type) {
   5765 	case WM_T_82571:
   5766 	case WM_T_82572:
   5767 	case WM_T_82573:
   5768 	case WM_T_82574:
   5769 	case WM_T_82583:
   5770 		 /* XXX should get sw semaphore, too */
   5771 		rv = wm_get_swsm_semaphore(sc);
   5772 		break;
   5773 	case WM_T_82575:
   5774 	case WM_T_82576:
   5775 	case WM_T_82580:
   5776 	case WM_T_82580ER:
   5777 	case WM_T_I350:
   5778 	case WM_T_I354:
   5779 	case WM_T_I210:
   5780 	case WM_T_I211:
   5781 	case WM_T_80003:
   5782 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5783 		break;
   5784 	case WM_T_ICH8:
   5785 	case WM_T_ICH9:
   5786 	case WM_T_ICH10:
   5787 	case WM_T_PCH:
   5788 	case WM_T_PCH2:
   5789 	case WM_T_PCH_LPT:
   5790 		rv = wm_get_swfwhw_semaphore(sc);
   5791 		break;
   5792 	default:
    5793 		/* nothing to do */
   5794 		rv = 0;
   5795 		break;
   5796 	}
   5797 	if (rv != 0) {
   5798 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   5799 		    __func__);
   5800 		return;
   5801 	}
   5802 
   5803 	switch (sc->sc_type) {
   5804 	case WM_T_82542_2_0:
   5805 	case WM_T_82542_2_1:
   5806 		/* null */
   5807 		break;
   5808 	case WM_T_82543:
   5809 		/*
   5810 		 * With 82543, we need to force speed and duplex on the MAC
   5811 		 * equal to what the PHY speed and duplex configuration is.
   5812 		 * In addition, we need to perform a hardware reset on the PHY
   5813 		 * to take it out of reset.
   5814 		 */
   5815 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   5816 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5817 
   5818 		/* The PHY reset pin is active-low. */
   5819 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5820 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   5821 		    CTRL_EXT_SWDPIN(4));
   5822 		reg |= CTRL_EXT_SWDPIO(4);
   5823 
   5824 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5825 		CSR_WRITE_FLUSH(sc);
   5826 		delay(10*1000);
   5827 
   5828 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   5829 		CSR_WRITE_FLUSH(sc);
   5830 		delay(150);
   5831 #if 0
   5832 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   5833 #endif
   5834 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   5835 		break;
   5836 	case WM_T_82544:	/* reset 10000us */
   5837 	case WM_T_82540:
   5838 	case WM_T_82545:
   5839 	case WM_T_82545_3:
   5840 	case WM_T_82546:
   5841 	case WM_T_82546_3:
   5842 	case WM_T_82541:
   5843 	case WM_T_82541_2:
   5844 	case WM_T_82547:
   5845 	case WM_T_82547_2:
   5846 	case WM_T_82571:	/* reset 100us */
   5847 	case WM_T_82572:
   5848 	case WM_T_82573:
   5849 	case WM_T_82574:
   5850 	case WM_T_82575:
   5851 	case WM_T_82576:
   5852 	case WM_T_82580:
   5853 	case WM_T_82580ER:
   5854 	case WM_T_I350:
   5855 	case WM_T_I354:
   5856 	case WM_T_I210:
   5857 	case WM_T_I211:
   5858 	case WM_T_82583:
   5859 	case WM_T_80003:
   5860 		/* generic reset */
   5861 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5862 		CSR_WRITE_FLUSH(sc);
   5863 		delay(20000);
   5864 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5865 		CSR_WRITE_FLUSH(sc);
   5866 		delay(20000);
   5867 
   5868 		if ((sc->sc_type == WM_T_82541)
   5869 		    || (sc->sc_type == WM_T_82541_2)
   5870 		    || (sc->sc_type == WM_T_82547)
   5871 		    || (sc->sc_type == WM_T_82547_2)) {
    5872 			/* workarounds for IGP are done in igp_reset() */
   5873 			/* XXX add code to set LED after phy reset */
   5874 		}
   5875 		break;
   5876 	case WM_T_ICH8:
   5877 	case WM_T_ICH9:
   5878 	case WM_T_ICH10:
   5879 	case WM_T_PCH:
   5880 	case WM_T_PCH2:
   5881 	case WM_T_PCH_LPT:
   5882 		/* generic reset */
   5883 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5884 		CSR_WRITE_FLUSH(sc);
   5885 		delay(100);
   5886 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5887 		CSR_WRITE_FLUSH(sc);
   5888 		delay(150);
   5889 		break;
   5890 	default:
   5891 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   5892 		    __func__);
   5893 		break;
   5894 	}
   5895 
   5896 	/* release PHY semaphore */
   5897 	switch (sc->sc_type) {
   5898 	case WM_T_82571:
   5899 	case WM_T_82572:
   5900 	case WM_T_82573:
   5901 	case WM_T_82574:
   5902 	case WM_T_82583:
   5903 		 /* XXX should put sw semaphore, too */
   5904 		wm_put_swsm_semaphore(sc);
   5905 		break;
   5906 	case WM_T_82575:
   5907 	case WM_T_82576:
   5908 	case WM_T_82580:
   5909 	case WM_T_82580ER:
   5910 	case WM_T_I350:
   5911 	case WM_T_I354:
   5912 	case WM_T_I210:
   5913 	case WM_T_I211:
   5914 	case WM_T_80003:
   5915 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5916 		break;
   5917 	case WM_T_ICH8:
   5918 	case WM_T_ICH9:
   5919 	case WM_T_ICH10:
   5920 	case WM_T_PCH:
   5921 	case WM_T_PCH2:
   5922 	case WM_T_PCH_LPT:
   5923 		wm_put_swfwhw_semaphore(sc);
   5924 		break;
   5925 	default:
    5926 		/* nothing to do */
   5927 		rv = 0;
   5928 		break;
   5929 	}
   5930 
   5931 	/* get_cfg_done */
   5932 	wm_get_cfg_done(sc);
   5933 
   5934 	/* extra setup */
   5935 	switch (sc->sc_type) {
   5936 	case WM_T_82542_2_0:
   5937 	case WM_T_82542_2_1:
   5938 	case WM_T_82543:
   5939 	case WM_T_82544:
   5940 	case WM_T_82540:
   5941 	case WM_T_82545:
   5942 	case WM_T_82545_3:
   5943 	case WM_T_82546:
   5944 	case WM_T_82546_3:
   5945 	case WM_T_82541_2:
   5946 	case WM_T_82547_2:
   5947 	case WM_T_82571:
   5948 	case WM_T_82572:
   5949 	case WM_T_82573:
   5950 	case WM_T_82574:
   5951 	case WM_T_82575:
   5952 	case WM_T_82576:
   5953 	case WM_T_82580:
   5954 	case WM_T_82580ER:
   5955 	case WM_T_I350:
   5956 	case WM_T_I354:
   5957 	case WM_T_I210:
   5958 	case WM_T_I211:
   5959 	case WM_T_82583:
   5960 	case WM_T_80003:
   5961 		/* null */
   5962 		break;
   5963 	case WM_T_82541:
   5964 	case WM_T_82547:
   5965 		/* XXX Configure actively LED after PHY reset */
   5966 		break;
   5967 	case WM_T_ICH8:
   5968 	case WM_T_ICH9:
   5969 	case WM_T_ICH10:
   5970 	case WM_T_PCH:
   5971 	case WM_T_PCH2:
   5972 	case WM_T_PCH_LPT:
    5973 		/* Allow time for h/w to get to a quiescent state after reset */
   5974 		delay(10*1000);
   5975 
   5976 		if (sc->sc_type == WM_T_PCH)
   5977 			wm_hv_phy_workaround_ich8lan(sc);
   5978 
   5979 		if (sc->sc_type == WM_T_PCH2)
   5980 			wm_lv_phy_workaround_ich8lan(sc);
   5981 
   5982 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   5983 			/*
   5984 			 * dummy read to clear the phy wakeup bit after lcd
   5985 			 * reset
   5986 			 */
   5987 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   5988 		}
   5989 
   5990 		/*
    5991 		 * XXX Configure the LCD with the extended configuration region
   5992 		 * in NVM
   5993 		 */
   5994 
   5995 		/* Configure the LCD with the OEM bits in NVM */
   5996 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   5997 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   5998 			/*
   5999 			 * Disable LPLU.
   6000 			 * XXX It seems that 82567 has LPLU, too.
   6001 			 */
   6002 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    6003 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   6004 			reg |= HV_OEM_BITS_ANEGNOW;
   6005 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6006 		}
   6007 		break;
   6008 	default:
   6009 		panic("%s: unknown type\n", __func__);
   6010 		break;
   6011 	}
   6012 }
   6013 
   6014 /*
   6015  * wm_get_phy_id_82575:
   6016  *
    6017  * Return the PHY ID, or -1 if it failed.
   6018  */
   6019 static int
   6020 wm_get_phy_id_82575(struct wm_softc *sc)
   6021 {
   6022 	uint32_t reg;
   6023 	int phyid = -1;
   6024 
   6025 	/* XXX */
   6026 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6027 		return -1;
   6028 
   6029 	if (wm_sgmii_uses_mdio(sc)) {
   6030 		switch (sc->sc_type) {
   6031 		case WM_T_82575:
   6032 		case WM_T_82576:
   6033 			reg = CSR_READ(sc, WMREG_MDIC);
   6034 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6035 			break;
   6036 		case WM_T_82580:
   6037 		case WM_T_I350:
   6038 		case WM_T_I354:
   6039 		case WM_T_I210:
   6040 		case WM_T_I211:
   6041 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6042 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6043 			break;
   6044 		default:
   6045 			return -1;
   6046 		}
   6047 	}
   6048 
   6049 	return phyid;
   6050 }
   6051 
   6052 
   6053 /*
   6054  * wm_gmii_mediainit:
   6055  *
   6056  *	Initialize media for use on 1000BASE-T devices.
   6057  */
   6058 static void
   6059 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6060 {
   6061 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6062 	struct mii_data *mii = &sc->sc_mii;
   6063 	uint32_t reg;
   6064 
   6065 	/* We have MII. */
   6066 	sc->sc_flags |= WM_F_HAS_MII;
   6067 
   6068 	if (sc->sc_type == WM_T_80003)
    6069 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   6070 	else
   6071 		sc->sc_tipg = TIPG_1000T_DFLT;
   6072 
   6073 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6074 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6075 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6076 	    || (sc->sc_type == WM_T_I211)) {
   6077 		reg = CSR_READ(sc, WMREG_PHPM);
   6078 		reg &= ~PHPM_GO_LINK_D;
   6079 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6080 	}
   6081 
   6082 	/*
   6083 	 * Let the chip set speed/duplex on its own based on
   6084 	 * signals from the PHY.
   6085 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6086 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6087 	 */
   6088 	sc->sc_ctrl |= CTRL_SLU;
   6089 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6090 
   6091 	/* Initialize our media structures and probe the GMII. */
   6092 	mii->mii_ifp = ifp;
   6093 
   6094 	/*
   6095 	 * Determine the PHY access method.
   6096 	 *
    6097 	 *  For SGMII, use the SGMII-specific method.
   6098 	 *
   6099 	 *  For some devices, we can determine the PHY access method
   6100 	 * from sc_type.
   6101 	 *
    6102 	 *  For ICH8 variants, it's difficult to determine the PHY access
    6103 	 * method by sc_type, so use the PCI product ID for some devices.
    6104 	 * For other ICH8 variants, try igp's method first.  If the PHY
    6105 	 * can't be detected, then fall back to bm's method.
   6106 	 */
   6107 	switch (prodid) {
   6108 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6109 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6110 		/* 82577 */
   6111 		sc->sc_phytype = WMPHY_82577;
   6112 		mii->mii_readreg = wm_gmii_hv_readreg;
   6113 		mii->mii_writereg = wm_gmii_hv_writereg;
   6114 		break;
   6115 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6116 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6117 		/* 82578 */
   6118 		sc->sc_phytype = WMPHY_82578;
   6119 		mii->mii_readreg = wm_gmii_hv_readreg;
   6120 		mii->mii_writereg = wm_gmii_hv_writereg;
   6121 		break;
   6122 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6123 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6124 		/* 82579 */
   6125 		sc->sc_phytype = WMPHY_82579;
   6126 		mii->mii_readreg = wm_gmii_hv_readreg;
   6127 		mii->mii_writereg = wm_gmii_hv_writereg;
   6128 		break;
   6129 	case PCI_PRODUCT_INTEL_I217_LM:
   6130 	case PCI_PRODUCT_INTEL_I217_V:
   6131 	case PCI_PRODUCT_INTEL_I218_LM:
   6132 	case PCI_PRODUCT_INTEL_I218_V:
   6133 		/* I21[78] */
   6134 		mii->mii_readreg = wm_gmii_hv_readreg;
   6135 		mii->mii_writereg = wm_gmii_hv_writereg;
   6136 		break;
   6137 	case PCI_PRODUCT_INTEL_82801I_BM:
   6138 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6139 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6140 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6141 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6142 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6143 		/* 82567 */
   6144 		sc->sc_phytype = WMPHY_BM;
   6145 		mii->mii_readreg = wm_gmii_bm_readreg;
   6146 		mii->mii_writereg = wm_gmii_bm_writereg;
   6147 		break;
   6148 	default:
   6149 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    6150 		    && !wm_sgmii_uses_mdio(sc)) {
   6151 			mii->mii_readreg = wm_sgmii_readreg;
   6152 			mii->mii_writereg = wm_sgmii_writereg;
   6153 		} else if (sc->sc_type >= WM_T_80003) {
   6154 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6155 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6156 		} else if (sc->sc_type >= WM_T_I210) {
   6157 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6158 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6159 		} else if (sc->sc_type >= WM_T_82580) {
   6160 			sc->sc_phytype = WMPHY_82580;
   6161 			mii->mii_readreg = wm_gmii_82580_readreg;
   6162 			mii->mii_writereg = wm_gmii_82580_writereg;
   6163 		} else if (sc->sc_type >= WM_T_82544) {
   6164 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6165 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6166 		} else {
   6167 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6168 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6169 		}
   6170 		break;
   6171 	}
   6172 	mii->mii_statchg = wm_gmii_statchg;
   6173 
   6174 	wm_gmii_reset(sc);
   6175 
   6176 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6177 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6178 	    wm_gmii_mediastatus);
   6179 
   6180 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6181 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6182 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6183 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6184 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6185 			/* Attach only one port */
   6186 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6187 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6188 		} else {
   6189 			int i, id;
   6190 			uint32_t ctrl_ext;
   6191 
   6192 			id = wm_get_phy_id_82575(sc);
   6193 			if (id != -1) {
   6194 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6195 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6196 			}
   6197 			if ((id == -1)
   6198 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6199 				/* Power on sgmii phy if it is disabled */
   6200 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6201 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6202 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6203 				CSR_WRITE_FLUSH(sc);
   6204 				delay(300*1000); /* XXX too long */
   6205 
    6206 				/* Try PHY addresses 1 through 7 */
   6207 				for (i = 1; i < 8; i++)
   6208 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6209 					    0xffffffff, i, MII_OFFSET_ANY,
   6210 					    MIIF_DOPAUSE);
   6211 
   6212 				/* restore previous sfp cage power state */
   6213 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6214 			}
   6215 		}
   6216 	} else {
   6217 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6218 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6219 	}
   6220 
   6221 	/*
   6222 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6223 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6224 	 */
   6225 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6226 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6227 		wm_set_mdio_slow_mode_hv(sc);
   6228 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6229 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6230 	}
   6231 
   6232 	/*
   6233 	 * (For ICH8 variants)
   6234 	 * If PHY detection failed, use BM's r/w function and retry.
   6235 	 */
   6236 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6237 		/* if failed, retry with *_bm_* */
   6238 		mii->mii_readreg = wm_gmii_bm_readreg;
   6239 		mii->mii_writereg = wm_gmii_bm_writereg;
   6240 
   6241 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6242 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6243 	}
   6244 
   6245 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6246 		/* No PHY was found */
   6247 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6248 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6249 		sc->sc_phytype = WMPHY_NONE;
   6250 	} else {
   6251 		/*
   6252 		 * PHY Found!
   6253 		 * Check PHY type.
   6254 		 */
   6255 		uint32_t model;
   6256 		struct mii_softc *child;
   6257 
   6258 		child = LIST_FIRST(&mii->mii_phys);
   6259 		if (device_is_a(child->mii_dev, "igphy")) {
   6260 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6261 
   6262 			model = isc->sc_mii.mii_mpd_model;
   6263 			if (model == MII_MODEL_yyINTEL_I82566)
   6264 				sc->sc_phytype = WMPHY_IGP_3;
   6265 		}
   6266 
   6267 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6268 	}
   6269 }
   6270 
   6271 /*
   6272  * wm_gmii_mediastatus:	[ifmedia interface function]
   6273  *
   6274  *	Get the current interface media status on a 1000BASE-T device.
   6275  */
   6276 static void
   6277 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6278 {
   6279 	struct wm_softc *sc = ifp->if_softc;
   6280 
   6281 	ether_mediastatus(ifp, ifmr);
   6282 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6283 	    | sc->sc_flowflags;
   6284 }
   6285 
   6286 /*
   6287  * wm_gmii_mediachange:	[ifmedia interface function]
   6288  *
   6289  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6290  */
   6291 static int
   6292 wm_gmii_mediachange(struct ifnet *ifp)
   6293 {
   6294 	struct wm_softc *sc = ifp->if_softc;
   6295 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6296 	int rc;
   6297 
   6298 	if ((ifp->if_flags & IFF_UP) == 0)
   6299 		return 0;
   6300 
   6301 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6302 	sc->sc_ctrl |= CTRL_SLU;
   6303 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6304 	    || (sc->sc_type > WM_T_82543)) {
   6305 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6306 	} else {
   6307 		sc->sc_ctrl &= ~CTRL_ASDE;
   6308 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6309 		if (ife->ifm_media & IFM_FDX)
   6310 			sc->sc_ctrl |= CTRL_FD;
   6311 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6312 		case IFM_10_T:
   6313 			sc->sc_ctrl |= CTRL_SPEED_10;
   6314 			break;
   6315 		case IFM_100_TX:
   6316 			sc->sc_ctrl |= CTRL_SPEED_100;
   6317 			break;
   6318 		case IFM_1000_T:
   6319 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6320 			break;
   6321 		default:
   6322 			panic("wm_gmii_mediachange: bad media 0x%x",
   6323 			    ife->ifm_media);
   6324 		}
   6325 	}
   6326 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6327 	if (sc->sc_type <= WM_T_82543)
   6328 		wm_gmii_reset(sc);
   6329 
   6330 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6331 		return 0;
   6332 	return rc;
   6333 }
   6334 
   6335 #define	MDI_IO		CTRL_SWDPIN(2)
   6336 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6337 #define	MDI_CLK		CTRL_SWDPIN(3)
   6338 
   6339 static void
   6340 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6341 {
   6342 	uint32_t i, v;
   6343 
   6344 	v = CSR_READ(sc, WMREG_CTRL);
   6345 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6346 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6347 
   6348 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6349 		if (data & i)
   6350 			v |= MDI_IO;
   6351 		else
   6352 			v &= ~MDI_IO;
   6353 		CSR_WRITE(sc, WMREG_CTRL, v);
   6354 		CSR_WRITE_FLUSH(sc);
   6355 		delay(10);
   6356 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6357 		CSR_WRITE_FLUSH(sc);
   6358 		delay(10);
   6359 		CSR_WRITE(sc, WMREG_CTRL, v);
   6360 		CSR_WRITE_FLUSH(sc);
   6361 		delay(10);
   6362 	}
   6363 }
   6364 
   6365 static uint32_t
   6366 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6367 {
   6368 	uint32_t v, i, data = 0;
   6369 
   6370 	v = CSR_READ(sc, WMREG_CTRL);
   6371 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6372 	v |= CTRL_SWDPIO(3);
   6373 
   6374 	CSR_WRITE(sc, WMREG_CTRL, v);
   6375 	CSR_WRITE_FLUSH(sc);
   6376 	delay(10);
   6377 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6378 	CSR_WRITE_FLUSH(sc);
   6379 	delay(10);
   6380 	CSR_WRITE(sc, WMREG_CTRL, v);
   6381 	CSR_WRITE_FLUSH(sc);
   6382 	delay(10);
   6383 
   6384 	for (i = 0; i < 16; i++) {
   6385 		data <<= 1;
   6386 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6387 		CSR_WRITE_FLUSH(sc);
   6388 		delay(10);
   6389 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6390 			data |= 1;
   6391 		CSR_WRITE(sc, WMREG_CTRL, v);
   6392 		CSR_WRITE_FLUSH(sc);
   6393 		delay(10);
   6394 	}
   6395 
   6396 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6397 	CSR_WRITE_FLUSH(sc);
   6398 	delay(10);
   6399 	CSR_WRITE(sc, WMREG_CTRL, v);
   6400 	CSR_WRITE_FLUSH(sc);
   6401 	delay(10);
   6402 
   6403 	return data;
   6404 }
   6405 
   6406 #undef MDI_IO
   6407 #undef MDI_DIR
   6408 #undef MDI_CLK
   6409 
   6410 /*
   6411  * wm_gmii_i82543_readreg:	[mii interface function]
   6412  *
   6413  *	Read a PHY register on the GMII (i82543 version).
   6414  */
   6415 static int
   6416 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6417 {
   6418 	struct wm_softc *sc = device_private(self);
   6419 	int rv;
   6420 
   6421 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6422 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6423 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6424 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6425 
   6426 	DPRINTF(WM_DEBUG_GMII,
   6427 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6428 	    device_xname(sc->sc_dev), phy, reg, rv));
   6429 
   6430 	return rv;
   6431 }
   6432 
   6433 /*
   6434  * wm_gmii_i82543_writereg:	[mii interface function]
   6435  *
   6436  *	Write a PHY register on the GMII (i82543 version).
   6437  */
   6438 static void
   6439 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6440 {
   6441 	struct wm_softc *sc = device_private(self);
   6442 
   6443 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6444 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6445 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6446 	    (MII_COMMAND_START << 30), 32);
   6447 }
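
/*
 * Illustrative sketch, not part of the driver proper: the two routines
 * above bit-bang standard IEEE 802.3 Clause 22 MDIO frames.  Assuming
 * the usual MII_COMMAND_* encodings already used in this file, the
 * 14-bit command word sent by the read path could be packed by a
 * helper like this (disabled, for exposition only):
 */
#if 0
static uint32_t
wm_i82543_mii_read_cmd(int phy, int reg)
{

	/* <start:2><opcode:2><phyaddr:5><regaddr:5>, MSB shifted out first */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif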
   6448 
   6449 /*
   6450  * wm_gmii_i82544_readreg:	[mii interface function]
   6451  *
   6452  *	Read a PHY register on the GMII.
   6453  */
   6454 static int
   6455 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6456 {
   6457 	struct wm_softc *sc = device_private(self);
   6458 	uint32_t mdic = 0;
   6459 	int i, rv;
   6460 
   6461 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6462 	    MDIC_REGADD(reg));
   6463 
   6464 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6465 		mdic = CSR_READ(sc, WMREG_MDIC);
   6466 		if (mdic & MDIC_READY)
   6467 			break;
   6468 		delay(50);
   6469 	}
   6470 
   6471 	if ((mdic & MDIC_READY) == 0) {
   6472 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6473 		    device_xname(sc->sc_dev), phy, reg);
   6474 		rv = 0;
   6475 	} else if (mdic & MDIC_E) {
   6476 #if 0 /* This is normal if no PHY is present. */
   6477 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6478 		    device_xname(sc->sc_dev), phy, reg);
   6479 #endif
   6480 		rv = 0;
   6481 	} else {
   6482 		rv = MDIC_DATA(mdic);
   6483 		if (rv == 0xffff)
   6484 			rv = 0;
   6485 	}
   6486 
   6487 	return rv;
   6488 }
   6489 
   6490 /*
   6491  * wm_gmii_i82544_writereg:	[mii interface function]
   6492  *
   6493  *	Write a PHY register on the GMII.
   6494  */
   6495 static void
   6496 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   6497 {
   6498 	struct wm_softc *sc = device_private(self);
   6499 	uint32_t mdic = 0;
   6500 	int i;
   6501 
   6502 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   6503 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   6504 
   6505 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6506 		mdic = CSR_READ(sc, WMREG_MDIC);
   6507 		if (mdic & MDIC_READY)
   6508 			break;
   6509 		delay(50);
   6510 	}
   6511 
   6512 	if ((mdic & MDIC_READY) == 0)
   6513 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   6514 		    device_xname(sc->sc_dev), phy, reg);
   6515 	else if (mdic & MDIC_E)
   6516 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   6517 		    device_xname(sc->sc_dev), phy, reg);
   6518 }
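
/*
 * Illustrative sketch, not part of the driver proper: a typical use of
 * the MDIC accessors above is fetching the two PHY identifier words.
 * MII_PHYIDR1/MII_PHYIDR2 are the standard register numbers from
 * <dev/mii/mii.h>.  Disabled, for exposition only.
 */
#if 0
static uint32_t
wm_example_phyid(device_t self, int phy)
{

	return (wm_gmii_i82544_readreg(self, phy, MII_PHYIDR1) << 16) |
	    wm_gmii_i82544_readreg(self, phy, MII_PHYIDR2);
}
#endif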
   6519 
   6520 /*
   6521  * wm_gmii_i80003_readreg:	[mii interface function]
   6522  *
    6523  *	Read a PHY register on the Kumeran bus.
    6524  * This could be handled by the PHY layer if we didn't have to lock the
    6525  * resource ...
   6526  */
   6527 static int
   6528 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   6529 {
   6530 	struct wm_softc *sc = device_private(self);
   6531 	int sem;
   6532 	int rv;
   6533 
    6534 	if (phy != 1) /* only one PHY on the Kumeran bus */
   6535 		return 0;
   6536 
   6537 	sem = swfwphysem[sc->sc_funcid];
   6538 	if (wm_get_swfw_semaphore(sc, sem)) {
   6539 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6540 		    __func__);
   6541 		return 0;
   6542 	}
   6543 
   6544 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6545 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6546 		    reg >> GG82563_PAGE_SHIFT);
   6547 	} else {
   6548 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6549 		    reg >> GG82563_PAGE_SHIFT);
   6550 	}
    6551 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   6552 	delay(200);
   6553 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6554 	delay(200);
   6555 
   6556 	wm_put_swfw_semaphore(sc, sem);
   6557 	return rv;
   6558 }
   6559 
   6560 /*
   6561  * wm_gmii_i80003_writereg:	[mii interface function]
   6562  *
    6563  *	Write a PHY register on the Kumeran bus.
    6564  * This could be handled by the PHY layer if we didn't have to lock the
    6565  * resource ...
   6566  */
   6567 static void
   6568 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   6569 {
   6570 	struct wm_softc *sc = device_private(self);
   6571 	int sem;
   6572 
    6573 	if (phy != 1) /* only one PHY on the Kumeran bus */
   6574 		return;
   6575 
   6576 	sem = swfwphysem[sc->sc_funcid];
   6577 	if (wm_get_swfw_semaphore(sc, sem)) {
   6578 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6579 		    __func__);
   6580 		return;
   6581 	}
   6582 
   6583 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6584 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6585 		    reg >> GG82563_PAGE_SHIFT);
   6586 	} else {
   6587 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6588 		    reg >> GG82563_PAGE_SHIFT);
   6589 	}
    6590 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   6591 	delay(200);
   6592 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6593 	delay(200);
   6594 
   6595 	wm_put_swfw_semaphore(sc, sem);
   6596 }
   6597 
   6598 /*
   6599  * wm_gmii_bm_readreg:	[mii interface function]
   6600  *
    6601  *	Read a PHY register on the BM PHY.
    6602  * This could be handled by the PHY layer if we didn't have to lock the
    6603  * resource ...
   6604  */
   6605 static int
   6606 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   6607 {
   6608 	struct wm_softc *sc = device_private(self);
   6609 	int sem;
   6610 	int rv;
   6611 
   6612 	sem = swfwphysem[sc->sc_funcid];
   6613 	if (wm_get_swfw_semaphore(sc, sem)) {
   6614 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6615 		    __func__);
   6616 		return 0;
   6617 	}
   6618 
   6619 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6620 		if (phy == 1)
   6621 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6622 			    reg);
   6623 		else
   6624 			wm_gmii_i82544_writereg(self, phy,
   6625 			    GG82563_PHY_PAGE_SELECT,
   6626 			    reg >> GG82563_PAGE_SHIFT);
   6627 	}
   6628 
   6629 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6630 	wm_put_swfw_semaphore(sc, sem);
   6631 	return rv;
   6632 }
   6633 
   6634 /*
   6635  * wm_gmii_bm_writereg:	[mii interface function]
   6636  *
    6637  *	Write a PHY register on the BM PHY.
    6638  * This could be handled by the PHY layer if we didn't have to lock the
    6639  * resource ...
   6640  */
   6641 static void
   6642 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   6643 {
   6644 	struct wm_softc *sc = device_private(self);
   6645 	int sem;
   6646 
   6647 	sem = swfwphysem[sc->sc_funcid];
   6648 	if (wm_get_swfw_semaphore(sc, sem)) {
   6649 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6650 		    __func__);
   6651 		return;
   6652 	}
   6653 
   6654 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6655 		if (phy == 1)
   6656 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6657 			    reg);
   6658 		else
   6659 			wm_gmii_i82544_writereg(self, phy,
   6660 			    GG82563_PHY_PAGE_SELECT,
   6661 			    reg >> GG82563_PAGE_SHIFT);
   6662 	}
   6663 
   6664 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6665 	wm_put_swfw_semaphore(sc, sem);
   6666 }
   6667 
   6668 static void
   6669 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   6670 {
   6671 	struct wm_softc *sc = device_private(self);
   6672 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   6673 	uint16_t wuce;
   6674 
   6675 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   6676 	if (sc->sc_type == WM_T_PCH) {
    6677 		/* XXX the e1000 driver does nothing here... why? */
   6678 	}
   6679 
   6680 	/* Set page 769 */
   6681 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6682 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6683 
   6684 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   6685 
   6686 	wuce &= ~BM_WUC_HOST_WU_BIT;
   6687 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   6688 	    wuce | BM_WUC_ENABLE_BIT);
   6689 
   6690 	/* Select page 800 */
   6691 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6692 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   6693 
   6694 	/* Write page 800 */
   6695 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   6696 
   6697 	if (rd)
   6698 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   6699 	else
   6700 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   6701 
   6702 	/* Set page 769 */
   6703 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6704 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6705 
   6706 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   6707 }
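
/*
 * Illustrative sketch, not part of the driver proper: the routine above
 * hides the whole page-800 sequence (select page 769, set the WUC
 * enable bit, select page 800, write the register number through the
 * address-opcode register, move the data through the data-opcode
 * register, restore page 769), so a caller only needs something like
 * the disabled helper below; "offset" is a BM wakeup register number.
 */
#if 0
static int16_t
wm_example_wakeup_read(device_t self, int offset)
{
	int16_t v;

	wm_access_phy_wakeup_reg_bm(self, offset, &v, 1);	/* 1 == read */
	return v;
}
#endif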
   6708 
   6709 /*
   6710  * wm_gmii_hv_readreg:	[mii interface function]
   6711  *
    6712  *	Read a PHY register on the HV PHY.
    6713  * This could be handled by the PHY layer if we didn't have to lock the
    6714  * resource ...
   6715  */
   6716 static int
   6717 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   6718 {
   6719 	struct wm_softc *sc = device_private(self);
   6720 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6721 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6722 	uint16_t val;
   6723 	int rv;
   6724 
   6725 	if (wm_get_swfwhw_semaphore(sc)) {
   6726 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6727 		    __func__);
   6728 		return 0;
   6729 	}
   6730 
    6731 	/* XXX Work around MDIO access failures while the cable is disconnected */
   6732 	if (sc->sc_phytype == WMPHY_82577) {
   6733 		/* XXX must write */
   6734 	}
   6735 
   6736 	/* Page 800 works differently than the rest so it has its own func */
   6737 	if (page == BM_WUC_PAGE) {
   6738 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   6739 		return val;
   6740 	}
   6741 
    6742 	/*
    6743 	 * Pages below 768 also work differently from the rest and would
    6744 	 * need their own handling, which is not implemented here.
    6745 	 */
   6746 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6747 		printf("gmii_hv_readreg!!!\n");
   6748 		return 0;
   6749 	}
   6750 
   6751 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6752 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6753 		    page << BME1000_PAGE_SHIFT);
   6754 	}
   6755 
   6756 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   6757 	wm_put_swfwhw_semaphore(sc);
   6758 	return rv;
   6759 }
   6760 
   6761 /*
   6762  * wm_gmii_hv_writereg:	[mii interface function]
   6763  *
    6764  *	Write a PHY register on the HV PHY.
    6765  * This could be handled by the PHY layer if we didn't have to lock the
    6766  * resource ...
   6767  */
   6768 static void
   6769 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   6770 {
   6771 	struct wm_softc *sc = device_private(self);
   6772 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6773 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6774 
   6775 	if (wm_get_swfwhw_semaphore(sc)) {
   6776 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6777 		    __func__);
   6778 		return;
   6779 	}
   6780 
    6781 	/* XXX Work around MDIO access failures while the cable is disconnected */
   6782 
   6783 	/* Page 800 works differently than the rest so it has its own func */
   6784 	if (page == BM_WUC_PAGE) {
   6785 		uint16_t tmp;
   6786 
   6787 		tmp = val;
   6788 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   6789 		return;
   6790 	}
   6791 
    6792 	/*
    6793 	 * Pages below 768 also work differently from the rest and would
    6794 	 * need their own handling, which is not implemented here.
    6795 	 */
   6796 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6797 		printf("gmii_hv_writereg!!!\n");
   6798 		return;
   6799 	}
   6800 
   6801 	/*
   6802 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   6803 	 * Power Down (whenever bit 11 of the PHY control register is set)
   6804 	 */
   6805 
   6806 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6807 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6808 		    page << BME1000_PAGE_SHIFT);
   6809 	}
   6810 
   6811 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   6812 	wm_put_swfwhw_semaphore(sc);
   6813 }
   6814 
   6815 /*
   6816  * wm_gmii_82580_readreg:	[mii interface function]
   6817  *
   6818  *	Read a PHY register on the 82580 and I350.
   6819  * This could be handled by the PHY layer if we didn't have to lock the
    6820  * resource ...
   6821  */
   6822 static int
   6823 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   6824 {
   6825 	struct wm_softc *sc = device_private(self);
   6826 	int sem;
   6827 	int rv;
   6828 
   6829 	sem = swfwphysem[sc->sc_funcid];
   6830 	if (wm_get_swfw_semaphore(sc, sem)) {
   6831 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6832 		    __func__);
   6833 		return 0;
   6834 	}
   6835 
   6836 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   6837 
   6838 	wm_put_swfw_semaphore(sc, sem);
   6839 	return rv;
   6840 }
   6841 
   6842 /*
   6843  * wm_gmii_82580_writereg:	[mii interface function]
   6844  *
   6845  *	Write a PHY register on the 82580 and I350.
   6846  * This could be handled by the PHY layer if we didn't have to lock the
    6847  * resource ...
   6848  */
   6849 static void
   6850 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   6851 {
   6852 	struct wm_softc *sc = device_private(self);
   6853 	int sem;
   6854 
   6855 	sem = swfwphysem[sc->sc_funcid];
   6856 	if (wm_get_swfw_semaphore(sc, sem)) {
   6857 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6858 		    __func__);
   6859 		return;
   6860 	}
   6861 
   6862 	wm_gmii_i82544_writereg(self, phy, reg, val);
   6863 
   6864 	wm_put_swfw_semaphore(sc, sem);
   6865 }
   6866 
   6867 /*
   6868  * wm_gmii_statchg:	[mii interface function]
   6869  *
   6870  *	Callback from MII layer when media changes.
   6871  */
   6872 static void
   6873 wm_gmii_statchg(struct ifnet *ifp)
   6874 {
   6875 	struct wm_softc *sc = ifp->if_softc;
   6876 	struct mii_data *mii = &sc->sc_mii;
   6877 
   6878 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   6879 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6880 	sc->sc_fcrtl &= ~FCRTL_XONE;
   6881 
   6882 	/*
   6883 	 * Get flow control negotiation result.
   6884 	 */
   6885 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   6886 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   6887 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   6888 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   6889 	}
   6890 
   6891 	if (sc->sc_flowflags & IFM_FLOW) {
   6892 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   6893 			sc->sc_ctrl |= CTRL_TFCE;
   6894 			sc->sc_fcrtl |= FCRTL_XONE;
   6895 		}
   6896 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   6897 			sc->sc_ctrl |= CTRL_RFCE;
   6898 	}
   6899 
   6900 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6901 		DPRINTF(WM_DEBUG_LINK,
   6902 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   6903 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6904 	} else {
   6905 		DPRINTF(WM_DEBUG_LINK,
   6906 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   6907 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6908 	}
   6909 
   6910 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6911 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6912 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   6913 						 : WMREG_FCRTL, sc->sc_fcrtl);
   6914 	if (sc->sc_type == WM_T_80003) {
   6915 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   6916 		case IFM_1000_T:
   6917 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6918 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   6919 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6920 			break;
   6921 		default:
   6922 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6923 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   6924 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   6925 			break;
   6926 		}
   6927 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6928 	}
   6929 }
   6930 
   6931 /*
   6932  * wm_kmrn_readreg:
   6933  *
   6934  *	Read a kumeran register
   6935  */
   6936 static int
   6937 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   6938 {
   6939 	int rv;
   6940 
    6941 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   6942 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   6943 			aprint_error_dev(sc->sc_dev,
   6944 			    "%s: failed to get semaphore\n", __func__);
   6945 			return 0;
   6946 		}
    6947 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   6948 		if (wm_get_swfwhw_semaphore(sc)) {
   6949 			aprint_error_dev(sc->sc_dev,
   6950 			    "%s: failed to get semaphore\n", __func__);
   6951 			return 0;
   6952 		}
   6953 	}
   6954 
   6955 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   6956 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   6957 	    KUMCTRLSTA_REN);
   6958 	CSR_WRITE_FLUSH(sc);
   6959 	delay(2);
   6960 
   6961 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   6962 
    6963 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
   6964 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    6965 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   6966 		wm_put_swfwhw_semaphore(sc);
   6967 
   6968 	return rv;
   6969 }
   6970 
   6971 /*
   6972  * wm_kmrn_writereg:
   6973  *
   6974  *	Write a kumeran register
   6975  */
   6976 static void
   6977 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   6978 {
   6979 
    6980 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   6981 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   6982 			aprint_error_dev(sc->sc_dev,
   6983 			    "%s: failed to get semaphore\n", __func__);
   6984 			return;
   6985 		}
    6986 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   6987 		if (wm_get_swfwhw_semaphore(sc)) {
   6988 			aprint_error_dev(sc->sc_dev,
   6989 			    "%s: failed to get semaphore\n", __func__);
   6990 			return;
   6991 		}
   6992 	}
   6993 
   6994 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   6995 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   6996 	    (val & KUMCTRLSTA_MASK));
   6997 
    6998 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
   6999 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    7000 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   7001 		wm_put_swfwhw_semaphore(sc);
   7002 }
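
/*
 * Illustrative sketch, not part of the driver proper: a Kumeran
 * register round trip through the two accessors above, using offsets
 * and values that already appear in this file.  Disabled, for
 * exposition only.
 */
#if 0
static int
wm_example_kmrn_roundtrip(struct wm_softc *sc)
{

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
	return wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
}
#endif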
   7003 
   7004 /* SGMII related */
   7005 
   7006 /*
   7007  * wm_sgmii_uses_mdio
   7008  *
   7009  * Check whether the transaction is to the internal PHY or the external
   7010  * MDIO interface. Return true if it's MDIO.
   7011  */
   7012 static bool
   7013 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7014 {
   7015 	uint32_t reg;
   7016 	bool ismdio = false;
   7017 
   7018 	switch (sc->sc_type) {
   7019 	case WM_T_82575:
   7020 	case WM_T_82576:
   7021 		reg = CSR_READ(sc, WMREG_MDIC);
   7022 		ismdio = ((reg & MDIC_DEST) != 0);
   7023 		break;
   7024 	case WM_T_82580:
   7025 	case WM_T_82580ER:
   7026 	case WM_T_I350:
   7027 	case WM_T_I354:
   7028 	case WM_T_I210:
   7029 	case WM_T_I211:
   7030 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7031 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7032 		break;
   7033 	default:
   7034 		break;
   7035 	}
   7036 
   7037 	return ismdio;
   7038 }
   7039 
   7040 /*
   7041  * wm_sgmii_readreg:	[mii interface function]
   7042  *
   7043  *	Read a PHY register on the SGMII
   7044  * This could be handled by the PHY layer if we didn't have to lock the
    7045  * resource ...
   7046  */
   7047 static int
   7048 wm_sgmii_readreg(device_t self, int phy, int reg)
   7049 {
   7050 	struct wm_softc *sc = device_private(self);
   7051 	uint32_t i2ccmd;
   7052 	int i, rv;
   7053 
   7054 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7055 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7056 		    __func__);
   7057 		return 0;
   7058 	}
   7059 
   7060 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7061 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7062 	    | I2CCMD_OPCODE_READ;
   7063 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7064 
   7065 	/* Poll the ready bit */
   7066 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7067 		delay(50);
   7068 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7069 		if (i2ccmd & I2CCMD_READY)
   7070 			break;
   7071 	}
   7072 	if ((i2ccmd & I2CCMD_READY) == 0)
   7073 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7074 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7075 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7076 
   7077 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7078 
   7079 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7080 	return rv;
   7081 }
   7082 
   7083 /*
   7084  * wm_sgmii_writereg:	[mii interface function]
   7085  *
   7086  *	Write a PHY register on the SGMII.
   7087  * This could be handled by the PHY layer if we didn't have to lock the
    7088  * resource ...
   7089  */
   7090 static void
   7091 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7092 {
   7093 	struct wm_softc *sc = device_private(self);
    7094 	uint32_t i2ccmd, swapdata;
   7095 	int i;
   7096 
   7097 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7098 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7099 		    __func__);
   7100 		return;
   7101 	}
   7102 
         	/* Swap the data bytes for the I2C interface */
         	swapdata = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
    7103 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
    7104 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
    7105 	    | I2CCMD_OPCODE_WRITE | swapdata;
   7106 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7107 
   7108 	/* Poll the ready bit */
   7109 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7110 		delay(50);
   7111 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7112 		if (i2ccmd & I2CCMD_READY)
   7113 			break;
   7114 	}
   7115 	if ((i2ccmd & I2CCMD_READY) == 0)
   7116 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7117 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7118 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7119 
    7120 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7121 }
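
/*
 * Illustrative sketch, not part of the driver proper: I2CCMD carries
 * PHY data most significant byte first, while the driver works with
 * host-order uint16_t values, which is why both accessors above swap
 * the two data bytes around the register access.  The swap itself:
 */
#if 0
static uint16_t
wm_example_i2c_swap(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif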
   7122 
   7123 /* TBI related */
   7124 
   7125 /* XXX Currently TBI only */
   7126 static int
   7127 wm_check_for_link(struct wm_softc *sc)
   7128 {
   7129 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7130 	uint32_t rxcw;
   7131 	uint32_t ctrl;
   7132 	uint32_t status;
   7133 	uint32_t sig;
   7134 
   7135 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7136 		sc->sc_tbi_linkup = 1;
   7137 		return 0;
   7138 	}
   7139 
   7140 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7141 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7142 	status = CSR_READ(sc, WMREG_STATUS);
   7143 
   7144 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7145 
   7146 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7147 		device_xname(sc->sc_dev), __func__,
   7148 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7149 		((status & STATUS_LU) != 0),
   7150 		((rxcw & RXCW_C) != 0)
   7151 		    ));
   7152 
   7153 	/*
   7154 	 * SWDPIN   LU RXCW
   7155 	 *      0    0    0
   7156 	 *      0    0    1	(should not happen)
   7157 	 *      0    1    0	(should not happen)
   7158 	 *      0    1    1	(should not happen)
   7159 	 *      1    0    0	Disable autonego and force linkup
   7160 	 *      1    0    1	got /C/ but not linkup yet
   7161 	 *      1    1    0	(linkup)
   7162 	 *      1    1    1	If IFM_AUTO, back to autonego
   7163 	 *
   7164 	 */
   7165 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7166 	    && ((status & STATUS_LU) == 0)
   7167 	    && ((rxcw & RXCW_C) == 0)) {
   7168 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7169 			__func__));
   7170 		sc->sc_tbi_linkup = 0;
   7171 		/* Disable auto-negotiation in the TXCW register */
   7172 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7173 
   7174 		/*
   7175 		 * Force link-up and also force full-duplex.
   7176 		 *
    7177 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    7178 		 * automatically, so we should update sc->sc_ctrl
   7179 		 */
   7180 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7181 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7182 	} else if (((status & STATUS_LU) != 0)
   7183 	    && ((rxcw & RXCW_C) != 0)
   7184 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   7185 		sc->sc_tbi_linkup = 1;
   7186 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   7187 			__func__));
   7188 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7189 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   7190 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7191 	    && ((rxcw & RXCW_C) != 0)) {
   7192 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   7193 	} else {
   7194 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   7195 			status));
   7196 	}
   7197 
   7198 	return 0;
   7199 }
   7200 
   7201 /*
   7202  * wm_tbi_mediainit:
   7203  *
   7204  *	Initialize media for use on 1000BASE-X devices.
   7205  */
   7206 static void
   7207 wm_tbi_mediainit(struct wm_softc *sc)
   7208 {
   7209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7210 	const char *sep = "";
   7211 
   7212 	if (sc->sc_type < WM_T_82543)
   7213 		sc->sc_tipg = TIPG_WM_DFLT;
   7214 	else
   7215 		sc->sc_tipg = TIPG_LG_DFLT;
   7216 
   7217 	sc->sc_tbi_anegticks = 5;
   7218 
   7219 	/* Initialize our media structures */
   7220 	sc->sc_mii.mii_ifp = ifp;
   7221 
   7222 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7223 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   7224 	    wm_tbi_mediastatus);
   7225 
   7226 	/*
   7227 	 * SWD Pins:
   7228 	 *
   7229 	 *	0 = Link LED (output)
   7230 	 *	1 = Loss Of Signal (input)
   7231 	 */
   7232 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7233 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7234 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7235 		sc->sc_ctrl &= ~CTRL_LRST;
   7236 
   7237 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7238 
   7239 #define	ADD(ss, mm, dd)							\
   7240 do {									\
   7241 	aprint_normal("%s%s", sep, ss);					\
   7242 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7243 	sep = ", ";							\
   7244 } while (/*CONSTCOND*/0)
   7245 
   7246 	aprint_normal_dev(sc->sc_dev, "");
   7247 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7248 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7249 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7250 	aprint_normal("\n");
   7251 
   7252 #undef ADD
   7253 
   7254 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7255 }
   7256 
   7257 /*
   7258  * wm_tbi_mediastatus:	[ifmedia interface function]
   7259  *
   7260  *	Get the current interface media status on a 1000BASE-X device.
   7261  */
   7262 static void
   7263 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7264 {
   7265 	struct wm_softc *sc = ifp->if_softc;
   7266 	uint32_t ctrl, status;
   7267 
   7268 	ifmr->ifm_status = IFM_AVALID;
   7269 	ifmr->ifm_active = IFM_ETHER;
   7270 
   7271 	status = CSR_READ(sc, WMREG_STATUS);
   7272 	if ((status & STATUS_LU) == 0) {
   7273 		ifmr->ifm_active |= IFM_NONE;
   7274 		return;
   7275 	}
   7276 
   7277 	ifmr->ifm_status |= IFM_ACTIVE;
   7278 	ifmr->ifm_active |= IFM_1000_SX;
   7279 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   7280 		ifmr->ifm_active |= IFM_FDX;
   7281 	else
   7282 		ifmr->ifm_active |= IFM_HDX;
   7283 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7284 	if (ctrl & CTRL_RFCE)
   7285 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7286 	if (ctrl & CTRL_TFCE)
   7287 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7288 }
   7289 
   7290 /*
   7291  * wm_tbi_mediachange:	[ifmedia interface function]
   7292  *
   7293  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7294  */
   7295 static int
   7296 wm_tbi_mediachange(struct ifnet *ifp)
   7297 {
   7298 	struct wm_softc *sc = ifp->if_softc;
   7299 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7300 	uint32_t status;
   7301 	int i;
   7302 
   7303 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7304 		return 0;
   7305 
   7306 	sc->sc_txcw = 0;
   7307 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
   7308 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7309 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7310 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7311 		sc->sc_txcw |= TXCW_ANE;
   7312 	} else {
   7313 		/*
   7314 		 * If autonegotiation is turned off, force link up and turn on
   7315 		 * full duplex
   7316 		 */
   7317 		sc->sc_txcw &= ~TXCW_ANE;
   7318 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
   7319 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   7320 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7321 		CSR_WRITE_FLUSH(sc);
   7322 		delay(1000);
   7323 	}
   7324 
   7325 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7326 		    device_xname(sc->sc_dev),sc->sc_txcw));
   7327 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7328 	CSR_WRITE_FLUSH(sc);
   7329 	delay(10000);
   7330 
   7331 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7332 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7333 
   7334 	/*
    7335 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    7336 	 * if the optics detect a signal, and clear if they don't.
   7337 	 */
   7338 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7339 		/* Have signal; wait for the link to come up. */
   7340 
   7341 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7342 			/*
   7343 			 * Reset the link, and let autonegotiation do its thing
   7344 			 */
   7345 			sc->sc_ctrl |= CTRL_LRST;
   7346 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7347 			CSR_WRITE_FLUSH(sc);
   7348 			delay(1000);
   7349 			sc->sc_ctrl &= ~CTRL_LRST;
   7350 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7351 			CSR_WRITE_FLUSH(sc);
   7352 			delay(1000);
   7353 		}
   7354 
   7355 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7356 			delay(10000);
   7357 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7358 				break;
   7359 		}
   7360 
   7361 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7362 			    device_xname(sc->sc_dev),i));
   7363 
   7364 		status = CSR_READ(sc, WMREG_STATUS);
   7365 		DPRINTF(WM_DEBUG_LINK,
   7366 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7367 			device_xname(sc->sc_dev),status, STATUS_LU));
   7368 		if (status & STATUS_LU) {
   7369 			/* Link is up. */
   7370 			DPRINTF(WM_DEBUG_LINK,
   7371 			    ("%s: LINK: set media -> link up %s\n",
   7372 			    device_xname(sc->sc_dev),
   7373 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7374 
   7375 			/*
    7376 			 * NOTE: the hardware updates TFCE and RFCE in
    7377 			 * CTRL automatically, so we should update sc->sc_ctrl
   7378 			 */
   7379 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7380 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7381 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7382 			if (status & STATUS_FD)
   7383 				sc->sc_tctl |=
   7384 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7385 			else
   7386 				sc->sc_tctl |=
   7387 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7388 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7389 				sc->sc_fcrtl |= FCRTL_XONE;
   7390 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7391 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7392 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7393 				      sc->sc_fcrtl);
   7394 			sc->sc_tbi_linkup = 1;
   7395 		} else {
   7396 			if (i == WM_LINKUP_TIMEOUT)
   7397 				wm_check_for_link(sc);
   7398 			/* Link is down. */
   7399 			DPRINTF(WM_DEBUG_LINK,
   7400 			    ("%s: LINK: set media -> link down\n",
   7401 			    device_xname(sc->sc_dev)));
   7402 			sc->sc_tbi_linkup = 0;
   7403 		}
   7404 	} else {
   7405 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7406 		    device_xname(sc->sc_dev)));
   7407 		sc->sc_tbi_linkup = 0;
   7408 	}
   7409 
   7410 	wm_tbi_set_linkled(sc);
   7411 
   7412 	return 0;
   7413 }
   7414 
   7415 /*
   7416  * wm_tbi_set_linkled:
   7417  *
   7418  *	Update the link LED on 1000BASE-X devices.
   7419  */
   7420 static void
   7421 wm_tbi_set_linkled(struct wm_softc *sc)
   7422 {
   7423 
   7424 	if (sc->sc_tbi_linkup)
   7425 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7426 	else
   7427 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7428 
   7429 	/* 82540 or newer devices are active low */
   7430 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7431 
   7432 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7433 }
   7434 
   7435 /*
   7436  * wm_tbi_check_link:
   7437  *
   7438  *	Check the link on 1000BASE-X devices.
   7439  */
   7440 static void
   7441 wm_tbi_check_link(struct wm_softc *sc)
   7442 {
   7443 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7444 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7445 	uint32_t status;
   7446 
   7447 	KASSERT(WM_LOCKED(sc));
   7448 
   7449 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7450 		sc->sc_tbi_linkup = 1;
   7451 		return;
   7452 	}
   7453 
   7454 	status = CSR_READ(sc, WMREG_STATUS);
   7455 
   7456 	/* XXX is this needed? */
   7457 	(void)CSR_READ(sc, WMREG_RXCW);
   7458 	(void)CSR_READ(sc, WMREG_CTRL);
   7459 
   7460 	/* set link status */
   7461 	if ((status & STATUS_LU) == 0) {
   7462 		DPRINTF(WM_DEBUG_LINK,
   7463 		    ("%s: LINK: checklink -> down\n",
   7464 			device_xname(sc->sc_dev)));
   7465 		sc->sc_tbi_linkup = 0;
   7466 	} else if (sc->sc_tbi_linkup == 0) {
   7467 		DPRINTF(WM_DEBUG_LINK,
   7468 		    ("%s: LINK: checklink -> up %s\n",
   7469 			device_xname(sc->sc_dev),
   7470 			(status & STATUS_FD) ? "FDX" : "HDX"));
   7471 		sc->sc_tbi_linkup = 1;
   7472 	}
   7473 
   7474 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
   7475 	    && ((status & STATUS_LU) == 0)) {
   7476 		sc->sc_tbi_linkup = 0;
   7477 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
   7478 			/* RXCFG storm! */
   7479 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
   7480 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
   7481 			wm_init_locked(ifp);
   7482 			WM_UNLOCK(sc);
   7483 			ifp->if_start(ifp);
   7484 			WM_LOCK(sc);
   7485 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7486 			/* If the timer expired, retry autonegotiation */
   7487 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
   7488 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   7489 				sc->sc_tbi_ticks = 0;
   7490 				/*
   7491 				 * Reset the link, and let autonegotiation do
   7492 				 * its thing
   7493 				 */
   7494 				sc->sc_ctrl |= CTRL_LRST;
   7495 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7496 				CSR_WRITE_FLUSH(sc);
   7497 				delay(1000);
   7498 				sc->sc_ctrl &= ~CTRL_LRST;
   7499 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7500 				CSR_WRITE_FLUSH(sc);
   7501 				delay(1000);
   7502 				CSR_WRITE(sc, WMREG_TXCW,
   7503 				    sc->sc_txcw & ~TXCW_ANE);
   7504 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7505 			}
   7506 		}
   7507 	}
   7508 
   7509 	wm_tbi_set_linkled(sc);
   7510 }
   7511 
   7512 /*
   7513  * NVM related.
   7514  * Microwire, SPI (w/wo EERD) and Flash.
   7515  */
   7516 
   7517 /* Both spi and uwire */
   7518 
   7519 /*
   7520  * wm_eeprom_sendbits:
   7521  *
   7522  *	Send a series of bits to the EEPROM.
   7523  */
   7524 static void
   7525 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   7526 {
   7527 	uint32_t reg;
   7528 	int x;
   7529 
   7530 	reg = CSR_READ(sc, WMREG_EECD);
   7531 
   7532 	for (x = nbits; x > 0; x--) {
   7533 		if (bits & (1U << (x - 1)))
   7534 			reg |= EECD_DI;
   7535 		else
   7536 			reg &= ~EECD_DI;
   7537 		CSR_WRITE(sc, WMREG_EECD, reg);
   7538 		CSR_WRITE_FLUSH(sc);
   7539 		delay(2);
   7540 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7541 		CSR_WRITE_FLUSH(sc);
   7542 		delay(2);
   7543 		CSR_WRITE(sc, WMREG_EECD, reg);
   7544 		CSR_WRITE_FLUSH(sc);
   7545 		delay(2);
   7546 	}
   7547 }
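
/*
 * Rough timing note (not from the original source; ignores register
 * access overhead): each bit above costs three 2us delays -- data
 * setup, SK high, SK low -- so the bit-banged EECD clock runs at
 * roughly 1 / 6us, about 166 kHz, slow enough for the serial EEPROMs
 * used with these parts.
 */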
   7548 
   7549 /*
   7550  * wm_eeprom_recvbits:
   7551  *
   7552  *	Receive a series of bits from the EEPROM.
   7553  */
   7554 static void
   7555 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   7556 {
   7557 	uint32_t reg, val;
   7558 	int x;
   7559 
   7560 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   7561 
   7562 	val = 0;
   7563 	for (x = nbits; x > 0; x--) {
   7564 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7565 		CSR_WRITE_FLUSH(sc);
   7566 		delay(2);
   7567 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   7568 			val |= (1U << (x - 1));
   7569 		CSR_WRITE(sc, WMREG_EECD, reg);
   7570 		CSR_WRITE_FLUSH(sc);
   7571 		delay(2);
   7572 	}
   7573 	*valp = val;
   7574 }
   7575 
   7576 /* Microwire */
   7577 
   7578 /*
   7579  * wm_nvm_read_uwire:
   7580  *
   7581  *	Read a word from the EEPROM using the MicroWire protocol.
   7582  */
   7583 static int
   7584 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7585 {
   7586 	uint32_t reg, val;
   7587 	int i;
   7588 
   7589 	for (i = 0; i < wordcnt; i++) {
   7590 		/* Clear SK and DI. */
   7591 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   7592 		CSR_WRITE(sc, WMREG_EECD, reg);
   7593 
   7594 		/*
   7595 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   7596 		 * and Xen.
   7597 		 *
   7598 		 * We use this workaround only for 82540 because qemu's
    7599 		 * e1000 acts as an 82540.
   7600 		 */
   7601 		if (sc->sc_type == WM_T_82540) {
   7602 			reg |= EECD_SK;
   7603 			CSR_WRITE(sc, WMREG_EECD, reg);
   7604 			reg &= ~EECD_SK;
   7605 			CSR_WRITE(sc, WMREG_EECD, reg);
   7606 			CSR_WRITE_FLUSH(sc);
   7607 			delay(2);
   7608 		}
   7609 		/* XXX: end of workaround */
   7610 
   7611 		/* Set CHIP SELECT. */
   7612 		reg |= EECD_CS;
   7613 		CSR_WRITE(sc, WMREG_EECD, reg);
   7614 		CSR_WRITE_FLUSH(sc);
   7615 		delay(2);
   7616 
   7617 		/* Shift in the READ command. */
   7618 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   7619 
   7620 		/* Shift in address. */
   7621 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   7622 
   7623 		/* Shift out the data. */
   7624 		wm_eeprom_recvbits(sc, &val, 16);
   7625 		data[i] = val & 0xffff;
   7626 
   7627 		/* Clear CHIP SELECT. */
   7628 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   7629 		CSR_WRITE(sc, WMREG_EECD, reg);
   7630 		CSR_WRITE_FLUSH(sc);
   7631 		delay(2);
   7632 	}
   7633 
   7634 	return 0;
   7635 }
   7636 
   7637 /* SPI */
   7638 
   7639 /* Set SPI related information */
   7640 static void
   7641 wm_set_spiaddrbits(struct wm_softc *sc)
   7642 {
   7643 	uint32_t reg;
   7644 
   7645 	sc->sc_flags |= WM_F_EEPROM_SPI;
   7646 	reg = CSR_READ(sc, WMREG_EECD);
   7647 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   7648 }
   7649 
   7650 /*
   7651  * wm_nvm_ready_spi:
   7652  *
   7653  *	Wait for a SPI EEPROM to be ready for commands.
   7654  */
   7655 static int
   7656 wm_nvm_ready_spi(struct wm_softc *sc)
   7657 {
   7658 	uint32_t val;
   7659 	int usec;
   7660 
   7661 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   7662 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   7663 		wm_eeprom_recvbits(sc, &val, 8);
   7664 		if ((val & SPI_SR_RDY) == 0)
   7665 			break;
   7666 	}
   7667 	if (usec >= SPI_MAX_RETRIES) {
   7668 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   7669 		return 1;
   7670 	}
   7671 	return 0;
   7672 }
   7673 
   7674 /*
   7675  * wm_nvm_read_spi:
   7676  *
    7677  *	Read a word from the EEPROM using the SPI protocol.
   7678  */
   7679 static int
   7680 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7681 {
   7682 	uint32_t reg, val;
   7683 	int i;
   7684 	uint8_t opc;
   7685 
   7686 	/* Clear SK and CS. */
   7687 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   7688 	CSR_WRITE(sc, WMREG_EECD, reg);
   7689 	CSR_WRITE_FLUSH(sc);
   7690 	delay(2);
   7691 
   7692 	if (wm_nvm_ready_spi(sc))
   7693 		return 1;
   7694 
   7695 	/* Toggle CS to flush commands. */
   7696 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   7697 	CSR_WRITE_FLUSH(sc);
   7698 	delay(2);
   7699 	CSR_WRITE(sc, WMREG_EECD, reg);
   7700 	CSR_WRITE_FLUSH(sc);
   7701 	delay(2);
   7702 
   7703 	opc = SPI_OPC_READ;
   7704 	if (sc->sc_ee_addrbits == 8 && word >= 128)
   7705 		opc |= SPI_OPC_A8;
   7706 
   7707 	wm_eeprom_sendbits(sc, opc, 8);
   7708 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
   7709 
   7710 	for (i = 0; i < wordcnt; i++) {
   7711 		wm_eeprom_recvbits(sc, &val, 16);
   7712 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   7713 	}
   7714 
   7715 	/* Raise CS and clear SK. */
   7716 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   7717 	CSR_WRITE(sc, WMREG_EECD, reg);
   7718 	CSR_WRITE_FLUSH(sc);
   7719 	delay(2);
   7720 
   7721 	return 0;
   7722 }
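
/*
 * Two details worth noting in the SPI reader above: a part with only 8
 * address bits can still hold more than 128 words, because the ninth
 * address bit rides in the opcode as SPI_OPC_A8 (which is why word
 * addresses at or above 128 set it); and the device returns each word
 * most significant byte first, so the receive loop swaps the bytes
 * into host order.
 */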
   7723 
    7724 /* Reading via EERD */
   7725 
   7726 static int
   7727 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   7728 {
   7729 	uint32_t attempts = 100000;
   7730 	uint32_t i, reg = 0;
   7731 	int32_t done = -1;
   7732 
   7733 	for (i = 0; i < attempts; i++) {
   7734 		reg = CSR_READ(sc, rw);
   7735 
   7736 		if (reg & EERD_DONE) {
   7737 			done = 0;
   7738 			break;
   7739 		}
   7740 		delay(5);
   7741 	}
   7742 
   7743 	return done;
   7744 }
   7745 
   7746 static int
   7747 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   7748     uint16_t *data)
   7749 {
   7750 	int i, eerd = 0;
   7751 	int error = 0;
   7752 
   7753 	for (i = 0; i < wordcnt; i++) {
   7754 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   7755 
   7756 		CSR_WRITE(sc, WMREG_EERD, eerd);
   7757 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   7758 		if (error != 0)
   7759 			break;
   7760 
   7761 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   7762 	}
   7763 
   7764 	return error;
   7765 }
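
/*
 * Illustrative sketch, not part of the driver proper: a single-word
 * read through the EERD path above.  EEPROM_OFF_CFG1 is assumed to be
 * one of the NVM word offsets from this driver's register definitions.
 * Disabled, for exposition only.
 */
#if 0
static int
wm_example_eerd_word(struct wm_softc *sc, uint16_t *word)
{

	return wm_nvm_read_eerd(sc, EEPROM_OFF_CFG1, 1, word);
}
#endif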
   7766 
   7767 /* Flash */
   7768 
   7769 static int
   7770 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   7771 {
   7772 	uint32_t eecd;
   7773 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   7774 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   7775 	uint8_t sig_byte = 0;
   7776 
   7777 	switch (sc->sc_type) {
   7778 	case WM_T_ICH8:
   7779 	case WM_T_ICH9:
   7780 		eecd = CSR_READ(sc, WMREG_EECD);
   7781 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   7782 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   7783 			return 0;
   7784 		}
   7785 		/* FALLTHROUGH */
   7786 	default:
   7787 		/* Default to 0 */
   7788 		*bank = 0;
   7789 
   7790 		/* Check bank 0 */
   7791 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   7792 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7793 			*bank = 0;
   7794 			return 0;
   7795 		}
   7796 
   7797 		/* Check bank 1 */
   7798 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   7799 		    &sig_byte);
   7800 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7801 			*bank = 1;
   7802 			return 0;
   7803 		}
   7804 	}
   7805 
   7806 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   7807 		device_xname(sc->sc_dev)));
   7808 	return -1;
   7809 }
   7810 
   7811 /******************************************************************************
   7812  * This function does initial flash setup so that a new read/write/erase cycle
   7813  * can be started.
   7814  *
   7815  * sc - The pointer to the hw structure
   7816  ****************************************************************************/
   7817 static int32_t
   7818 wm_ich8_cycle_init(struct wm_softc *sc)
   7819 {
   7820 	uint16_t hsfsts;
   7821 	int32_t error = 1;
   7822 	int32_t i     = 0;
   7823 
   7824 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7825 
    7826 	/* Check the Flash Descriptor Valid bit in the HW status register */
   7827 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   7828 		return error;
   7829 	}
   7830 
    7831 	/* Clear FCERR in HW status by writing 1 */
    7832 	/* Clear DAEL in HW status by writing 1 */
   7833 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   7834 
   7835 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7836 
    7837 	/*
    7838 	 * Either we should have a hardware SPI cycle-in-progress bit to
    7839 	 * check against in order to start a new cycle, or the FDONE bit
    7840 	 * should be changed in the hardware so that it is 1 after hardware
    7841 	 * reset, which could then be used to tell whether a cycle is in
    7842 	 * progress or has been completed.  We should also have a software
    7843 	 * semaphore guarding FDONE or the cycle-in-progress bit, so that
    7844 	 * accesses by two threads are serialized, or some other way to
    7845 	 * keep two threads from starting a cycle at the same time.
    7846 	 */
   7847 
   7848 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7849 		/*
   7850 		 * There is no cycle running at present, so we can start a
   7851 		 * cycle
   7852 		 */
   7853 
   7854 		/* Begin by setting Flash Cycle Done. */
   7855 		hsfsts |= HSFSTS_DONE;
   7856 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7857 		error = 0;
   7858 	} else {
   7859 		/*
   7860 		 * otherwise poll for sometime so the current cycle has a
   7861 		 * chance to end before giving up.
   7862 		 */
   7863 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   7864 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7865 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7866 				error = 0;
   7867 				break;
   7868 			}
   7869 			delay(1);
   7870 		}
   7871 		if (error == 0) {
   7872 			/*
    7873 			 * The previous cycle finished in time; now set the
    7874 			 * Flash Cycle Done bit.
   7875 			 */
   7876 			hsfsts |= HSFSTS_DONE;
   7877 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7878 		}
   7879 	}
   7880 	return error;
   7881 }
   7882 
   7883 /******************************************************************************
   7884  * This function starts a flash cycle and waits for its completion
   7885  *
   7886  * sc - The pointer to the hw structure
   7887  ****************************************************************************/
   7888 static int32_t
   7889 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   7890 {
   7891 	uint16_t hsflctl;
   7892 	uint16_t hsfsts;
   7893 	int32_t error = 1;
   7894 	uint32_t i = 0;
   7895 
   7896 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   7897 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   7898 	hsflctl |= HSFCTL_GO;
   7899 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7900 
   7901 	/* Wait till FDONE bit is set to 1 */
   7902 	do {
   7903 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7904 		if (hsfsts & HSFSTS_DONE)
   7905 			break;
   7906 		delay(1);
   7907 		i++;
   7908 	} while (i < timeout);
    7909 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   7910 		error = 0;
   7911 
   7912 	return error;
   7913 }
   7914 
   7915 /******************************************************************************
   7916  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   7917  *
   7918  * sc - The pointer to the hw structure
   7919  * index - The index of the byte or word to read.
   7920  * size - Size of data to read, 1=byte 2=word
   7921  * data - Pointer to the word to store the value read.
   7922  *****************************************************************************/
   7923 static int32_t
   7924 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   7925     uint32_t size, uint16_t *data)
   7926 {
   7927 	uint16_t hsfsts;
   7928 	uint16_t hsflctl;
   7929 	uint32_t flash_linear_address;
   7930 	uint32_t flash_data = 0;
   7931 	int32_t error = 1;
   7932 	int32_t count = 0;
   7933 
    7934 	if (size < 1 || size > 2 || data == NULL ||
   7935 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   7936 		return error;
   7937 
   7938 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   7939 	    sc->sc_ich8_flash_base;
   7940 
   7941 	do {
   7942 		delay(1);
   7943 		/* Steps */
   7944 		error = wm_ich8_cycle_init(sc);
   7945 		if (error)
   7946 			break;
   7947 
   7948 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   7949 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   7950 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   7951 		    & HSFCTL_BCOUNT_MASK;
   7952 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   7953 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7954 
   7955 		/*
   7956 		 * Write the last 24 bits of index into Flash Linear address
   7957 		 * field in Flash Address
   7958 		 */
    7959 		/* TODO: check the index against the size of the flash */
   7960 
   7961 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   7962 
   7963 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   7964 
   7965 		/*
    7966 		 * If FCERR is set, clear it and retry the whole sequence
    7967 		 * a few more times (up to ICH_FLASH_CYCLE_REPEAT_COUNT);
    7968 		 * otherwise read the result out of Flash Data0, least
    7969 		 * significant byte first.
   7970 		 */
   7971 		if (error == 0) {
   7972 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   7973 			if (size == 1)
   7974 				*data = (uint8_t)(flash_data & 0x000000FF);
   7975 			else if (size == 2)
   7976 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   7977 			break;
   7978 		} else {
   7979 			/*
   7980 			 * If we've gotten here, then things are probably
   7981 			 * completely hosed, but if the error condition is
   7982 			 * detected, it won't hurt to give it another try...
   7983 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   7984 			 */
   7985 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7986 			if (hsfsts & HSFSTS_ERR) {
   7987 				/* Repeat for some time before giving up. */
   7988 				continue;
   7989 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   7990 				break;
   7991 		}
   7992 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   7993 
   7994 	return error;
   7995 }
   7996 
   7997 /******************************************************************************
   7998  * Reads a single byte from the NVM using the ICH8 flash access registers.
   7999  *
   8000  * sc - pointer to wm_hw structure
   8001  * index - The index of the byte to read.
   8002  * data - Pointer to a byte to store the value read.
   8003  *****************************************************************************/
   8004 static int32_t
   8005 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8006 {
   8007 	int32_t status;
   8008 	uint16_t word = 0;
   8009 
   8010 	status = wm_read_ich8_data(sc, index, 1, &word);
   8011 	if (status == 0)
   8012 		*data = (uint8_t)word;
   8013 	else
   8014 		*data = 0;
   8015 
   8016 	return status;
   8017 }
   8018 
   8019 /******************************************************************************
   8020  * Reads a word from the NVM using the ICH8 flash access registers.
   8021  *
   8022  * sc - pointer to wm_hw structure
   8023  * index - The starting byte index of the word to read.
   8024  * data - Pointer to a word to store the value read.
   8025  *****************************************************************************/
   8026 static int32_t
   8027 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8028 {
   8029 	int32_t status;
   8030 
   8031 	status = wm_read_ich8_data(sc, index, 2, data);
   8032 	return status;
   8033 }
   8034 
   8035 /******************************************************************************
   8036  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8037  * register.
   8038  *
   8039  * sc - Struct containing variables accessed by shared code
   8040  * offset - offset of word in the EEPROM to read
   8041  * data - word read from the EEPROM
   8042  * words - number of words to read
   8043  *****************************************************************************/
   8044 static int
   8045 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8046 {
   8047 	int32_t  error = 0;
   8048 	uint32_t flash_bank = 0;
   8049 	uint32_t act_offset = 0;
   8050 	uint32_t bank_offset = 0;
   8051 	uint16_t word = 0;
   8052 	uint16_t i = 0;
   8053 
   8054 	/*
   8055 	 * We need to know which is the valid flash bank.  In the event
   8056 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8057 	 * managing flash_bank.  So it cannot be trusted and needs
   8058 	 * to be updated with each read.
   8059 	 */
   8060 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8061 	if (error) {
   8062 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
   8063 		    __func__);
   8064 		flash_bank = 0;
   8065 	}
   8066 
   8067 	/*
   8068 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   8069 	 * size
   8070 	 */
   8071 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8072 
   8073 	error = wm_get_swfwhw_semaphore(sc);
   8074 	if (error) {
   8075 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8076 		    __func__);
   8077 		return error;
   8078 	}
   8079 
   8080 	for (i = 0; i < words; i++) {
   8081 		/* The NVM part needs a byte offset, hence * 2 */
   8082 		act_offset = bank_offset + ((offset + i) * 2);
   8083 		error = wm_read_ich8_word(sc, act_offset, &word);
   8084 		if (error) {
   8085 			aprint_error_dev(sc->sc_dev,
   8086 			    "%s: failed to read NVM\n", __func__);
   8087 			break;
   8088 		}
   8089 		data[i] = word;
   8090 	}
   8091 
   8092 	wm_put_swfwhw_semaphore(sc);
   8093 	return error;
   8094 }
   8095 
    8096 /* Locking, NVM type detection, checksum validation and read */
   8097 
   8098 /*
   8099  * wm_nvm_acquire:
   8100  *
   8101  *	Perform the EEPROM handshake required on some chips.
   8102  */
   8103 static int
   8104 wm_nvm_acquire(struct wm_softc *sc)
   8105 {
   8106 	uint32_t reg;
   8107 	int x;
   8108 	int ret = 0;
   8109 
    8110 	/* always succeeds */
   8111 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8112 		return 0;
   8113 
   8114 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8115 		ret = wm_get_swfwhw_semaphore(sc);
   8116 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8117 		/* This will also do wm_get_swsm_semaphore() if needed */
   8118 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   8119 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8120 		ret = wm_get_swsm_semaphore(sc);
   8121 	}
   8122 
   8123 	if (ret) {
   8124 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8125 			__func__);
   8126 		return 1;
   8127 	}
   8128 
   8129 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8130 		reg = CSR_READ(sc, WMREG_EECD);
   8131 
   8132 		/* Request EEPROM access. */
   8133 		reg |= EECD_EE_REQ;
   8134 		CSR_WRITE(sc, WMREG_EECD, reg);
   8135 
   8136 		/* ..and wait for it to be granted. */
   8137 		for (x = 0; x < 1000; x++) {
   8138 			reg = CSR_READ(sc, WMREG_EECD);
   8139 			if (reg & EECD_EE_GNT)
   8140 				break;
   8141 			delay(5);
   8142 		}
   8143 		if ((reg & EECD_EE_GNT) == 0) {
   8144 			aprint_error_dev(sc->sc_dev,
   8145 			    "could not acquire EEPROM GNT\n");
   8146 			reg &= ~EECD_EE_REQ;
   8147 			CSR_WRITE(sc, WMREG_EECD, reg);
   8148 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8149 				wm_put_swfwhw_semaphore(sc);
   8150 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   8151 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8152 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8153 				wm_put_swsm_semaphore(sc);
   8154 			return 1;
   8155 		}
   8156 	}
   8157 
   8158 	return 0;
   8159 }
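
/*
 * Illustrative caller pattern (a sketch only; "off" is a stand-in word
 * offset and the uwire backend stands in for whatever access method the
 * chip uses - compare wm_nvm_read() further below):
 */
#if 0
	int rv;
	uint16_t val;

	if (wm_nvm_acquire(sc))
		return 1;				/* handshake failed */
	rv = wm_nvm_read_uwire(sc, off, 1, &val);	/* access while held */
	wm_nvm_release(sc);				/* always drop it */
#endif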
   8160 
   8161 /*
   8162  * wm_nvm_release:
   8163  *
   8164  *	Release the EEPROM mutex.
   8165  */
   8166 static void
   8167 wm_nvm_release(struct wm_softc *sc)
   8168 {
   8169 	uint32_t reg;
   8170 
    8171 	/* Nothing to release: flash-type NVM needs no handshake */
   8172 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8173 		return;
   8174 
   8175 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8176 		reg = CSR_READ(sc, WMREG_EECD);
   8177 		reg &= ~EECD_EE_REQ;
   8178 		CSR_WRITE(sc, WMREG_EECD, reg);
   8179 	}
   8180 
   8181 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8182 		wm_put_swfwhw_semaphore(sc);
   8183 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8184 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8185 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8186 		wm_put_swsm_semaphore(sc);
   8187 }
   8188 
   8189 static int
   8190 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   8191 {
   8192 	uint32_t eecd = 0;
   8193 
   8194 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   8195 	    || sc->sc_type == WM_T_82583) {
   8196 		eecd = CSR_READ(sc, WMREG_EECD);
   8197 
   8198 		/* Isolate bits 15 & 16 */
   8199 		eecd = ((eecd >> 15) & 0x03);
   8200 
   8201 		/* If both bits are set, device is Flash type */
   8202 		if (eecd == 0x03)
   8203 			return 0;
   8204 	}
   8205 	return 1;
   8206 }
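
/*
 * Decode of EECD[16:15] as used above (82573/82574/82583 only):
 *	0x0, 0x1, 0x2	-> on-board EEPROM, function returns 1
 *	0x3		-> Flash part, function returns 0
 */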
   8207 
   8208 #define NVM_CHECKSUM			0xBABA
   8209 #define EEPROM_SIZE			0x0040
   8210 #define NVM_COMPAT			0x0003
   8211 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
   8212 #define NVM_FUTURE_INIT_WORD1			0x0019
   8213 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
   8214 
   8215 /*
   8216  * wm_nvm_validate_checksum
   8217  *
    8218  * The checksum is the sum of the first 64 16-bit words, which must
    8219  * equal NVM_CHECKSUM (0xBABA).
   8219  */
   8220 static int
   8221 wm_nvm_validate_checksum(struct wm_softc *sc)
   8222 {
   8223 	uint16_t checksum;
   8224 	uint16_t eeprom_data;
   8225 #ifdef WM_DEBUG
   8226 	uint16_t csum_wordaddr, valid_checksum;
   8227 #endif
   8228 	int i;
   8229 
   8230 	checksum = 0;
   8231 
   8232 	/* Don't check for I211 */
   8233 	if (sc->sc_type == WM_T_I211)
   8234 		return 0;
   8235 
   8236 #ifdef WM_DEBUG
   8237 	if (sc->sc_type == WM_T_PCH_LPT) {
   8238 		csum_wordaddr = NVM_COMPAT;
   8239 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   8240 	} else {
   8241 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
   8242 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   8243 	}
   8244 
   8245 	/* Dump EEPROM image for debug */
   8246 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8247 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8248 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   8249 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   8250 		if ((eeprom_data & valid_checksum) == 0) {
   8251 			DPRINTF(WM_DEBUG_NVM,
   8252 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   8253 				device_xname(sc->sc_dev), eeprom_data,
   8254 				    valid_checksum));
   8255 		}
   8256 	}
   8257 
   8258 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   8259 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   8260 		for (i = 0; i < EEPROM_SIZE; i++) {
   8261 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8262 				printf("XX ");
   8263 			else
   8264 				printf("%04x ", eeprom_data);
   8265 			if (i % 8 == 7)
   8266 				printf("\n");
   8267 		}
   8268 	}
   8269 
   8270 #endif /* WM_DEBUG */
   8271 
   8272 	for (i = 0; i < EEPROM_SIZE; i++) {
   8273 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8274 			return 1;
   8275 		checksum += eeprom_data;
   8276 	}
   8277 
   8278 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   8279 #ifdef WM_DEBUG
   8280 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   8281 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   8282 #endif
   8283 	}
   8284 
   8285 	return 0;
   8286 }
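
/*
 * Example of the invariant (illustrative numbers): if words 0x00-0x3e
 * sum to 0x1234, a correctly programmed image carries 0xBABA - 0x1234
 * = 0xa886 in the final word (conventionally word 0x3f) so the 64-word
 * sum wraps to NVM_CHECKSUM.  Note that the function above only logs a
 * mismatch under WM_DEBUG; it never fails the attach.
 */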
   8287 
   8288 /*
   8289  * wm_nvm_read:
   8290  *
   8291  *	Read data from the serial EEPROM.
   8292  */
   8293 static int
   8294 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8295 {
   8296 	int rv;
   8297 
   8298 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   8299 		return 1;
   8300 
   8301 	if (wm_nvm_acquire(sc))
   8302 		return 1;
   8303 
   8304 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8305 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8306 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   8307 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   8308 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   8309 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   8310 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   8311 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   8312 	else
   8313 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   8314 
   8315 	wm_nvm_release(sc);
   8316 	return rv;
   8317 }
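
/*
 * Usage sketch (illustrative only; the usual Intel layout keeps the
 * station address in NVM words 0x00-0x02):
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, 0x00, 3, myea) == 0) {
		/* myea[0..2] now hold the 6 address bytes, LE word order */
	}
#endif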
   8318 
   8319 /*
   8320  * Hardware semaphores.
    8321  * Very complex...
   8322  */
   8323 
   8324 static int
   8325 wm_get_swsm_semaphore(struct wm_softc *sc)
   8326 {
   8327 	int32_t timeout;
   8328 	uint32_t swsm;
   8329 
   8330 	/* Get the SW semaphore. */
   8331 	timeout = 1000 + 1; /* XXX */
   8332 	while (timeout) {
   8333 		swsm = CSR_READ(sc, WMREG_SWSM);
   8334 
   8335 		if ((swsm & SWSM_SMBI) == 0)
   8336 			break;
   8337 
   8338 		delay(50);
   8339 		timeout--;
   8340 	}
   8341 
   8342 	if (timeout == 0) {
   8343 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   8344 		return 1;
   8345 	}
   8346 
   8347 	/* Get the FW semaphore. */
   8348 	timeout = 1000 + 1; /* XXX */
   8349 	while (timeout) {
   8350 		swsm = CSR_READ(sc, WMREG_SWSM);
   8351 		swsm |= SWSM_SWESMBI;
   8352 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   8353 		/* If we managed to set the bit we got the semaphore. */
   8354 		swsm = CSR_READ(sc, WMREG_SWSM);
   8355 		if (swsm & SWSM_SWESMBI)
   8356 			break;
   8357 
   8358 		delay(50);
   8359 		timeout--;
   8360 	}
   8361 
   8362 	if (timeout == 0) {
   8363 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   8364 		/* Release semaphores */
   8365 		wm_put_swsm_semaphore(sc);
   8366 		return 1;
   8367 	}
   8368 	return 0;
   8369 }
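
/*
 * The handshake above is two-staged: SMBI arbitrates among software
 * agents (on these MACs the bit is hardware-assisted, so the read that
 * returns it as 0 also claims it - the first loop's read doubles as
 * the claim), and SWESMBI then arbitrates software against firmware:
 * the write only sticks when firmware isn't holding the semaphore,
 * which is why it is read back to verify.
 */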
   8370 
   8371 static void
   8372 wm_put_swsm_semaphore(struct wm_softc *sc)
   8373 {
   8374 	uint32_t swsm;
   8375 
   8376 	swsm = CSR_READ(sc, WMREG_SWSM);
   8377 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   8378 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   8379 }
   8380 
   8381 static int
   8382 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8383 {
   8384 	uint32_t swfw_sync;
   8385 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   8386 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    8387 	int timeout;
   8388 
   8389 	for (timeout = 0; timeout < 200; timeout++) {
   8390 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8391 			if (wm_get_swsm_semaphore(sc)) {
   8392 				aprint_error_dev(sc->sc_dev,
   8393 				    "%s: failed to get semaphore\n",
   8394 				    __func__);
   8395 				return 1;
   8396 			}
   8397 		}
   8398 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8399 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   8400 			swfw_sync |= swmask;
   8401 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8402 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   8403 				wm_put_swsm_semaphore(sc);
   8404 			return 0;
   8405 		}
   8406 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   8407 			wm_put_swsm_semaphore(sc);
   8408 		delay(5000);
   8409 	}
   8410 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   8411 	    device_xname(sc->sc_dev), mask, swfw_sync);
   8412 	return 1;
   8413 }
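
/*
 * Illustrative use of the SW/FW sync register (a sketch; SWFW_PHY0_SM
 * is one of the resource masks defined next to SWFW_EEP_SM):
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
		/* ... access PHY 0 registers ... */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
	}
#endif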
   8414 
   8415 static void
   8416 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8417 {
   8418 	uint32_t swfw_sync;
   8419 
   8420 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8421 		while (wm_get_swsm_semaphore(sc) != 0)
   8422 			continue;
   8423 	}
   8424 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8425 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   8426 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8427 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   8428 		wm_put_swsm_semaphore(sc);
   8429 }
   8430 
   8431 static int
   8432 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   8433 {
   8434 	uint32_t ext_ctrl;
    8435 	int timeout;
   8436 
   8437 	for (timeout = 0; timeout < 200; timeout++) {
   8438 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8439 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   8440 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8441 
   8442 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8443 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   8444 			return 0;
   8445 		delay(5000);
   8446 	}
   8447 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   8448 	    device_xname(sc->sc_dev), ext_ctrl);
   8449 	return 1;
   8450 }
   8451 
   8452 static void
   8453 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   8454 {
   8455 	uint32_t ext_ctrl;
   8456 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8457 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   8458 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8459 }
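
/*
 * EXTCNFCTR's SWFLAG behaves like SWSM_SWESMBI above: the set only
 * takes effect while firmware isn't configuring the PHY, hence the
 * write-then-read-back loop in wm_get_swfwhw_semaphore().
 */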
   8460 
   8461 static int
   8462 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   8463 {
   8464 	int i = 0;
   8465 	uint32_t reg;
   8466 
   8467 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8468 	do {
   8469 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   8470 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   8471 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8472 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   8473 			break;
   8474 		delay(2*1000);
   8475 		i++;
   8476 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   8477 
   8478 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   8479 		wm_put_hw_semaphore_82573(sc);
   8480 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   8481 		    device_xname(sc->sc_dev));
   8482 		return -1;
   8483 	}
   8484 
   8485 	return 0;
   8486 }
   8487 
   8488 static void
   8489 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   8490 {
   8491 	uint32_t reg;
   8492 
   8493 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8494 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   8495 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8496 }
   8497 
   8498 /*
   8499  * Management mode and power management related subroutines.
   8500  * BMC, AMT, suspend/resume and EEE.
   8501  */
   8502 
   8503 static int
   8504 wm_check_mng_mode(struct wm_softc *sc)
   8505 {
   8506 	int rv;
   8507 
   8508 	switch (sc->sc_type) {
   8509 	case WM_T_ICH8:
   8510 	case WM_T_ICH9:
   8511 	case WM_T_ICH10:
   8512 	case WM_T_PCH:
   8513 	case WM_T_PCH2:
   8514 	case WM_T_PCH_LPT:
   8515 		rv = wm_check_mng_mode_ich8lan(sc);
   8516 		break;
   8517 	case WM_T_82574:
   8518 	case WM_T_82583:
   8519 		rv = wm_check_mng_mode_82574(sc);
   8520 		break;
   8521 	case WM_T_82571:
   8522 	case WM_T_82572:
   8523 	case WM_T_82573:
   8524 	case WM_T_80003:
   8525 		rv = wm_check_mng_mode_generic(sc);
   8526 		break;
   8527 	default:
    8528 		/* nothing to do */
   8529 		rv = 0;
   8530 		break;
   8531 	}
   8532 
   8533 	return rv;
   8534 }
   8535 
   8536 static int
   8537 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   8538 {
   8539 	uint32_t fwsm;
   8540 
   8541 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8542 
   8543 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   8544 		return 1;
   8545 
   8546 	return 0;
   8547 }
   8548 
   8549 static int
   8550 wm_check_mng_mode_82574(struct wm_softc *sc)
   8551 {
   8552 	uint16_t data;
   8553 
   8554 	wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8555 
   8556 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
   8557 		return 1;
   8558 
   8559 	return 0;
   8560 }
   8561 
   8562 static int
   8563 wm_check_mng_mode_generic(struct wm_softc *sc)
   8564 {
   8565 	uint32_t fwsm;
   8566 
   8567 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8568 
   8569 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   8570 		return 1;
   8571 
   8572 	return 0;
   8573 }
   8574 
   8575 static int
   8576 wm_enable_mng_pass_thru(struct wm_softc *sc)
   8577 {
   8578 	uint32_t manc, fwsm, factps;
   8579 
   8580 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   8581 		return 0;
   8582 
   8583 	manc = CSR_READ(sc, WMREG_MANC);
   8584 
   8585 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   8586 		device_xname(sc->sc_dev), manc));
   8587 	if ((manc & MANC_RECV_TCO_EN) == 0)
   8588 		return 0;
   8589 
   8590 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   8591 		fwsm = CSR_READ(sc, WMREG_FWSM);
   8592 		factps = CSR_READ(sc, WMREG_FACTPS);
   8593 		if (((factps & FACTPS_MNGCG) == 0)
   8594 		    && ((fwsm & FWSM_MODE_MASK)
   8595 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   8596 			return 1;
    8597 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   8598 		uint16_t data;
   8599 
   8600 		factps = CSR_READ(sc, WMREG_FACTPS);
   8601 		wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8602 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   8603 			device_xname(sc->sc_dev), factps, data));
   8604 		if (((factps & FACTPS_MNGCG) == 0)
   8605 		    && ((data & EEPROM_CFG2_MNGM_MASK)
   8606 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
   8607 			return 1;
   8608 	} else if (((manc & MANC_SMBUS_EN) != 0)
   8609 	    && ((manc & MANC_ASF_EN) == 0))
   8610 		return 1;
   8611 
   8612 	return 0;
   8613 }
   8614 
   8615 static int
   8616 wm_check_reset_block(struct wm_softc *sc)
   8617 {
   8618 	uint32_t reg;
   8619 
   8620 	switch (sc->sc_type) {
   8621 	case WM_T_ICH8:
   8622 	case WM_T_ICH9:
   8623 	case WM_T_ICH10:
   8624 	case WM_T_PCH:
   8625 	case WM_T_PCH2:
   8626 	case WM_T_PCH_LPT:
   8627 		reg = CSR_READ(sc, WMREG_FWSM);
   8628 		if ((reg & FWSM_RSPCIPHY) != 0)
   8629 			return 0;
   8630 		else
   8631 			return -1;
   8632 		break;
   8633 	case WM_T_82571:
   8634 	case WM_T_82572:
   8635 	case WM_T_82573:
   8636 	case WM_T_82574:
   8637 	case WM_T_82583:
   8638 	case WM_T_80003:
   8639 		reg = CSR_READ(sc, WMREG_MANC);
   8640 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8641 			return -1;
   8642 		else
   8643 			return 0;
   8644 		break;
   8645 	default:
   8646 		/* no problem */
   8647 		break;
   8648 	}
   8649 
   8650 	return 0;
   8651 }
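
/*
 * Callers treat a non-zero return as "firmware owns the PHY, don't
 * reset it"; compare wm_smbustopci() below.  A typical guard:
 */
#if 0
	if (wm_check_reset_block(sc) == 0)
		wm_gmii_reset(sc);	/* PHY reset is not blocked */
#endif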
   8652 
   8653 static void
   8654 wm_get_hw_control(struct wm_softc *sc)
   8655 {
   8656 	uint32_t reg;
   8657 
   8658 	switch (sc->sc_type) {
   8659 	case WM_T_82573:
   8660 		reg = CSR_READ(sc, WMREG_SWSM);
   8661 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8662 		break;
   8663 	case WM_T_82571:
   8664 	case WM_T_82572:
   8665 	case WM_T_82574:
   8666 	case WM_T_82583:
   8667 	case WM_T_80003:
   8668 	case WM_T_ICH8:
   8669 	case WM_T_ICH9:
   8670 	case WM_T_ICH10:
   8671 	case WM_T_PCH:
   8672 	case WM_T_PCH2:
   8673 	case WM_T_PCH_LPT:
   8674 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8675 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8676 		break;
   8677 	default:
   8678 		break;
   8679 	}
   8680 }
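
/*
 * DRV_LOAD advertises to the manageability firmware that a host driver
 * owns the interface.  wm_get_hw_control() is meant to be paired with
 * wm_release_hw_control() below (detach/WOL paths) so the firmware can
 * take the hardware back when the driver lets go.
 */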
   8681 
   8682 static void
   8683 wm_release_hw_control(struct wm_softc *sc)
   8684 {
   8685 	uint32_t reg;
   8686 
   8687 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8688 		return;
   8689 
   8690 	if (sc->sc_type == WM_T_82573) {
   8691 		reg = CSR_READ(sc, WMREG_SWSM);
   8692 		reg &= ~SWSM_DRV_LOAD;
    8693 		CSR_WRITE(sc, WMREG_SWSM, reg);
   8694 	} else {
   8695 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8696 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8697 	}
   8698 }
   8699 
   8700 static void
   8701 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   8702 {
   8703 	uint32_t reg;
   8704 
   8705 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8706 
   8707 	if (on != 0)
   8708 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   8709 	else
   8710 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   8711 
   8712 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8713 }
   8714 
   8715 static void
   8716 wm_smbustopci(struct wm_softc *sc)
   8717 {
   8718 	uint32_t fwsm;
   8719 
   8720 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8721 	if (((fwsm & FWSM_FW_VALID) == 0)
   8722 	    && ((wm_check_reset_block(sc) == 0))) {
   8723 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   8724 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   8725 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8726 		CSR_WRITE_FLUSH(sc);
   8727 		delay(10);
   8728 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   8729 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8730 		CSR_WRITE_FLUSH(sc);
   8731 		delay(50*1000);
   8732 
   8733 		/*
   8734 		 * Gate automatic PHY configuration by hardware on non-managed
   8735 		 * 82579
   8736 		 */
   8737 		if (sc->sc_type == WM_T_PCH2)
   8738 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   8739 	}
   8740 }
   8741 
   8742 static void
   8743 wm_init_manageability(struct wm_softc *sc)
   8744 {
   8745 
   8746 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8747 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8748 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8749 
   8750 		/* Disable hardware interception of ARP */
   8751 		manc &= ~MANC_ARP_EN;
   8752 
   8753 		/* Enable receiving management packets to the host */
   8754 		if (sc->sc_type >= WM_T_82571) {
   8755 			manc |= MANC_EN_MNG2HOST;
    8756 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   8757 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   8758 
   8759 		}
   8760 
   8761 		CSR_WRITE(sc, WMREG_MANC, manc);
   8762 	}
   8763 }
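
/*
 * Net effect of the above: ARP is no longer intercepted by the BMC,
 * while management packets matching the MANC2H_PORT_623/MANC2H_PORT_624
 * filters are still forwarded to the host on 82571 and newer MACs.
 */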
   8764 
   8765 static void
   8766 wm_release_manageability(struct wm_softc *sc)
   8767 {
   8768 
   8769 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8770 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8771 
   8772 		manc |= MANC_ARP_EN;
   8773 		if (sc->sc_type >= WM_T_82571)
   8774 			manc &= ~MANC_EN_MNG2HOST;
   8775 
   8776 		CSR_WRITE(sc, WMREG_MANC, manc);
   8777 	}
   8778 }
   8779 
   8780 static void
   8781 wm_get_wakeup(struct wm_softc *sc)
   8782 {
   8783 
   8784 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   8785 	switch (sc->sc_type) {
   8786 	case WM_T_82573:
   8787 	case WM_T_82583:
   8788 		sc->sc_flags |= WM_F_HAS_AMT;
   8789 		/* FALLTHROUGH */
   8790 	case WM_T_80003:
   8791 	case WM_T_82541:
   8792 	case WM_T_82547:
   8793 	case WM_T_82571:
   8794 	case WM_T_82572:
   8795 	case WM_T_82574:
   8796 	case WM_T_82575:
   8797 	case WM_T_82576:
   8798 	case WM_T_82580:
   8799 	case WM_T_82580ER:
   8800 	case WM_T_I350:
   8801 	case WM_T_I354:
   8802 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   8803 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   8804 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8805 		break;
   8806 	case WM_T_ICH8:
   8807 	case WM_T_ICH9:
   8808 	case WM_T_ICH10:
   8809 	case WM_T_PCH:
   8810 	case WM_T_PCH2:
   8811 	case WM_T_PCH_LPT:
   8812 		sc->sc_flags |= WM_F_HAS_AMT;
   8813 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8814 		break;
   8815 	default:
   8816 		break;
   8817 	}
   8818 
   8819 	/* 1: HAS_MANAGE */
   8820 	if (wm_enable_mng_pass_thru(sc) != 0)
   8821 		sc->sc_flags |= WM_F_HAS_MANAGE;
   8822 
   8823 #ifdef WM_DEBUG
   8824 	printf("\n");
   8825 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   8826 		printf("HAS_AMT,");
   8827 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   8828 		printf("ARC_SUBSYS_VALID,");
   8829 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   8830 		printf("ASF_FIRMWARE_PRES,");
   8831 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   8832 		printf("HAS_MANAGE,");
   8833 	printf("\n");
   8834 #endif
   8835 	/*
    8836 	 * Note that the WOL flags are set after the EEPROM settings
    8837 	 * have been handled.
   8838 	 */
   8839 }
   8840 
   8841 #ifdef WM_WOL
   8842 /* WOL in the newer chipset interfaces (pchlan) */
   8843 static void
   8844 wm_enable_phy_wakeup(struct wm_softc *sc)
   8845 {
   8846 #if 0
   8847 	uint16_t preg;
   8848 
   8849 	/* Copy MAC RARs to PHY RARs */
   8850 
   8851 	/* Copy MAC MTA to PHY MTA */
   8852 
   8853 	/* Configure PHY Rx Control register */
   8854 
   8855 	/* Enable PHY wakeup in MAC register */
   8856 
   8857 	/* Configure and enable PHY wakeup in PHY registers */
   8858 
   8859 	/* Activate PHY wakeup */
   8860 
   8861 	/* XXX */
   8862 #endif
   8863 }
   8864 
   8865 /* Power down workaround on D3 */
   8866 static void
   8867 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   8868 {
   8869 	uint32_t reg;
   8870 	int i;
   8871 
   8872 	for (i = 0; i < 2; i++) {
   8873 		/* Disable link */
   8874 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8875 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   8876 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8877 
   8878 		/*
   8879 		 * Call gig speed drop workaround on Gig disable before
   8880 		 * accessing any PHY registers
   8881 		 */
   8882 		if (sc->sc_type == WM_T_ICH8)
   8883 			wm_gig_downshift_workaround_ich8lan(sc);
   8884 
   8885 		/* Write VR power-down enable */
   8886 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8887 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8888 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   8889 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   8890 
   8891 		/* Read it back and test */
   8892 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8893 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8894 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   8895 			break;
   8896 
   8897 		/* Issue PHY reset and repeat at most one more time */
   8898 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8899 	}
   8900 }
   8901 
   8902 static void
   8903 wm_enable_wakeup(struct wm_softc *sc)
   8904 {
   8905 	uint32_t reg, pmreg;
   8906 	pcireg_t pmode;
   8907 
   8908 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   8909 		&pmreg, NULL) == 0)
   8910 		return;
   8911 
   8912 	/* Advertise the wakeup capability */
   8913 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   8914 	    | CTRL_SWDPIN(3));
   8915 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   8916 
   8917 	/* ICH workaround */
   8918 	switch (sc->sc_type) {
   8919 	case WM_T_ICH8:
   8920 	case WM_T_ICH9:
   8921 	case WM_T_ICH10:
   8922 	case WM_T_PCH:
   8923 	case WM_T_PCH2:
   8924 	case WM_T_PCH_LPT:
   8925 		/* Disable gig during WOL */
   8926 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8927 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   8928 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8929 		if (sc->sc_type == WM_T_PCH)
   8930 			wm_gmii_reset(sc);
   8931 
   8932 		/* Power down workaround */
   8933 		if (sc->sc_phytype == WMPHY_82577) {
   8934 			struct mii_softc *child;
   8935 
   8936 			/* Assume that the PHY is copper */
   8937 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   8938 			if (child->mii_mpd_rev <= 2)
   8939 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   8940 				    (768 << 5) | 25, 0x0444); /* magic num */
   8941 		}
   8942 		break;
   8943 	default:
   8944 		break;
   8945 	}
   8946 
   8947 	/* Keep the laser running on fiber adapters */
   8948 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
   8949 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
   8950 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8951 		reg |= CTRL_EXT_SWDPIN(3);
   8952 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8953 	}
   8954 
   8955 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   8956 #if 0	/* for the multicast packet */
   8957 	reg |= WUFC_MC;
   8958 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   8959 #endif
   8960 
   8961 	if (sc->sc_type == WM_T_PCH) {
   8962 		wm_enable_phy_wakeup(sc);
   8963 	} else {
   8964 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   8965 		CSR_WRITE(sc, WMREG_WUFC, reg);
   8966 	}
   8967 
   8968 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8969 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8970 		|| (sc->sc_type == WM_T_PCH2))
   8971 		    && (sc->sc_phytype == WMPHY_IGP_3))
   8972 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   8973 
   8974 	/* Request PME */
   8975 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   8976 #if 0
   8977 	/* Disable WOL */
   8978 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   8979 #else
   8980 	/* For WOL */
   8981 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   8982 #endif
   8983 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   8984 }
   8985 #endif /* WM_WOL */
   8986 
   8987 /* EEE */
   8988 
   8989 static void
   8990 wm_set_eee_i350(struct wm_softc *sc)
   8991 {
   8992 	uint32_t ipcnfg, eeer;
   8993 
   8994 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   8995 	eeer = CSR_READ(sc, WMREG_EEER);
   8996 
   8997 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   8998 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   8999 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9000 		    | EEER_LPI_FC);
   9001 	} else {
   9002 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9003 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9004 		    | EEER_LPI_FC);
   9005 	}
   9006 
   9007 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   9008 	CSR_WRITE(sc, WMREG_EEER, eeer);
   9009 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   9010 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   9011 }
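
/*
 * Usage sketch: EEE on I350-class MACs is toggled by flipping the flag
 * and re-running this function (illustrative only):
 */
#if 0
	sc->sc_flags |= WM_F_EEE;	/* or: sc->sc_flags &= ~WM_F_EEE */
	wm_set_eee_i350(sc);
#endif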
   9012 
   9013 /*
   9014  * Workarounds (mainly PHY related).
   9015  * Basically, PHY's workarounds are in the PHY drivers.
   9016  */
   9017 
   9018 /* Work-around for 82566 Kumeran PCS lock loss */
   9019 static void
   9020 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   9021 {
   9022 	int miistatus, active, i;
   9023 	int reg;
   9024 
   9025 	miistatus = sc->sc_mii.mii_media_status;
   9026 
   9027 	/* If the link is not up, do nothing */
    9028 	if ((miistatus & IFM_ACTIVE) == 0)
   9029 		return;
   9030 
   9031 	active = sc->sc_mii.mii_media_active;
   9032 
    9033 	/* Nothing to do unless the link is running at 1Gbps */
   9034 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   9035 		return;
   9036 
   9037 	for (i = 0; i < 10; i++) {
   9038 		/* read twice */
   9039 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   9040 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    9041 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   9042 			goto out;	/* GOOD! */
   9043 
   9044 		/* Reset the PHY */
   9045 		wm_gmii_reset(sc);
   9046 		delay(5*1000);
   9047 	}
   9048 
   9049 	/* Disable GigE link negotiation */
   9050 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9051 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9052 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9053 
   9054 	/*
   9055 	 * Call gig speed drop workaround on Gig disable before accessing
   9056 	 * any PHY registers.
   9057 	 */
   9058 	wm_gig_downshift_workaround_ich8lan(sc);
   9059 
   9060 out:
   9061 	return;
   9062 }
   9063 
   9064 /* WOL from S5 stops working */
   9065 static void
   9066 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   9067 {
   9068 	uint16_t kmrn_reg;
   9069 
   9070 	/* Only for igp3 */
   9071 	if (sc->sc_phytype == WMPHY_IGP_3) {
   9072 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   9073 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   9074 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9075 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   9076 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9077 	}
   9078 }
   9079 
   9080 /*
   9081  * Workaround for pch's PHYs
   9082  * XXX should be moved to new PHY driver?
   9083  */
   9084 static void
   9085 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   9086 {
   9087 	if (sc->sc_phytype == WMPHY_82577)
   9088 		wm_set_mdio_slow_mode_hv(sc);
   9089 
   9090 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   9091 
   9092 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   9093 
   9094 	/* 82578 */
   9095 	if (sc->sc_phytype == WMPHY_82578) {
   9096 		/* PCH rev. < 3 */
   9097 		if (sc->sc_rev < 3) {
   9098 			/* XXX 6 bit shift? Why? Is it page2? */
   9099 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   9100 			    0x66c0);
   9101 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   9102 			    0xffff);
   9103 		}
   9104 
   9105 		/* XXX phy rev. < 2 */
   9106 	}
   9107 
   9108 	/* Select page 0 */
   9109 
   9110 	/* XXX acquire semaphore */
   9111 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   9112 	/* XXX release semaphore */
   9113 
   9114 	/*
    9115 	 * Configure the K1 Si workaround during PHY reset, assuming link
    9116 	 * is up, so that K1 is disabled when the link runs at 1Gbps.
   9117 	 */
   9118 	wm_k1_gig_workaround_hv(sc, 1);
   9119 }
   9120 
   9121 static void
   9122 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   9123 {
   9124 
   9125 	wm_set_mdio_slow_mode_hv(sc);
   9126 }
   9127 
   9128 static void
   9129 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   9130 {
   9131 	int k1_enable = sc->sc_nvm_k1_enabled;
   9132 
   9133 	/* XXX acquire semaphore */
   9134 
   9135 	if (link) {
   9136 		k1_enable = 0;
   9137 
   9138 		/* Link stall fix for link up */
   9139 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   9140 	} else {
   9141 		/* Link stall fix for link down */
   9142 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   9143 	}
   9144 
   9145 	wm_configure_k1_ich8lan(sc, k1_enable);
   9146 
   9147 	/* XXX release semaphore */
   9148 }
   9149 
   9150 static void
   9151 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   9152 {
   9153 	uint32_t reg;
   9154 
   9155 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   9156 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   9157 	    reg | HV_KMRN_MDIO_SLOW);
   9158 }
   9159 
   9160 static void
   9161 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   9162 {
   9163 	uint32_t ctrl, ctrl_ext, tmp;
   9164 	uint16_t kmrn_reg;
   9165 
   9166 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   9167 
   9168 	if (k1_enable)
   9169 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   9170 	else
   9171 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   9172 
   9173 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   9174 
   9175 	delay(20);
   9176 
   9177 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9178 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9179 
   9180 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   9181 	tmp |= CTRL_FRCSPD;
   9182 
   9183 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   9184 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   9185 	CSR_WRITE_FLUSH(sc);
   9186 	delay(20);
   9187 
   9188 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   9189 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9190 	CSR_WRITE_FLUSH(sc);
   9191 	delay(20);
   9192 }
   9193 
    9194 /* Special case: the 82575 needs this manual init sequence ... */
   9195 static void
   9196 wm_reset_init_script_82575(struct wm_softc *sc)
   9197 {
   9198 	/*
    9199 	 * Remark: this is untested code - we have no board without EEPROM.
    9200 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   9201 	 */
   9202 
   9203 	/* SerDes configuration via SERDESCTRL */
   9204 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   9205 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   9206 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   9207 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   9208 
   9209 	/* CCM configuration via CCMCTL register */
   9210 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   9211 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   9212 
   9213 	/* PCIe lanes configuration */
   9214 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   9215 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   9216 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   9217 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   9218 
   9219 	/* PCIe PLL Configuration */
   9220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   9221 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   9222 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   9223 }
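
/*
 * Each wm_82575_write_8bit_ctlr_reg() call above pokes a single 8-bit
 * value into a sub-register of the named analog control block (first
 * argument selects the block register, then the sub-address, then the
 * data byte).  The magic address/data pairs are carried over as-is
 * from the FreeBSD init sequence and are not otherwise documented.
 */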
   9224