/*	$NetBSD: if_wm.c,v 1.285 2014/07/31 03:50:09 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.285 2014/07/31 03:50:09 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
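
/*
 * Illustrative only (not part of the original driver logic): DPRINTF()'s
 * second argument must be a fully parenthesized printf(9) argument list,
 * because the macro expands to "printf y".  A minimal usage sketch:
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 *
 * With WM_DEBUG undefined the call compiles away to nothing.
 */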

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

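/*
 * Illustrative only: because both ring sizes are powers of two, the
 * "& (size - 1)" masks above give cheap wrap-around.  E.g. with
 * WM_NTXDESC(sc) == 4096, WM_NEXTTX(sc, 4095) == 0; with the 256-entry
 * ring of the < 82544 chips, WM_NEXTTX(sc, 255) == 0.
 */
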
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
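
/*
 * Illustrative only: this table is indexed by the chip function number
 * (sc_funcid below, 0 to 3), so swfwphysem[sc->sc_funcid] yields the
 * software/firmware semaphore bit guarding that function's PHY.
 */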

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
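
/*
 * Illustrative only, not part of the original code: a minimal sketch of
 * how the locking macros above are meant to be used.  When the mutexes
 * are NULL (non-WM_MPSAFE configurations), each macro degenerates to a
 * no-op and the WM_*_LOCKED() checks still evaluate true.
 */
#if 0
static void
wm_example_tx_critical(struct wm_softc *sc)
{

	WM_TX_LOCK(sc);
	KASSERT(WM_TX_LOCKED(sc));
	/* ... modify Tx ring state here ... */
	WM_TX_UNLOCK(sc);
}
#endif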

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
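
/*
 * Illustrative only: sc_rxtailp always points at the location (first
 * sc_rxhead itself, afterwards the previous mbuf's m_next field) where
 * the next fragment will be linked.  After WM_RXCHAIN_RESET() and two
 * WM_RXCHAIN_LINK() calls the chain is sc_rxhead -> m1 -> m2, with
 * sc_rxtailp == &m2->m_next.
 */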

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
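
/*
 * Illustrative only: on a platform with an 8-byte bus_addr_t, a control
 * data address such as 0x123456780 splits into LO == 0x23456780 and
 * HI == 0x1; with a 4-byte bus_addr_t, HI is constant 0.
 */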

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
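
/*
 * Illustrative only: a sync that would run past the end of the ring is
 * split in two.  E.g. syncing n == 4 descriptors from x == 4094 in a
 * 4096-entry ring syncs [4094..4095] first, then wraps to sync [0..1].
 */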

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
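
/*
 * Illustrative only: the 2-byte "scoot" works because the Ethernet
 * header is 14 bytes long, so with sc_align_tweak == 2 the IP header
 * lands at buffer offset 16, a 4-byte boundary.  The cost, as noted
 * above, is that the usable buffer shrinks to (2K - 2) bytes.
 */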

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
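
/*
 * Illustrative only: wm_io_read()/wm_io_write() implement the chip's
 * indirect I/O window.  The register offset goes to the address port at
 * I/O BAR offset 0; the value is then transferred through the data port
 * at offset 4.
 */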

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
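
/*
 * Illustrative only, a hedged usage sketch: callers pass a controller
 * register (the serdes control register, WMREG_SCTL, is assumed here
 * purely for illustration), an 8-bit sub-register offset and a data
 * byte:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 *
 * The routine then polls in 5 microsecond steps, for up to
 * SCTL_CTL_POLL_TIMEOUT iterations, waiting for SCTL_CTL_READY before
 * giving up with a warning.
 */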

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}
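
/*
 * Illustrative only: the walk above stops at the all-zero terminator of
 * wm_products[] (wmp_name == NULL), so new device entries must be added
 * before that sentinel.
 */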

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
    1321 	 * Map the device.  All devices support memory-mapped access,
   1322 	 * and it is really required for normal operation.
   1323 	 */
   1324 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1325 	switch (memtype) {
   1326 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1327 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1328 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1329 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1330 		break;
   1331 	default:
   1332 		memh_valid = 0;
   1333 		break;
   1334 	}
   1335 
   1336 	if (memh_valid) {
   1337 		sc->sc_st = memt;
   1338 		sc->sc_sh = memh;
   1339 		sc->sc_ss = memsize;
   1340 	} else {
   1341 		aprint_error_dev(sc->sc_dev,
   1342 		    "unable to map device registers\n");
   1343 		return;
   1344 	}
   1345 
   1346 	/*
   1347 	 * In addition, i82544 and later support I/O mapped indirect
   1348 	 * register access.  It is not desirable (nor supported in
   1349 	 * this driver) to use it for normal operation, though it is
   1350 	 * required to work around bugs in some chip versions.
   1351 	 */
   1352 	if (sc->sc_type >= WM_T_82544) {
   1353 		/* First we have to find the I/O BAR. */
   1354 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1355 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1356 			if (memtype == PCI_MAPREG_TYPE_IO)
   1357 				break;
   1358 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1359 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1360 				i += 4;	/* skip high bits, too */
   1361 		}
   1362 		if (i < PCI_MAPREG_END) {
   1363 			/*
    1364 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1365 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO; that
    1366 			 * is harmless, because those newer chips don't have
    1367 			 * the bug this mapping works around.
    1368 			 *
    1369 			 * The i8254x apparently doesn't respond when the
    1370 			 * I/O BAR is 0, which looks as if it has never
    1371 			 * been configured.
   1372 			 */
   1373 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1374 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1375 				aprint_error_dev(sc->sc_dev,
   1376 				    "WARNING: I/O BAR at zero.\n");
   1377 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1378 					0, &sc->sc_iot, &sc->sc_ioh,
   1379 					NULL, &sc->sc_ios) == 0) {
   1380 				sc->sc_flags |= WM_F_IOH_VALID;
   1381 			} else {
   1382 				aprint_error_dev(sc->sc_dev,
   1383 				    "WARNING: unable to map I/O space\n");
   1384 			}
   1385 		}
   1387 	}
   1388 
   1389 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1390 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1391 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1392 	if (sc->sc_type < WM_T_82542_2_1)
   1393 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1394 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1395 
   1396 	/* power up chip */
   1397 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1398 	    NULL)) && error != EOPNOTSUPP) {
   1399 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1400 		return;
   1401 	}
   1402 
   1403 	/*
   1404 	 * Map and establish our interrupt.
   1405 	 */
   1406 	if (pci_intr_map(pa, &ih)) {
   1407 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1408 		return;
   1409 	}
   1410 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1411 #ifdef WM_MPSAFE
   1412 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1413 #endif
   1414 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1415 	if (sc->sc_ih == NULL) {
   1416 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1417 		if (intrstr != NULL)
   1418 			aprint_error(" at %s", intrstr);
   1419 		aprint_error("\n");
   1420 		return;
   1421 	}
   1422 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1423 
   1424 	/*
   1425 	 * Check the function ID (unit number of the chip).
   1426 	 */
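	/*
	 * Multi-port MACs report in the STATUS register which LAN port
	 * this PCI function is attached to; single-port parts have no
	 * such field and are treated as function 0 below.
	 */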
   1427 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1428 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1429 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1430 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1431 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1432 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1433 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1434 	else
   1435 		sc->sc_funcid = 0;
   1436 
   1437 	/*
   1438 	 * Determine a few things about the bus we're connected to.
   1439 	 */
   1440 	if (sc->sc_type < WM_T_82543) {
   1441 		/* We don't really know the bus characteristics here. */
   1442 		sc->sc_bus_speed = 33;
   1443 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1444 		/*
    1445 		 * CSA (Communication Streaming Architecture) is about as fast
    1446 		 * as a 32-bit 66MHz PCI bus.
   1447 		 */
   1448 		sc->sc_flags |= WM_F_CSA;
   1449 		sc->sc_bus_speed = 66;
   1450 		aprint_verbose_dev(sc->sc_dev,
   1451 		    "Communication Streaming Architecture\n");
   1452 		if (sc->sc_type == WM_T_82547) {
   1453 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1454 			callout_setfunc(&sc->sc_txfifo_ch,
   1455 					wm_82547_txfifo_stall, sc);
   1456 			aprint_verbose_dev(sc->sc_dev,
   1457 			    "using 82547 Tx FIFO stall work-around\n");
   1458 		}
   1459 	} else if (sc->sc_type >= WM_T_82571) {
   1460 		sc->sc_flags |= WM_F_PCIE;
   1461 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1462 		    && (sc->sc_type != WM_T_ICH10)
   1463 		    && (sc->sc_type != WM_T_PCH)
   1464 		    && (sc->sc_type != WM_T_PCH2)
   1465 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1466 			/* ICH* and PCH* have no PCIe capability registers */
   1467 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1468 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1469 				NULL) == 0)
   1470 				aprint_error_dev(sc->sc_dev,
   1471 				    "unable to find PCIe capability\n");
   1472 		}
   1473 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1474 	} else {
   1475 		reg = CSR_READ(sc, WMREG_STATUS);
   1476 		if (reg & STATUS_BUS64)
   1477 			sc->sc_flags |= WM_F_BUS64;
   1478 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1479 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1480 
   1481 			sc->sc_flags |= WM_F_PCIX;
   1482 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1483 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1484 				aprint_error_dev(sc->sc_dev,
   1485 				    "unable to find PCIX capability\n");
   1486 			else if (sc->sc_type != WM_T_82545_3 &&
   1487 				 sc->sc_type != WM_T_82546_3) {
   1488 				/*
   1489 				 * Work around a problem caused by the BIOS
   1490 				 * setting the max memory read byte count
   1491 				 * incorrectly.
   1492 				 */
   1493 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1494 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1495 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1496 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1497 
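				/*
				 * Both fields encode a power-of-two
				 * burst size, 0 -> 512 bytes through
				 * 3 -> 4096 bytes; if the command
				 * register asks for more than the
				 * status register allows, clamp it.
				 */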
   1498 				bytecnt =
   1499 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1500 				    PCIX_CMD_BYTECNT_SHIFT;
   1501 				maxb =
   1502 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1503 				    PCIX_STATUS_MAXB_SHIFT;
   1504 				if (bytecnt > maxb) {
   1505 					aprint_verbose_dev(sc->sc_dev,
   1506 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1507 					    512 << bytecnt, 512 << maxb);
   1508 					pcix_cmd = (pcix_cmd &
   1509 					    ~PCIX_CMD_BYTECNT_MASK) |
   1510 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1511 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1512 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1513 					    pcix_cmd);
   1514 				}
   1515 			}
   1516 		}
   1517 		/*
   1518 		 * The quad port adapter is special; it has a PCIX-PCIX
   1519 		 * bridge on the board, and can run the secondary bus at
   1520 		 * a higher speed.
   1521 		 */
   1522 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1523 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1524 								      : 66;
   1525 		} else if (sc->sc_flags & WM_F_PCIX) {
   1526 			switch (reg & STATUS_PCIXSPD_MASK) {
   1527 			case STATUS_PCIXSPD_50_66:
   1528 				sc->sc_bus_speed = 66;
   1529 				break;
   1530 			case STATUS_PCIXSPD_66_100:
   1531 				sc->sc_bus_speed = 100;
   1532 				break;
   1533 			case STATUS_PCIXSPD_100_133:
   1534 				sc->sc_bus_speed = 133;
   1535 				break;
   1536 			default:
   1537 				aprint_error_dev(sc->sc_dev,
   1538 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1539 				    reg & STATUS_PCIXSPD_MASK);
   1540 				sc->sc_bus_speed = 66;
   1541 				break;
   1542 			}
   1543 		} else
   1544 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1545 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1546 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1547 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1548 	}
   1549 
   1550 	/*
   1551 	 * Allocate the control data structures, and create and load the
   1552 	 * DMA map for it.
   1553 	 *
   1554 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1555 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1556 	 * both sets within the same 4G segment.
   1557 	 */
   1558 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1559 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1560 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1561 	    sizeof(struct wm_control_data_82542) :
   1562 	    sizeof(struct wm_control_data_82544);
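	/*
	 * The 4G "boundary" argument to bus_dmamem_alloc() below is
	 * what enforces the NOTE above: the returned segment may not
	 * cross a 4GB boundary, so both descriptor rings land in the
	 * same 4G window.
	 */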
   1563 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1564 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1565 		    &sc->sc_cd_rseg, 0)) != 0) {
   1566 		aprint_error_dev(sc->sc_dev,
   1567 		    "unable to allocate control data, error = %d\n",
   1568 		    error);
   1569 		goto fail_0;
   1570 	}
   1571 
   1572 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1573 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1574 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1575 		aprint_error_dev(sc->sc_dev,
   1576 		    "unable to map control data, error = %d\n", error);
   1577 		goto fail_1;
   1578 	}
   1579 
   1580 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1581 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1582 		aprint_error_dev(sc->sc_dev,
   1583 		    "unable to create control data DMA map, error = %d\n",
   1584 		    error);
   1585 		goto fail_2;
   1586 	}
   1587 
   1588 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1589 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1590 		aprint_error_dev(sc->sc_dev,
   1591 		    "unable to load control data DMA map, error = %d\n",
   1592 		    error);
   1593 		goto fail_3;
   1594 	}
   1595 
   1596 	/* Create the transmit buffer DMA maps. */
   1597 	WM_TXQUEUELEN(sc) =
   1598 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1599 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1600 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1601 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1602 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1603 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1604 			aprint_error_dev(sc->sc_dev,
   1605 			    "unable to create Tx DMA map %d, error = %d\n",
   1606 			    i, error);
   1607 			goto fail_4;
   1608 		}
   1609 	}
   1610 
   1611 	/* Create the receive buffer DMA maps. */
   1612 	for (i = 0; i < WM_NRXDESC; i++) {
   1613 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1614 			    MCLBYTES, 0, 0,
   1615 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1616 			aprint_error_dev(sc->sc_dev,
   1617 			    "unable to create Rx DMA map %d error = %d\n",
   1618 			    i, error);
   1619 			goto fail_5;
   1620 		}
   1621 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1622 	}
   1623 
    1624 	/* Clear interesting stat counters (these registers clear on read) */
   1625 	CSR_READ(sc, WMREG_COLC);
   1626 	CSR_READ(sc, WMREG_RXERRC);
   1627 
    1628 	/* Move PHY control from SMBus to PCIe */
   1629 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1630 	    || (sc->sc_type == WM_T_PCH_LPT))
   1631 		wm_smbustopci(sc);
   1632 
   1633 	/* Reset the chip to a known state. */
   1634 	wm_reset(sc);
   1635 
   1636 	/* Get some information about the EEPROM. */
   1637 	switch (sc->sc_type) {
   1638 	case WM_T_82542_2_0:
   1639 	case WM_T_82542_2_1:
   1640 	case WM_T_82543:
   1641 	case WM_T_82544:
   1642 		/* Microwire */
   1643 		sc->sc_ee_addrbits = 6;
   1644 		break;
   1645 	case WM_T_82540:
   1646 	case WM_T_82545:
   1647 	case WM_T_82545_3:
   1648 	case WM_T_82546:
   1649 	case WM_T_82546_3:
   1650 		/* Microwire */
   1651 		reg = CSR_READ(sc, WMREG_EECD);
   1652 		if (reg & EECD_EE_SIZE)
   1653 			sc->sc_ee_addrbits = 8;
   1654 		else
   1655 			sc->sc_ee_addrbits = 6;
   1656 		sc->sc_flags |= WM_F_LOCK_EECD;
   1657 		break;
   1658 	case WM_T_82541:
   1659 	case WM_T_82541_2:
   1660 	case WM_T_82547:
   1661 	case WM_T_82547_2:
   1662 		reg = CSR_READ(sc, WMREG_EECD);
   1663 		if (reg & EECD_EE_TYPE) {
   1664 			/* SPI */
   1665 			wm_set_spiaddrbits(sc);
   1666 		} else
   1667 			/* Microwire */
   1668 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1669 		sc->sc_flags |= WM_F_LOCK_EECD;
   1670 		break;
   1671 	case WM_T_82571:
   1672 	case WM_T_82572:
   1673 		/* SPI */
   1674 		wm_set_spiaddrbits(sc);
   1675 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1676 		break;
   1677 	case WM_T_82573:
   1678 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1679 		/* FALLTHROUGH */
   1680 	case WM_T_82574:
   1681 	case WM_T_82583:
   1682 		if (wm_nvm_is_onboard_eeprom(sc) == 0)
   1683 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1684 		else {
   1685 			/* SPI */
   1686 			wm_set_spiaddrbits(sc);
   1687 		}
   1688 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1689 		break;
   1690 	case WM_T_82575:
   1691 	case WM_T_82576:
   1692 	case WM_T_82580:
   1693 	case WM_T_82580ER:
   1694 	case WM_T_I350:
   1695 	case WM_T_I354:
   1696 	case WM_T_80003:
   1697 		/* SPI */
   1698 		wm_set_spiaddrbits(sc);
   1699 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1700 		    | WM_F_LOCK_SWSM;
   1701 		break;
   1702 	case WM_T_ICH8:
   1703 	case WM_T_ICH9:
   1704 	case WM_T_ICH10:
   1705 	case WM_T_PCH:
   1706 	case WM_T_PCH2:
   1707 	case WM_T_PCH_LPT:
   1708 		/* FLASH */
   1709 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1710 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1711 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1712 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1713 			aprint_error_dev(sc->sc_dev,
   1714 			    "can't map FLASH registers\n");
   1715 			return;
   1716 		}
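		/*
		 * GFPREG holds the first and last sectors of the flash
		 * region reserved for the NVM image.  Half of that
		 * region is one NVM bank, and the size is kept in
		 * 16-bit words, hence the sector -> byte -> word
		 * conversions below.
		 */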
   1717 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1718 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1719 						ICH_FLASH_SECTOR_SIZE;
   1720 		sc->sc_ich8_flash_bank_size =
   1721 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1722 		sc->sc_ich8_flash_bank_size -=
   1723 		    (reg & ICH_GFPREG_BASE_MASK);
   1724 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1725 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1726 		break;
   1727 	case WM_T_I210:
   1728 	case WM_T_I211:
   1729 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1730 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1731 		break;
   1732 	default:
   1733 		break;
   1734 	}
   1735 
   1736 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1737 	switch (sc->sc_type) {
   1738 	case WM_T_82571:
   1739 	case WM_T_82572:
   1740 		reg = CSR_READ(sc, WMREG_SWSM2);
   1741 		if ((reg & SWSM2_LOCK) != 0) {
   1742 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1743 			force_clear_smbi = true;
   1744 		} else
   1745 			force_clear_smbi = false;
   1746 		break;
   1747 	case WM_T_82573:
   1748 	case WM_T_82574:
   1749 	case WM_T_82583:
   1750 		force_clear_smbi = true;
   1751 		break;
   1752 	default:
   1753 		force_clear_smbi = false;
   1754 		break;
   1755 	}
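	/*
	 * SWSM_SMBI is the software NVM/PHY access semaphore.  If a
	 * buggy boot agent left it set, every later attempt to take
	 * the semaphore would fail, so clear it by hand before the
	 * first NVM or PHY access.
	 */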
   1756 	if (force_clear_smbi) {
   1757 		reg = CSR_READ(sc, WMREG_SWSM);
   1758 		if ((reg & SWSM_SMBI) != 0)
   1759 			aprint_error_dev(sc->sc_dev,
   1760 			    "Please update the Bootagent\n");
   1761 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1762 	}
   1763 
   1764 	/*
    1765 	 * Defer printing the EEPROM type until after verifying the checksum.
   1766 	 * This allows the EEPROM type to be printed correctly in the case
   1767 	 * that no EEPROM is attached.
   1768 	 */
   1769 	/*
   1770 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1771 	 * this for later, so we can fail future reads from the EEPROM.
   1772 	 */
   1773 	if (wm_nvm_validate_checksum(sc)) {
   1774 		/*
    1775 		 * Check a second time, because some PCIe parts fail the
    1776 		 * first read while the link is in a sleep state.
   1777 		 */
   1778 		if (wm_nvm_validate_checksum(sc))
   1779 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1780 	}
   1781 
   1782 	/* Set device properties (macflags) */
   1783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1784 
   1785 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1786 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1787 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1788 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
   1789 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1790 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
   1791 	} else {
   1792 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1793 			eetype = "SPI";
   1794 		else
   1795 			eetype = "MicroWire";
   1796 		aprint_verbose_dev(sc->sc_dev,
   1797 		    "%u word (%d address bits) %s EEPROM\n",
   1798 		    1U << sc->sc_ee_addrbits,
   1799 		    sc->sc_ee_addrbits, eetype);
   1800 	}
   1801 
   1802 	switch (sc->sc_type) {
   1803 	case WM_T_82571:
   1804 	case WM_T_82572:
   1805 	case WM_T_82573:
   1806 	case WM_T_82574:
   1807 	case WM_T_82583:
   1808 	case WM_T_80003:
   1809 	case WM_T_ICH8:
   1810 	case WM_T_ICH9:
   1811 	case WM_T_ICH10:
   1812 	case WM_T_PCH:
   1813 	case WM_T_PCH2:
   1814 	case WM_T_PCH_LPT:
   1815 		if (wm_check_mng_mode(sc) != 0)
   1816 			wm_get_hw_control(sc);
   1817 		break;
   1818 	default:
   1819 		break;
   1820 	}
   1821 	wm_get_wakeup(sc);
   1822 	/*
    1823 	 * Read the Ethernet address from the device properties if it
    1824 	 * is set there; otherwise fall back to the EEPROM.
   1825 	 */
   1826 	ea = prop_dictionary_get(dict, "mac-address");
   1827 	if (ea != NULL) {
   1828 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1829 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1830 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1831 	} else {
   1832 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1833 			aprint_error_dev(sc->sc_dev,
   1834 			    "unable to read Ethernet address\n");
   1835 			return;
   1836 		}
   1837 	}
   1838 
   1839 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1840 	    ether_sprintf(enaddr));
   1841 
   1842 	/*
   1843 	 * Read the config info from the EEPROM, and set up various
   1844 	 * bits in the control registers based on their contents.
   1845 	 */
   1846 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1847 	if (pn != NULL) {
   1848 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1849 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1850 	} else {
   1851 		if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1852 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1853 			return;
   1854 		}
   1855 	}
   1856 
   1857 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1858 	if (pn != NULL) {
   1859 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1860 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1861 	} else {
   1862 		if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1863 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1864 			return;
   1865 		}
   1866 	}
   1867 
   1868 	/* check for WM_F_WOL */
   1869 	switch (sc->sc_type) {
   1870 	case WM_T_82542_2_0:
   1871 	case WM_T_82542_2_1:
   1872 	case WM_T_82543:
   1873 		/* dummy? */
   1874 		eeprom_data = 0;
   1875 		apme_mask = EEPROM_CFG3_APME;
   1876 		break;
   1877 	case WM_T_82544:
   1878 		apme_mask = EEPROM_CFG2_82544_APM_EN;
   1879 		eeprom_data = cfg2;
   1880 		break;
   1881 	case WM_T_82546:
   1882 	case WM_T_82546_3:
   1883 	case WM_T_82571:
   1884 	case WM_T_82572:
   1885 	case WM_T_82573:
   1886 	case WM_T_82574:
   1887 	case WM_T_82583:
   1888 	case WM_T_80003:
   1889 	default:
   1890 		apme_mask = EEPROM_CFG3_APME;
   1891 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
   1892 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1893 		break;
   1894 	case WM_T_82575:
   1895 	case WM_T_82576:
   1896 	case WM_T_82580:
   1897 	case WM_T_82580ER:
   1898 	case WM_T_I350:
   1899 	case WM_T_I354: /* XXX ok? */
   1900 	case WM_T_ICH8:
   1901 	case WM_T_ICH9:
   1902 	case WM_T_ICH10:
   1903 	case WM_T_PCH:
   1904 	case WM_T_PCH2:
   1905 	case WM_T_PCH_LPT:
   1906 		/* XXX The funcid should be checked on some devices */
   1907 		apme_mask = WUC_APME;
   1908 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1909 		break;
   1910 	}
   1911 
    1912 	/* Set the WM_F_WOL flag now that the APME bit has been read */
   1913 	if ((eeprom_data & apme_mask) != 0)
   1914 		sc->sc_flags |= WM_F_WOL;
   1915 #ifdef WM_DEBUG
   1916 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1917 		printf("WOL\n");
   1918 #endif
   1919 
   1920 	/*
    1921 	 * XXX need special handling for some multiple-port cards
    1922 	 * to disable a particular port.
   1923 	 */
   1924 
   1925 	if (sc->sc_type >= WM_T_82544) {
   1926 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1927 		if (pn != NULL) {
   1928 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1929 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1930 		} else {
   1931 			if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1932 				aprint_error_dev(sc->sc_dev,
   1933 				    "unable to read SWDPIN\n");
   1934 				return;
   1935 			}
   1936 		}
   1937 	}
   1938 
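	/*
	 * CTRL_ILOS inverts the sense of the loss-of-signal input;
	 * the SWDPIO and SWDPIN fields preload the direction and
	 * value of the software-controllable pins from the EEPROM
	 * defaults read above.
	 */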
   1939 	if (cfg1 & EEPROM_CFG1_ILOS)
   1940 		sc->sc_ctrl |= CTRL_ILOS;
   1941 	if (sc->sc_type >= WM_T_82544) {
   1942 		sc->sc_ctrl |=
   1943 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1944 		    CTRL_SWDPIO_SHIFT;
   1945 		sc->sc_ctrl |=
   1946 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1947 		    CTRL_SWDPINS_SHIFT;
   1948 	} else {
   1949 		sc->sc_ctrl |=
   1950 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1951 		    CTRL_SWDPIO_SHIFT;
   1952 	}
   1953 
   1954 #if 0
   1955 	if (sc->sc_type >= WM_T_82544) {
   1956 		if (cfg1 & EEPROM_CFG1_IPS0)
   1957 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1958 		if (cfg1 & EEPROM_CFG1_IPS1)
   1959 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1960 		sc->sc_ctrl_ext |=
   1961 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1962 		    CTRL_EXT_SWDPIO_SHIFT;
   1963 		sc->sc_ctrl_ext |=
   1964 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1965 		    CTRL_EXT_SWDPINS_SHIFT;
   1966 	} else {
   1967 		sc->sc_ctrl_ext |=
   1968 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1969 		    CTRL_EXT_SWDPIO_SHIFT;
   1970 	}
   1971 #endif
   1972 
   1973 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1974 #if 0
   1975 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1976 #endif
   1977 
   1978 	/*
   1979 	 * Set up some register offsets that are different between
   1980 	 * the i82542 and the i82543 and later chips.
   1981 	 */
   1982 	if (sc->sc_type < WM_T_82543) {
   1983 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1984 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1985 	} else {
   1986 		sc->sc_rdt_reg = WMREG_RDT;
   1987 		sc->sc_tdt_reg = WMREG_TDT;
   1988 	}
   1989 
   1990 	if (sc->sc_type == WM_T_PCH) {
   1991 		uint16_t val;
   1992 
   1993 		/* Save the NVM K1 bit setting */
   1994 		wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
   1995 
   1996 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
   1997 			sc->sc_nvm_k1_enabled = 1;
   1998 		else
   1999 			sc->sc_nvm_k1_enabled = 0;
   2000 	}
   2001 
   2002 	/*
    2003 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2004 	 * media structures accordingly.
   2005 	 */
   2006 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2007 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2008 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2009 	    || sc->sc_type == WM_T_82573
   2010 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2011 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2012 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2013 	} else if (sc->sc_type < WM_T_82543 ||
   2014 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2015 		if (wmp->wmp_flags & WMP_F_1000T)
   2016 			aprint_error_dev(sc->sc_dev,
   2017 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2018 		wm_tbi_mediainit(sc);
   2019 	} else {
   2020 		switch (sc->sc_type) {
   2021 		case WM_T_82575:
   2022 		case WM_T_82576:
   2023 		case WM_T_82580:
   2024 		case WM_T_82580ER:
   2025 		case WM_T_I350:
   2026 		case WM_T_I354:
   2027 		case WM_T_I210:
   2028 		case WM_T_I211:
   2029 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2030 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
   2031 			case CTRL_EXT_LINK_MODE_1000KX:
   2032 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2033 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2034 				    reg | CTRL_EXT_I2C_ENA);
   2035 				panic("not supported yet\n");
   2036 				break;
   2037 			case CTRL_EXT_LINK_MODE_SGMII:
   2038 				if (wm_sgmii_uses_mdio(sc)) {
   2039 					aprint_verbose_dev(sc->sc_dev,
   2040 					    "SGMII(MDIO)\n");
   2041 					sc->sc_flags |= WM_F_SGMII;
   2042 					wm_gmii_mediainit(sc,
   2043 					    wmp->wmp_product);
   2044 					break;
   2045 				}
   2046 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2047 				/*FALLTHROUGH*/
   2048 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2049 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2050 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2051 				    reg | CTRL_EXT_I2C_ENA);
   2052 				panic("not supported yet\n");
   2053 				break;
   2054 			case CTRL_EXT_LINK_MODE_GMII:
   2055 			default:
   2056 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2057 				    reg & ~CTRL_EXT_I2C_ENA);
   2058 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2059 				break;
   2060 			}
   2061 			break;
   2062 		default:
   2063 			if (wmp->wmp_flags & WMP_F_1000X)
   2064 				aprint_error_dev(sc->sc_dev,
   2065 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2066 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2067 		}
   2068 	}
   2069 
   2070 	ifp = &sc->sc_ethercom.ec_if;
   2071 	xname = device_xname(sc->sc_dev);
   2072 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2073 	ifp->if_softc = sc;
   2074 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2075 	ifp->if_ioctl = wm_ioctl;
   2076 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2077 		ifp->if_start = wm_nq_start;
   2078 	else
   2079 		ifp->if_start = wm_start;
   2080 	ifp->if_watchdog = wm_watchdog;
   2081 	ifp->if_init = wm_init;
   2082 	ifp->if_stop = wm_stop;
   2083 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2084 	IFQ_SET_READY(&ifp->if_snd);
   2085 
   2086 	/* Check for jumbo frame */
   2087 	switch (sc->sc_type) {
   2088 	case WM_T_82573:
   2089 		/* XXX limited to 9234 if ASPM is disabled */
   2090 		wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
   2091 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
   2092 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2093 		break;
   2094 	case WM_T_82571:
   2095 	case WM_T_82572:
   2096 	case WM_T_82574:
   2097 	case WM_T_82575:
   2098 	case WM_T_82576:
   2099 	case WM_T_82580:
   2100 	case WM_T_82580ER:
   2101 	case WM_T_I350:
   2102 	case WM_T_I354: /* XXXX ok? */
   2103 	case WM_T_I210:
   2104 	case WM_T_I211:
   2105 	case WM_T_80003:
   2106 	case WM_T_ICH9:
   2107 	case WM_T_ICH10:
   2108 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2109 	case WM_T_PCH_LPT:
   2110 		/* XXX limited to 9234 */
   2111 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2112 		break;
   2113 	case WM_T_PCH:
   2114 		/* XXX limited to 4096 */
   2115 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2116 		break;
   2117 	case WM_T_82542_2_0:
   2118 	case WM_T_82542_2_1:
   2119 	case WM_T_82583:
   2120 	case WM_T_ICH8:
   2121 		/* No support for jumbo frame */
   2122 		break;
   2123 	default:
   2124 		/* ETHER_MAX_LEN_JUMBO */
   2125 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2126 		break;
   2127 	}
   2128 
    2129 	/* If we're an i82543 or greater, we can support VLANs. */
   2130 	if (sc->sc_type >= WM_T_82543)
   2131 		sc->sc_ethercom.ec_capabilities |=
   2132 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2133 
   2134 	/*
    2135 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2136 	 * on i82543 and later.
   2137 	 */
   2138 	if (sc->sc_type >= WM_T_82543) {
   2139 		ifp->if_capabilities |=
   2140 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2141 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2142 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2143 		    IFCAP_CSUM_TCPv6_Tx |
   2144 		    IFCAP_CSUM_UDPv6_Tx;
   2145 	}
   2146 
   2147 	/*
   2148 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2149 	 *
   2150 	 *	82541GI (8086:1076) ... no
   2151 	 *	82572EI (8086:10b9) ... yes
   2152 	 */
   2153 	if (sc->sc_type >= WM_T_82571) {
   2154 		ifp->if_capabilities |=
   2155 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2156 	}
   2157 
   2158 	/*
    2159 	 * If we're an i82544 or greater (except i82547), we can do
   2160 	 * TCP segmentation offload.
   2161 	 */
   2162 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2163 		ifp->if_capabilities |= IFCAP_TSOv4;
   2164 	}
   2165 
   2166 	if (sc->sc_type >= WM_T_82571) {
   2167 		ifp->if_capabilities |= IFCAP_TSOv6;
   2168 	}
   2169 
   2170 #ifdef WM_MPSAFE
   2171 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2172 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2173 #else
   2174 	sc->sc_tx_lock = NULL;
   2175 	sc->sc_rx_lock = NULL;
   2176 #endif
   2177 
   2178 	/* Attach the interface. */
   2179 	if_attach(ifp);
   2180 	ether_ifattach(ifp, enaddr);
   2181 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2182 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
   2183 
   2184 #ifdef WM_EVENT_COUNTERS
   2185 	/* Attach event counters. */
   2186 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2187 	    NULL, xname, "txsstall");
   2188 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2189 	    NULL, xname, "txdstall");
   2190 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2191 	    NULL, xname, "txfifo_stall");
   2192 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2193 	    NULL, xname, "txdw");
   2194 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2195 	    NULL, xname, "txqe");
   2196 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2197 	    NULL, xname, "rxintr");
   2198 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2199 	    NULL, xname, "linkintr");
   2200 
   2201 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2202 	    NULL, xname, "rxipsum");
   2203 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2204 	    NULL, xname, "rxtusum");
   2205 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2206 	    NULL, xname, "txipsum");
   2207 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2208 	    NULL, xname, "txtusum");
   2209 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2210 	    NULL, xname, "txtusum6");
   2211 
   2212 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2213 	    NULL, xname, "txtso");
   2214 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2215 	    NULL, xname, "txtso6");
   2216 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2217 	    NULL, xname, "txtsopain");
   2218 
   2219 	for (i = 0; i < WM_NTXSEGS; i++) {
   2220 		snprintf(wm_txseg_evcnt_names[i],
   2221 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2222 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2223 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2224 	}
   2225 
   2226 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2227 	    NULL, xname, "txdrop");
   2228 
   2229 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2230 	    NULL, xname, "tu");
   2231 
   2232 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2233 	    NULL, xname, "tx_xoff");
   2234 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2235 	    NULL, xname, "tx_xon");
   2236 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2237 	    NULL, xname, "rx_xoff");
   2238 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2239 	    NULL, xname, "rx_xon");
   2240 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2241 	    NULL, xname, "rx_macctl");
   2242 #endif /* WM_EVENT_COUNTERS */
   2243 
   2244 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2245 		pmf_class_network_register(self, ifp);
   2246 	else
   2247 		aprint_error_dev(self, "couldn't establish power handler\n");
   2248 
   2249 	return;
   2250 
   2251 	/*
   2252 	 * Free any resources we've allocated during the failed attach
   2253 	 * attempt.  Do this in reverse order and fall through.
   2254 	 */
   2255  fail_5:
   2256 	for (i = 0; i < WM_NRXDESC; i++) {
   2257 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2258 			bus_dmamap_destroy(sc->sc_dmat,
   2259 			    sc->sc_rxsoft[i].rxs_dmamap);
   2260 	}
   2261  fail_4:
   2262 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2263 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2264 			bus_dmamap_destroy(sc->sc_dmat,
   2265 			    sc->sc_txsoft[i].txs_dmamap);
   2266 	}
   2267 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2268  fail_3:
   2269 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2270  fail_2:
   2271 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2272 	    sc->sc_cd_size);
   2273  fail_1:
   2274 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2275  fail_0:
   2276 	return;
   2277 }
   2278 
   2279 /* The detach function (ca_detach) */
   2280 static int
   2281 wm_detach(device_t self, int flags __unused)
   2282 {
   2283 	struct wm_softc *sc = device_private(self);
   2284 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2285 	int i;
   2286 #ifndef WM_MPSAFE
   2287 	int s;
   2288 
   2289 	s = splnet();
   2290 #endif
   2291 	/* Stop the interface. Callouts are stopped in it. */
   2292 	wm_stop(ifp, 1);
   2293 
   2294 #ifndef WM_MPSAFE
   2295 	splx(s);
   2296 #endif
   2297 
   2298 	pmf_device_deregister(self);
   2299 
   2300 	/* Tell the firmware about the release */
   2301 	WM_BOTH_LOCK(sc);
   2302 	wm_release_manageability(sc);
   2303 	wm_release_hw_control(sc);
   2304 	WM_BOTH_UNLOCK(sc);
   2305 
   2306 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2307 
   2308 	/* Delete all remaining media. */
   2309 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2310 
   2311 	ether_ifdetach(ifp);
   2312 	if_detach(ifp);
    2313 
   2315 	/* Unload RX dmamaps and free mbufs */
   2316 	WM_RX_LOCK(sc);
   2317 	wm_rxdrain(sc);
   2318 	WM_RX_UNLOCK(sc);
   2319 	/* Must unlock here */
   2320 
    2321 	/* Free DMA maps, mirroring the failure path at the end of wm_attach() */
   2322 	for (i = 0; i < WM_NRXDESC; i++) {
   2323 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2324 			bus_dmamap_destroy(sc->sc_dmat,
   2325 			    sc->sc_rxsoft[i].rxs_dmamap);
   2326 	}
   2327 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2328 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2329 			bus_dmamap_destroy(sc->sc_dmat,
   2330 			    sc->sc_txsoft[i].txs_dmamap);
   2331 	}
   2332 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2333 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2334 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2335 	    sc->sc_cd_size);
   2336 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2337 
   2338 	/* Disestablish the interrupt handler */
   2339 	if (sc->sc_ih != NULL) {
   2340 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2341 		sc->sc_ih = NULL;
   2342 	}
   2343 
   2344 	/* Unmap the registers */
   2345 	if (sc->sc_ss) {
   2346 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2347 		sc->sc_ss = 0;
   2348 	}
   2349 
   2350 	if (sc->sc_ios) {
   2351 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2352 		sc->sc_ios = 0;
   2353 	}
   2354 
   2355 	if (sc->sc_tx_lock)
   2356 		mutex_obj_free(sc->sc_tx_lock);
   2357 	if (sc->sc_rx_lock)
   2358 		mutex_obj_free(sc->sc_rx_lock);
   2359 
   2360 	return 0;
   2361 }
   2362 
   2363 static bool
   2364 wm_suspend(device_t self, const pmf_qual_t *qual)
   2365 {
   2366 	struct wm_softc *sc = device_private(self);
   2367 
   2368 	wm_release_manageability(sc);
   2369 	wm_release_hw_control(sc);
   2370 #ifdef WM_WOL
   2371 	wm_enable_wakeup(sc);
   2372 #endif
   2373 
   2374 	return true;
   2375 }
   2376 
   2377 static bool
   2378 wm_resume(device_t self, const pmf_qual_t *qual)
   2379 {
   2380 	struct wm_softc *sc = device_private(self);
   2381 
   2382 	wm_init_manageability(sc);
   2383 
   2384 	return true;
   2385 }
   2386 
   2387 /*
   2388  * wm_watchdog:		[ifnet interface function]
   2389  *
   2390  *	Watchdog timer handler.
   2391  */
   2392 static void
   2393 wm_watchdog(struct ifnet *ifp)
   2394 {
   2395 	struct wm_softc *sc = ifp->if_softc;
   2396 
   2397 	/*
   2398 	 * Since we're using delayed interrupts, sweep up
   2399 	 * before we report an error.
   2400 	 */
   2401 	WM_TX_LOCK(sc);
   2402 	wm_txintr(sc);
   2403 	WM_TX_UNLOCK(sc);
   2404 
   2405 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2406 #ifdef WM_DEBUG
   2407 		int i, j;
   2408 		struct wm_txsoft *txs;
   2409 #endif
   2410 		log(LOG_ERR,
   2411 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2412 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2413 		    sc->sc_txnext);
   2414 		ifp->if_oerrors++;
   2415 #ifdef WM_DEBUG
    2416 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
    2417 		    i = WM_NEXTTXS(sc, i)) {
    2418 			txs = &sc->sc_txsoft[i];
    2419 			printf("txs %d tx %d -> %d\n",
    2420 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2421 			for (j = txs->txs_firstdesc; ;
    2422 			    j = WM_NEXTTX(sc, j)) {
    2423 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2424 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
    2425 				printf("\t %#08x%08x\n",
    2426 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
    2427 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
    2428 				if (j == txs->txs_lastdesc)
    2429 					break;
    2430 			}
    2431 		}
   2432 #endif
   2433 		/* Reset the interface. */
   2434 		(void) wm_init(ifp);
   2435 	}
   2436 
   2437 	/* Try to get more packets going. */
   2438 	ifp->if_start(ifp);
   2439 }
   2440 
   2441 /*
   2442  * wm_tick:
   2443  *
   2444  *	One second timer, used to check link status, sweep up
   2445  *	completed transmit jobs, etc.
   2446  */
   2447 static void
   2448 wm_tick(void *arg)
   2449 {
   2450 	struct wm_softc *sc = arg;
   2451 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2452 #ifndef WM_MPSAFE
   2453 	int s;
   2454 
   2455 	s = splnet();
   2456 #endif
   2457 
   2458 	WM_TX_LOCK(sc);
   2459 
   2460 	if (sc->sc_stopping)
   2461 		goto out;
   2462 
   2463 	if (sc->sc_type >= WM_T_82542_2_1) {
   2464 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2465 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2466 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2467 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2468 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2469 	}
   2470 
   2471 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2472 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2473 	    + CSR_READ(sc, WMREG_CRCERRS)
   2474 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2475 	    + CSR_READ(sc, WMREG_SYMERRC)
   2476 	    + CSR_READ(sc, WMREG_RXERRC)
   2477 	    + CSR_READ(sc, WMREG_SEC)
   2478 	    + CSR_READ(sc, WMREG_CEXTERR)
   2479 	    + CSR_READ(sc, WMREG_RLEC);
   2480 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2481 
   2482 	if (sc->sc_flags & WM_F_HAS_MII)
   2483 		mii_tick(&sc->sc_mii);
   2484 	else
   2485 		wm_tbi_check_link(sc);
   2486 
   2487 out:
   2488 	WM_TX_UNLOCK(sc);
   2489 #ifndef WM_MPSAFE
   2490 	splx(s);
   2491 #endif
   2492 
   2493 	if (!sc->sc_stopping)
   2494 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2495 }
   2496 
   2497 static int
   2498 wm_ifflags_cb(struct ethercom *ec)
   2499 {
   2500 	struct ifnet *ifp = &ec->ec_if;
   2501 	struct wm_softc *sc = ifp->if_softc;
   2502 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2503 	int rc = 0;
   2504 
   2505 	WM_BOTH_LOCK(sc);
   2506 
   2507 	if (change != 0)
   2508 		sc->sc_if_flags = ifp->if_flags;
   2509 
   2510 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2511 		rc = ENETRESET;
   2512 		goto out;
   2513 	}
   2514 
   2515 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2516 		wm_set_filter(sc);
   2517 
   2518 	wm_set_vlan(sc);
   2519 
   2520 out:
   2521 	WM_BOTH_UNLOCK(sc);
   2522 
   2523 	return rc;
   2524 }
   2525 
   2526 /*
   2527  * wm_ioctl:		[ifnet interface function]
   2528  *
   2529  *	Handle control requests from the operator.
   2530  */
   2531 static int
   2532 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2533 {
   2534 	struct wm_softc *sc = ifp->if_softc;
   2535 	struct ifreq *ifr = (struct ifreq *) data;
   2536 	struct ifaddr *ifa = (struct ifaddr *)data;
   2537 	struct sockaddr_dl *sdl;
   2538 	int s, error;
   2539 
   2540 #ifndef WM_MPSAFE
   2541 	s = splnet();
   2542 #endif
   2543 	WM_BOTH_LOCK(sc);
   2544 
   2545 	switch (cmd) {
   2546 	case SIOCSIFMEDIA:
   2547 	case SIOCGIFMEDIA:
   2548 		/* Flow control requires full-duplex mode. */
   2549 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2550 		    (ifr->ifr_media & IFM_FDX) == 0)
   2551 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2552 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2553 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2554 				/* We can do both TXPAUSE and RXPAUSE. */
   2555 				ifr->ifr_media |=
   2556 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2557 			}
   2558 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2559 		}
   2560 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2561 		break;
   2562 	case SIOCINITIFADDR:
   2563 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2564 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2565 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2566 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2567 			/* unicast address is first multicast entry */
   2568 			wm_set_filter(sc);
   2569 			error = 0;
   2570 			break;
   2571 		}
   2572 		/*FALLTHROUGH*/
   2573 	default:
   2574 		WM_BOTH_UNLOCK(sc);
   2575 #ifdef WM_MPSAFE
   2576 		s = splnet();
   2577 #endif
   2578 		/* It may call wm_start, so unlock here */
   2579 		error = ether_ioctl(ifp, cmd, data);
   2580 #ifdef WM_MPSAFE
   2581 		splx(s);
   2582 #endif
   2583 		WM_BOTH_LOCK(sc);
   2584 
   2585 		if (error != ENETRESET)
   2586 			break;
   2587 
   2588 		error = 0;
   2589 
   2590 		if (cmd == SIOCSIFCAP) {
   2591 			WM_BOTH_UNLOCK(sc);
   2592 			error = (*ifp->if_init)(ifp);
   2593 			WM_BOTH_LOCK(sc);
   2594 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2595 			;
   2596 		else if (ifp->if_flags & IFF_RUNNING) {
   2597 			/*
   2598 			 * Multicast list has changed; set the hardware filter
   2599 			 * accordingly.
   2600 			 */
   2601 			wm_set_filter(sc);
   2602 		}
   2603 		break;
   2604 	}
   2605 
   2606 	WM_BOTH_UNLOCK(sc);
   2607 
   2608 	/* Try to get more packets going. */
   2609 	ifp->if_start(ifp);
   2610 
   2611 #ifndef WM_MPSAFE
   2612 	splx(s);
   2613 #endif
   2614 	return error;
   2615 }
   2616 
   2617 /* MAC address related */
   2618 
   2619 static int
   2620 wm_check_alt_mac_addr(struct wm_softc *sc)
   2621 {
   2622 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2623 	uint16_t offset = EEPROM_OFF_MACADDR;
   2624 
   2625 	/* Try to read alternative MAC address pointer */
   2626 	if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2627 		return -1;
   2628 
   2629 	/* Check pointer */
   2630 	if (offset == 0xffff)
   2631 		return -1;
   2632 
   2633 	/*
    2634 	 * Check whether the alternative MAC address is valid.  Some
    2635 	 * cards have a non-0xffff pointer but don't actually use an
    2636 	 * alternative MAC address.
    2637 	 *
    2638 	 * A valid unicast address must have the multicast (I/G) bit clear.
   2639 	 */
   2640 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2641 		if (((myea[0] & 0xff) & 0x01) == 0)
   2642 			return 0; /* found! */
   2643 
   2644 	/* not found */
   2645 	return -1;
   2646 }
   2647 
   2648 static int
   2649 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2650 {
   2651 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2652 	uint16_t offset = EEPROM_OFF_MACADDR;
   2653 	int do_invert = 0;
   2654 
   2655 	switch (sc->sc_type) {
   2656 	case WM_T_82580:
   2657 	case WM_T_82580ER:
   2658 	case WM_T_I350:
   2659 	case WM_T_I354:
   2660 		switch (sc->sc_funcid) {
   2661 		case 0:
   2662 			/* default value (== EEPROM_OFF_MACADDR) */
   2663 			break;
   2664 		case 1:
   2665 			offset = EEPROM_OFF_LAN1;
   2666 			break;
   2667 		case 2:
   2668 			offset = EEPROM_OFF_LAN2;
   2669 			break;
   2670 		case 3:
   2671 			offset = EEPROM_OFF_LAN3;
   2672 			break;
   2673 		default:
   2674 			goto bad;
   2675 			/* NOTREACHED */
   2676 			break;
   2677 		}
   2678 		break;
   2679 	case WM_T_82571:
   2680 	case WM_T_82575:
   2681 	case WM_T_82576:
   2682 	case WM_T_80003:
   2683 	case WM_T_I210:
   2684 	case WM_T_I211:
   2685 		if (wm_check_alt_mac_addr(sc) != 0) {
   2686 			/* reset the offset to LAN0 */
   2687 			offset = EEPROM_OFF_MACADDR;
   2688 			if ((sc->sc_funcid & 0x01) == 1)
   2689 				do_invert = 1;
   2690 			goto do_read;
   2691 		}
   2692 		switch (sc->sc_funcid) {
   2693 		case 0:
   2694 			/*
   2695 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
   2696 			 * itself.
   2697 			 */
   2698 			break;
   2699 		case 1:
   2700 			offset += EEPROM_OFF_MACADDR_LAN1;
   2701 			break;
   2702 		case 2:
   2703 			offset += EEPROM_OFF_MACADDR_LAN2;
   2704 			break;
   2705 		case 3:
   2706 			offset += EEPROM_OFF_MACADDR_LAN3;
   2707 			break;
   2708 		default:
   2709 			goto bad;
   2710 			/* NOTREACHED */
   2711 			break;
   2712 		}
   2713 		break;
   2714 	default:
   2715 		if ((sc->sc_funcid & 0x01) == 1)
   2716 			do_invert = 1;
   2717 		break;
   2718 	}
   2719 
   2720  do_read:
   2721 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2722 		myea) != 0) {
   2723 		goto bad;
   2724 	}
   2725 
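	/* Each 16-bit NVM word holds two address bytes, low byte first. */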
   2726 	enaddr[0] = myea[0] & 0xff;
   2727 	enaddr[1] = myea[0] >> 8;
   2728 	enaddr[2] = myea[1] & 0xff;
   2729 	enaddr[3] = myea[1] >> 8;
   2730 	enaddr[4] = myea[2] & 0xff;
   2731 	enaddr[5] = myea[2] >> 8;
   2732 
   2733 	/*
   2734 	 * Toggle the LSB of the MAC address on the second port
   2735 	 * of some dual port cards.
   2736 	 */
   2737 	if (do_invert != 0)
   2738 		enaddr[5] ^= 1;
   2739 
   2740 	return 0;
   2741 
   2742  bad:
   2743 	return -1;
   2744 }
   2745 
   2746 /*
   2747  * wm_set_ral:
   2748  *
    2749  *	Set an entry in the receive address list.
   2750  */
   2751 static void
   2752 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2753 {
   2754 	uint32_t ral_lo, ral_hi;
   2755 
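	/*
	 * The 6-byte address is packed little-endian: the first four
	 * bytes go into RAL and the last two into RAH, along with the
	 * Address Valid bit that arms the entry.
	 */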
   2756 	if (enaddr != NULL) {
   2757 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2758 		    (enaddr[3] << 24);
   2759 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2760 		ral_hi |= RAL_AV;
   2761 	} else {
   2762 		ral_lo = 0;
   2763 		ral_hi = 0;
   2764 	}
   2765 
   2766 	if (sc->sc_type >= WM_T_82544) {
   2767 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2768 		    ral_lo);
   2769 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2770 		    ral_hi);
   2771 	} else {
   2772 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2773 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2774 	}
   2775 }
   2776 
   2777 /*
   2778  * wm_mchash:
   2779  *
   2780  *	Compute the hash of the multicast address for the 4096-bit
   2781  *	multicast filter.
   2782  */
   2783 static uint32_t
   2784 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2785 {
   2786 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2787 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2788 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2789 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2790 	uint32_t hash;
   2791 
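	/*
	 * The hash is simply bits taken from the top two bytes of the
	 * address: sc_mchash_type selects one of four overlapping
	 * 12-bit (10-bit on ICH/PCH) windows via the shift tables
	 * above.
	 */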
   2792 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2793 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2794 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2795 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   2796 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   2797 		return (hash & 0x3ff);
   2798 	}
   2799 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2800 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2801 
   2802 	return (hash & 0xfff);
   2803 }
   2804 
   2805 /*
   2806  * wm_set_filter:
   2807  *
   2808  *	Set up the receive filter.
   2809  */
   2810 static void
   2811 wm_set_filter(struct wm_softc *sc)
   2812 {
   2813 	struct ethercom *ec = &sc->sc_ethercom;
   2814 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2815 	struct ether_multi *enm;
   2816 	struct ether_multistep step;
   2817 	bus_addr_t mta_reg;
   2818 	uint32_t hash, reg, bit;
   2819 	int i, size;
   2820 
   2821 	if (sc->sc_type >= WM_T_82544)
   2822 		mta_reg = WMREG_CORDOVA_MTA;
   2823 	else
   2824 		mta_reg = WMREG_MTA;
   2825 
   2826 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2827 
   2828 	if (ifp->if_flags & IFF_BROADCAST)
   2829 		sc->sc_rctl |= RCTL_BAM;
   2830 	if (ifp->if_flags & IFF_PROMISC) {
   2831 		sc->sc_rctl |= RCTL_UPE;
   2832 		goto allmulti;
   2833 	}
   2834 
   2835 	/*
   2836 	 * Set the station address in the first RAL slot, and
   2837 	 * clear the remaining slots.
   2838 	 */
   2839 	if (sc->sc_type == WM_T_ICH8)
    2840 		size = WM_RAL_TABSIZE_ICH8 - 1;
   2841 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   2842 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   2843 	    || (sc->sc_type == WM_T_PCH_LPT))
   2844 		size = WM_RAL_TABSIZE_ICH8;
   2845 	else if (sc->sc_type == WM_T_82575)
   2846 		size = WM_RAL_TABSIZE_82575;
   2847 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   2848 		size = WM_RAL_TABSIZE_82576;
   2849 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2850 		size = WM_RAL_TABSIZE_I350;
   2851 	else
   2852 		size = WM_RAL_TABSIZE;
   2853 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   2854 	for (i = 1; i < size; i++)
   2855 		wm_set_ral(sc, NULL, i);
   2856 
   2857 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2858 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2859 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   2860 		size = WM_ICH8_MC_TABSIZE;
   2861 	else
   2862 		size = WM_MC_TABSIZE;
   2863 	/* Clear out the multicast table. */
   2864 	for (i = 0; i < size; i++)
   2865 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2866 
   2867 	ETHER_FIRST_MULTI(step, ec, enm);
   2868 	while (enm != NULL) {
   2869 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2870 			/*
   2871 			 * We must listen to a range of multicast addresses.
   2872 			 * For now, just accept all multicasts, rather than
   2873 			 * trying to set only those filter bits needed to match
   2874 			 * the range.  (At this time, the only use of address
   2875 			 * ranges is for IP multicast routing, for which the
   2876 			 * range is big enough to require all bits set.)
   2877 			 */
   2878 			goto allmulti;
   2879 		}
   2880 
   2881 		hash = wm_mchash(sc, enm->enm_addrlo);
   2882 
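		/*
		 * The low five hash bits select a bit within a 32-bit
		 * MTA word; the remaining bits select which word.
		 */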
   2883 		reg = (hash >> 5);
   2884 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2885 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2886 		    || (sc->sc_type == WM_T_PCH2)
   2887 		    || (sc->sc_type == WM_T_PCH_LPT))
   2888 			reg &= 0x1f;
   2889 		else
   2890 			reg &= 0x7f;
   2891 		bit = hash & 0x1f;
   2892 
   2893 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2894 		hash |= 1U << bit;
   2895 
   2896 		/* XXX Hardware bug?? */
   2897 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   2898 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2899 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2900 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2901 		} else
   2902 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2903 
   2904 		ETHER_NEXT_MULTI(step, enm);
   2905 	}
   2906 
   2907 	ifp->if_flags &= ~IFF_ALLMULTI;
   2908 	goto setit;
   2909 
   2910  allmulti:
   2911 	ifp->if_flags |= IFF_ALLMULTI;
   2912 	sc->sc_rctl |= RCTL_MPE;
   2913 
   2914  setit:
   2915 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   2916 }
   2917 
   2918 /* Reset and init related */
   2919 
   2920 static void
   2921 wm_set_vlan(struct wm_softc *sc)
   2922 {
   2923 	/* Deal with VLAN enables. */
   2924 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   2925 		sc->sc_ctrl |= CTRL_VME;
   2926 	else
   2927 		sc->sc_ctrl &= ~CTRL_VME;
   2928 
   2929 	/* Write the control registers. */
   2930 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2931 }
   2932 
   2933 static void
   2934 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   2935 {
   2936 	uint32_t gcr;
   2937 	pcireg_t ctrl2;
   2938 
   2939 	gcr = CSR_READ(sc, WMREG_GCR);
   2940 
   2941 	/* Only take action if timeout value is defaulted to 0 */
   2942 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   2943 		goto out;
   2944 
   2945 	if ((gcr & GCR_CAP_VER2) == 0) {
   2946 		gcr |= GCR_CMPL_TMOUT_10MS;
   2947 		goto out;
   2948 	}
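	/*
	 * Capability-version-2 parts take the completion timeout from
	 * the PCIe Device Control 2 register instead of GCR, so set
	 * the 16ms range there.
	 */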
   2949 
   2950 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   2951 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   2952 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   2953 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   2954 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   2955 
   2956 out:
   2957 	/* Disable completion timeout resend */
   2958 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   2959 
   2960 	CSR_WRITE(sc, WMREG_GCR, gcr);
   2961 }
   2962 
   2963 void
   2964 wm_get_auto_rd_done(struct wm_softc *sc)
   2965 {
   2966 	int i;
   2967 
   2968 	/* wait for eeprom to reload */
   2969 	switch (sc->sc_type) {
   2970 	case WM_T_82571:
   2971 	case WM_T_82572:
   2972 	case WM_T_82573:
   2973 	case WM_T_82574:
   2974 	case WM_T_82583:
   2975 	case WM_T_82575:
   2976 	case WM_T_82576:
   2977 	case WM_T_82580:
   2978 	case WM_T_82580ER:
   2979 	case WM_T_I350:
   2980 	case WM_T_I354:
   2981 	case WM_T_I210:
   2982 	case WM_T_I211:
   2983 	case WM_T_80003:
   2984 	case WM_T_ICH8:
   2985 	case WM_T_ICH9:
   2986 		for (i = 0; i < 10; i++) {
   2987 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   2988 				break;
   2989 			delay(1000);
   2990 		}
   2991 		if (i == 10) {
   2992 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   2993 			    "complete\n", device_xname(sc->sc_dev));
   2994 		}
   2995 		break;
   2996 	default:
   2997 		break;
   2998 	}
   2999 }
   3000 
   3001 void
   3002 wm_lan_init_done(struct wm_softc *sc)
   3003 {
   3004 	uint32_t reg = 0;
   3005 	int i;
   3006 
   3007 	/* wait for eeprom to reload */
   3008 	switch (sc->sc_type) {
   3009 	case WM_T_ICH10:
   3010 	case WM_T_PCH:
   3011 	case WM_T_PCH2:
   3012 	case WM_T_PCH_LPT:
   3013 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3014 			reg = CSR_READ(sc, WMREG_STATUS);
   3015 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3016 				break;
   3017 			delay(100);
   3018 		}
   3019 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3020 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3021 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3022 		}
   3023 		break;
   3024 	default:
   3025 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3026 		    __func__);
   3027 		break;
   3028 	}
   3029 
   3030 	reg &= ~STATUS_LAN_INIT_DONE;
   3031 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3032 }
   3033 
   3034 void
   3035 wm_get_cfg_done(struct wm_softc *sc)
   3036 {
   3037 	int mask;
   3038 	uint32_t reg;
   3039 	int i;
   3040 
   3041 	/* wait for eeprom to reload */
   3042 	switch (sc->sc_type) {
   3043 	case WM_T_82542_2_0:
   3044 	case WM_T_82542_2_1:
   3045 		/* null */
   3046 		break;
   3047 	case WM_T_82543:
   3048 	case WM_T_82544:
   3049 	case WM_T_82540:
   3050 	case WM_T_82545:
   3051 	case WM_T_82545_3:
   3052 	case WM_T_82546:
   3053 	case WM_T_82546_3:
   3054 	case WM_T_82541:
   3055 	case WM_T_82541_2:
   3056 	case WM_T_82547:
   3057 	case WM_T_82547_2:
   3058 	case WM_T_82573:
   3059 	case WM_T_82574:
   3060 	case WM_T_82583:
   3061 		/* generic */
   3062 		delay(10*1000);
   3063 		break;
   3064 	case WM_T_80003:
   3065 	case WM_T_82571:
   3066 	case WM_T_82572:
   3067 	case WM_T_82575:
   3068 	case WM_T_82576:
   3069 	case WM_T_82580:
   3070 	case WM_T_82580ER:
   3071 	case WM_T_I350:
   3072 	case WM_T_I354:
   3073 	case WM_T_I210:
   3074 	case WM_T_I211:
   3075 		if (sc->sc_type == WM_T_82571) {
    3076 			/* On the 82571, all functions share CFGDONE_0 */
   3077 			mask = EEMNGCTL_CFGDONE_0;
   3078 		} else
   3079 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
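         		/*
         		 * Illustrative: on LAN function 1 of a dual-port
         		 * adapter this polls EEMNGCTL_CFGDONE_0 << 1, i.e.
         		 * the CFGDONE bit belonging to port 1.
         		 */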
   3080 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3081 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3082 				break;
   3083 			delay(1000);
   3084 		}
   3085 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3086 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3087 				device_xname(sc->sc_dev), __func__));
   3088 		}
   3089 		break;
   3090 	case WM_T_ICH8:
   3091 	case WM_T_ICH9:
   3092 	case WM_T_ICH10:
   3093 	case WM_T_PCH:
   3094 	case WM_T_PCH2:
   3095 	case WM_T_PCH_LPT:
   3096 		delay(10*1000);
   3097 		if (sc->sc_type >= WM_T_ICH10)
   3098 			wm_lan_init_done(sc);
   3099 		else
   3100 			wm_get_auto_rd_done(sc);
   3101 
   3102 		reg = CSR_READ(sc, WMREG_STATUS);
   3103 		if ((reg & STATUS_PHYRA) != 0)
   3104 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3105 		break;
   3106 	default:
   3107 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3108 		    __func__);
   3109 		break;
   3110 	}
   3111 }
   3112 
   3113 /*
   3114  * wm_reset:
   3115  *
   3116  *	Reset the i82542 chip.
   3117  */
   3118 static void
   3119 wm_reset(struct wm_softc *sc)
   3120 {
   3121 	int phy_reset = 0;
   3122 	int error = 0;
   3123 	uint32_t reg, mask;
   3124 
   3125 	/*
   3126 	 * Allocate on-chip memory according to the MTU size.
   3127 	 * The Packet Buffer Allocation register must be written
   3128 	 * before the chip is reset.
   3129 	 */
   3130 	switch (sc->sc_type) {
   3131 	case WM_T_82547:
   3132 	case WM_T_82547_2:
   3133 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3134 		    PBA_22K : PBA_30K;
   3135 		sc->sc_txfifo_head = 0;
   3136 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3137 		sc->sc_txfifo_size =
   3138 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3139 		sc->sc_txfifo_stall = 0;
   3140 		break;
   3141 	case WM_T_82571:
   3142 	case WM_T_82572:
    3143 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3144 	case WM_T_I350:
   3145 	case WM_T_I354:
   3146 	case WM_T_80003:
   3147 		sc->sc_pba = PBA_32K;
   3148 		break;
   3149 	case WM_T_82580:
   3150 	case WM_T_82580ER:
   3151 		sc->sc_pba = PBA_35K;
   3152 		break;
   3153 	case WM_T_I210:
   3154 	case WM_T_I211:
   3155 		sc->sc_pba = PBA_34K;
   3156 		break;
   3157 	case WM_T_82576:
   3158 		sc->sc_pba = PBA_64K;
   3159 		break;
   3160 	case WM_T_82573:
   3161 		sc->sc_pba = PBA_12K;
   3162 		break;
   3163 	case WM_T_82574:
   3164 	case WM_T_82583:
   3165 		sc->sc_pba = PBA_20K;
   3166 		break;
   3167 	case WM_T_ICH8:
   3168 		sc->sc_pba = PBA_8K;
   3169 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3170 		break;
   3171 	case WM_T_ICH9:
   3172 	case WM_T_ICH10:
   3173 		sc->sc_pba = PBA_10K;
   3174 		break;
   3175 	case WM_T_PCH:
   3176 	case WM_T_PCH2:
   3177 	case WM_T_PCH_LPT:
   3178 		sc->sc_pba = PBA_26K;
   3179 		break;
   3180 	default:
   3181 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3182 		    PBA_40K : PBA_48K;
   3183 		break;
   3184 	}
   3185 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
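         	/*
         	 * Worked example (illustrative): an 82547 with an MTU above
         	 * 8192 gets a 22K RX packet buffer, leaving 40K - 22K = 18K
         	 * of on-chip memory for the Tx FIFO that
         	 * wm_82547_txfifo_bugchk() has to account for.
         	 */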
   3186 
   3187 	/* Prevent the PCI-E bus from sticking */
   3188 	if (sc->sc_flags & WM_F_PCIE) {
   3189 		int timeout = 800;
   3190 
   3191 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3192 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3193 
   3194 		while (timeout--) {
   3195 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3196 			    == 0)
   3197 				break;
   3198 			delay(100);
   3199 		}
   3200 	}
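         	/*
         	 * (The loop above polls for GIO master disable for at most
         	 * 800 * 100us = 80ms before giving up silently.)
         	 */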
   3201 
   3202 	/* Set the completion timeout for interface */
   3203 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3204 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3205 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3206 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3207 		wm_set_pcie_completion_timeout(sc);
   3208 
   3209 	/* Clear interrupt */
   3210 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3211 
   3212 	/* Stop the transmit and receive processes. */
   3213 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3214 	sc->sc_rctl &= ~RCTL_EN;
   3215 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3216 	CSR_WRITE_FLUSH(sc);
   3217 
   3218 	/* XXX set_tbi_sbp_82543() */
   3219 
   3220 	delay(10*1000);
   3221 
   3222 	/* Must acquire the MDIO ownership before MAC reset */
   3223 	switch (sc->sc_type) {
   3224 	case WM_T_82573:
   3225 	case WM_T_82574:
   3226 	case WM_T_82583:
   3227 		error = wm_get_hw_semaphore_82573(sc);
   3228 		break;
   3229 	default:
   3230 		break;
   3231 	}
   3232 
   3233 	/*
   3234 	 * 82541 Errata 29? & 82547 Errata 28?
   3235 	 * See also the description about PHY_RST bit in CTRL register
   3236 	 * in 8254x_GBe_SDM.pdf.
   3237 	 */
   3238 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3239 		CSR_WRITE(sc, WMREG_CTRL,
   3240 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3241 		CSR_WRITE_FLUSH(sc);
   3242 		delay(5000);
   3243 	}
   3244 
   3245 	switch (sc->sc_type) {
   3246 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3247 	case WM_T_82541:
   3248 	case WM_T_82541_2:
   3249 	case WM_T_82547:
   3250 	case WM_T_82547_2:
   3251 		/*
   3252 		 * On some chipsets, a reset through a memory-mapped write
   3253 		 * cycle can cause the chip to reset before completing the
    3254 		 * write cycle.  This causes a major headache that can be
   3255 		 * avoided by issuing the reset via indirect register writes
   3256 		 * through I/O space.
   3257 		 *
   3258 		 * So, if we successfully mapped the I/O BAR at attach time,
   3259 		 * use that.  Otherwise, try our luck with a memory-mapped
   3260 		 * reset.
   3261 		 */
   3262 		if (sc->sc_flags & WM_F_IOH_VALID)
   3263 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3264 		else
   3265 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3266 		break;
   3267 	case WM_T_82545_3:
   3268 	case WM_T_82546_3:
   3269 		/* Use the shadow control register on these chips. */
   3270 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3271 		break;
   3272 	case WM_T_80003:
   3273 		mask = swfwphysem[sc->sc_funcid];
   3274 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3275 		wm_get_swfw_semaphore(sc, mask);
   3276 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3277 		wm_put_swfw_semaphore(sc, mask);
   3278 		break;
   3279 	case WM_T_ICH8:
   3280 	case WM_T_ICH9:
   3281 	case WM_T_ICH10:
   3282 	case WM_T_PCH:
   3283 	case WM_T_PCH2:
   3284 	case WM_T_PCH_LPT:
   3285 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3286 		if (wm_check_reset_block(sc) == 0) {
   3287 			/*
   3288 			 * Gate automatic PHY configuration by hardware on
   3289 			 * non-managed 82579
   3290 			 */
   3291 			if ((sc->sc_type == WM_T_PCH2)
   3292 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3293 				!= 0))
   3294 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3295 
   3296 
   3297 			reg |= CTRL_PHY_RESET;
   3298 			phy_reset = 1;
   3299 		}
   3300 		wm_get_swfwhw_semaphore(sc);
   3301 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3302 		/* Don't insert a completion barrier during reset */
   3303 		delay(20*1000);
   3304 		wm_put_swfwhw_semaphore(sc);
   3305 		break;
   3306 	case WM_T_82542_2_0:
   3307 	case WM_T_82542_2_1:
   3308 	case WM_T_82543:
   3309 	case WM_T_82540:
   3310 	case WM_T_82545:
   3311 	case WM_T_82546:
   3312 	case WM_T_82571:
   3313 	case WM_T_82572:
   3314 	case WM_T_82573:
   3315 	case WM_T_82574:
   3316 	case WM_T_82575:
   3317 	case WM_T_82576:
   3318 	case WM_T_82580:
   3319 	case WM_T_82580ER:
   3320 	case WM_T_82583:
   3321 	case WM_T_I350:
   3322 	case WM_T_I354:
   3323 	case WM_T_I210:
   3324 	case WM_T_I211:
   3325 	default:
   3326 		/* Everything else can safely use the documented method. */
   3327 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3328 		break;
   3329 	}
   3330 
   3331 	/* Must release the MDIO ownership after MAC reset */
   3332 	switch (sc->sc_type) {
   3333 	case WM_T_82573:
   3334 	case WM_T_82574:
   3335 	case WM_T_82583:
   3336 		if (error == 0)
   3337 			wm_put_hw_semaphore_82573(sc);
   3338 		break;
   3339 	default:
   3340 		break;
   3341 	}
   3342 
   3343 	if (phy_reset != 0)
   3344 		wm_get_cfg_done(sc);
   3345 
   3346 	/* reload EEPROM */
   3347 	switch (sc->sc_type) {
   3348 	case WM_T_82542_2_0:
   3349 	case WM_T_82542_2_1:
   3350 	case WM_T_82543:
   3351 	case WM_T_82544:
   3352 		delay(10);
   3353 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3354 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3355 		CSR_WRITE_FLUSH(sc);
   3356 		delay(2000);
   3357 		break;
   3358 	case WM_T_82540:
   3359 	case WM_T_82545:
   3360 	case WM_T_82545_3:
   3361 	case WM_T_82546:
   3362 	case WM_T_82546_3:
   3363 		delay(5*1000);
   3364 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3365 		break;
   3366 	case WM_T_82541:
   3367 	case WM_T_82541_2:
   3368 	case WM_T_82547:
   3369 	case WM_T_82547_2:
   3370 		delay(20000);
   3371 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3372 		break;
   3373 	case WM_T_82571:
   3374 	case WM_T_82572:
   3375 	case WM_T_82573:
   3376 	case WM_T_82574:
   3377 	case WM_T_82583:
   3378 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3379 			delay(10);
   3380 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3381 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3382 			CSR_WRITE_FLUSH(sc);
   3383 		}
   3384 		/* check EECD_EE_AUTORD */
   3385 		wm_get_auto_rd_done(sc);
   3386 		/*
   3387 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3388 		 * is set.
   3389 		 */
   3390 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3391 		    || (sc->sc_type == WM_T_82583))
   3392 			delay(25*1000);
   3393 		break;
   3394 	case WM_T_82575:
   3395 	case WM_T_82576:
   3396 	case WM_T_82580:
   3397 	case WM_T_82580ER:
   3398 	case WM_T_I350:
   3399 	case WM_T_I354:
   3400 	case WM_T_I210:
   3401 	case WM_T_I211:
   3402 	case WM_T_80003:
   3403 		/* check EECD_EE_AUTORD */
   3404 		wm_get_auto_rd_done(sc);
   3405 		break;
   3406 	case WM_T_ICH8:
   3407 	case WM_T_ICH9:
   3408 	case WM_T_ICH10:
   3409 	case WM_T_PCH:
   3410 	case WM_T_PCH2:
   3411 	case WM_T_PCH_LPT:
   3412 		break;
   3413 	default:
   3414 		panic("%s: unknown type\n", __func__);
   3415 	}
   3416 
   3417 	/* Check whether EEPROM is present or not */
   3418 	switch (sc->sc_type) {
   3419 	case WM_T_82575:
   3420 	case WM_T_82576:
   3421 #if 0 /* XXX */
   3422 	case WM_T_82580:
   3423 	case WM_T_82580ER:
   3424 #endif
   3425 	case WM_T_I350:
   3426 	case WM_T_I354:
   3427 	case WM_T_ICH8:
   3428 	case WM_T_ICH9:
   3429 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3430 			/* Not found */
   3431 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3432 			if ((sc->sc_type == WM_T_82575)
   3433 			    || (sc->sc_type == WM_T_82576)
   3434 			    || (sc->sc_type == WM_T_82580)
   3435 			    || (sc->sc_type == WM_T_82580ER)
   3436 			    || (sc->sc_type == WM_T_I350)
   3437 			    || (sc->sc_type == WM_T_I354))
   3438 				wm_reset_init_script_82575(sc);
   3439 		}
   3440 		break;
   3441 	default:
   3442 		break;
   3443 	}
   3444 
   3445 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3446 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3447 		/* clear global device reset status bit */
   3448 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3449 	}
   3450 
   3451 	/* Clear any pending interrupt events. */
   3452 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3453 	reg = CSR_READ(sc, WMREG_ICR);
   3454 
   3455 	/* reload sc_ctrl */
   3456 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3457 
   3458 	if (sc->sc_type == WM_T_I350)
   3459 		wm_set_eee_i350(sc);
   3460 
   3461 	/* dummy read from WUC */
   3462 	if (sc->sc_type == WM_T_PCH)
   3463 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3464 	/*
   3465 	 * For PCH, this write will make sure that any noise will be detected
   3466 	 * as a CRC error and be dropped rather than show up as a bad packet
   3467 	 * to the DMA engine
   3468 	 */
   3469 	if (sc->sc_type == WM_T_PCH)
   3470 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3471 
   3472 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3473 		CSR_WRITE(sc, WMREG_WUC, 0);
   3474 
   3475 	/* XXX need special handling for 82580 */
   3476 }
   3477 
   3478 /*
   3479  * wm_add_rxbuf:
   3480  *
    3481  *	Add a receive buffer to the indicated descriptor.
   3482  */
   3483 static int
   3484 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3485 {
   3486 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3487 	struct mbuf *m;
   3488 	int error;
   3489 
   3490 	KASSERT(WM_RX_LOCKED(sc));
   3491 
   3492 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3493 	if (m == NULL)
   3494 		return ENOBUFS;
   3495 
   3496 	MCLGET(m, M_DONTWAIT);
   3497 	if ((m->m_flags & M_EXT) == 0) {
   3498 		m_freem(m);
   3499 		return ENOBUFS;
   3500 	}
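         	/*
         	 * (The two-step allocation above is the usual mbuf idiom:
         	 * get a packet header mbuf first, then attach a cluster so
         	 * the receive buffer is one contiguous MCLBYTES region; if
         	 * the cluster can't be had, the header mbuf alone is useless
         	 * for RX and is freed.)
         	 */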
   3501 
   3502 	if (rxs->rxs_mbuf != NULL)
   3503 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3504 
   3505 	rxs->rxs_mbuf = m;
   3506 
   3507 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3508 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3509 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3510 	if (error) {
   3511 		/* XXX XXX XXX */
   3512 		aprint_error_dev(sc->sc_dev,
   3513 		    "unable to load rx DMA map %d, error = %d\n",
   3514 		    idx, error);
   3515 		panic("wm_add_rxbuf");
   3516 	}
   3517 
   3518 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3519 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3520 
   3521 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3522 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3523 			WM_INIT_RXDESC(sc, idx);
   3524 	} else
   3525 		WM_INIT_RXDESC(sc, idx);
   3526 
   3527 	return 0;
   3528 }
   3529 
   3530 /*
   3531  * wm_rxdrain:
   3532  *
   3533  *	Drain the receive queue.
   3534  */
   3535 static void
   3536 wm_rxdrain(struct wm_softc *sc)
   3537 {
   3538 	struct wm_rxsoft *rxs;
   3539 	int i;
   3540 
   3541 	KASSERT(WM_RX_LOCKED(sc));
   3542 
   3543 	for (i = 0; i < WM_NRXDESC; i++) {
   3544 		rxs = &sc->sc_rxsoft[i];
   3545 		if (rxs->rxs_mbuf != NULL) {
   3546 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3547 			m_freem(rxs->rxs_mbuf);
   3548 			rxs->rxs_mbuf = NULL;
   3549 		}
   3550 	}
   3551 }
   3552 
   3553 /*
   3554  * wm_init:		[ifnet interface function]
   3555  *
   3556  *	Initialize the interface.
   3557  */
   3558 static int
   3559 wm_init(struct ifnet *ifp)
   3560 {
   3561 	struct wm_softc *sc = ifp->if_softc;
   3562 	int ret;
   3563 
   3564 	WM_BOTH_LOCK(sc);
   3565 	ret = wm_init_locked(ifp);
   3566 	WM_BOTH_UNLOCK(sc);
   3567 
   3568 	return ret;
   3569 }
   3570 
   3571 static int
   3572 wm_init_locked(struct ifnet *ifp)
   3573 {
   3574 	struct wm_softc *sc = ifp->if_softc;
   3575 	struct wm_rxsoft *rxs;
   3576 	int i, j, trynum, error = 0;
   3577 	uint32_t reg;
   3578 
   3579 	KASSERT(WM_BOTH_LOCKED(sc));
   3580 	/*
    3581 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3582 	 * There is a small but measurable benefit to avoiding the adjustment
   3583 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3584 	 * on such platforms.  One possibility is that the DMA itself is
   3585 	 * slightly more efficient if the front of the entire packet (instead
   3586 	 * of the front of the headers) is aligned.
   3587 	 *
   3588 	 * Note we must always set align_tweak to 0 if we are using
   3589 	 * jumbo frames.
   3590 	 */
   3591 #ifdef __NO_STRICT_ALIGNMENT
   3592 	sc->sc_align_tweak = 0;
   3593 #else
   3594 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   3595 		sc->sc_align_tweak = 0;
   3596 	else
   3597 		sc->sc_align_tweak = 2;
   3598 #endif /* __NO_STRICT_ALIGNMENT */
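         	/*
         	 * Example (illustrative): with the standard MTU of 1500,
         	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 fits
         	 * in MCLBYTES - 2 for the usual 2048-byte cluster, so
         	 * align_tweak is 2.  Offsetting the buffer by two bytes
         	 * makes the 14-byte Ethernet header end on a 4-byte
         	 * boundary, leaving the IP header that follows it aligned.
         	 */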
   3599 
   3600 	/* Cancel any pending I/O. */
   3601 	wm_stop_locked(ifp, 0);
   3602 
   3603 	/* update statistics before reset */
   3604 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3605 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   3606 
   3607 	/* Reset the chip to a known state. */
   3608 	wm_reset(sc);
   3609 
   3610 	switch (sc->sc_type) {
   3611 	case WM_T_82571:
   3612 	case WM_T_82572:
   3613 	case WM_T_82573:
   3614 	case WM_T_82574:
   3615 	case WM_T_82583:
   3616 	case WM_T_80003:
   3617 	case WM_T_ICH8:
   3618 	case WM_T_ICH9:
   3619 	case WM_T_ICH10:
   3620 	case WM_T_PCH:
   3621 	case WM_T_PCH2:
   3622 	case WM_T_PCH_LPT:
   3623 		if (wm_check_mng_mode(sc) != 0)
   3624 			wm_get_hw_control(sc);
   3625 		break;
   3626 	default:
   3627 		break;
   3628 	}
   3629 
   3630 	/* Reset the PHY. */
   3631 	if (sc->sc_flags & WM_F_HAS_MII)
   3632 		wm_gmii_reset(sc);
   3633 
   3634 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3635 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
   3636 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3637 	    || (sc->sc_type == WM_T_PCH_LPT))
   3638 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
   3639 
   3640 	/* Initialize the transmit descriptor ring. */
   3641 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   3642 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   3643 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3644 	sc->sc_txfree = WM_NTXDESC(sc);
   3645 	sc->sc_txnext = 0;
   3646 
   3647 	if (sc->sc_type < WM_T_82543) {
   3648 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3649 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3650 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   3651 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   3652 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   3653 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   3654 	} else {
   3655 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3656 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3657 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   3658 		CSR_WRITE(sc, WMREG_TDH, 0);
   3659 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
   3660 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
   3661 
   3662 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3663 			/*
   3664 			 * Don't write TDT before TCTL.EN is set.
   3665 			 * See the document.
   3666 			 */
   3667 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
   3668 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   3669 			    | TXDCTL_WTHRESH(0));
   3670 		else {
   3671 			CSR_WRITE(sc, WMREG_TDT, 0);
   3672 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   3673 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   3674 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   3675 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   3676 		}
   3677 	}
   3678 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   3679 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   3680 
   3681 	/* Initialize the transmit job descriptors. */
   3682 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   3683 		sc->sc_txsoft[i].txs_mbuf = NULL;
   3684 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   3685 	sc->sc_txsnext = 0;
   3686 	sc->sc_txsdirty = 0;
   3687 
   3688 	/*
   3689 	 * Initialize the receive descriptor and receive job
   3690 	 * descriptor rings.
   3691 	 */
   3692 	if (sc->sc_type < WM_T_82543) {
   3693 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   3694 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   3695 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   3696 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   3697 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   3698 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   3699 
   3700 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   3701 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   3702 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   3703 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   3704 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   3705 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   3706 	} else {
   3707 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   3708 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   3709 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   3710 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3711 			CSR_WRITE(sc, WMREG_EITR(0), 450);
   3712 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    3713 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   3714 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   3715 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   3716 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   3717 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   3718 			    | RXDCTL_WTHRESH(1));
   3719 		} else {
   3720 			CSR_WRITE(sc, WMREG_RDH, 0);
   3721 			CSR_WRITE(sc, WMREG_RDT, 0);
   3722 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   3723 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   3724 		}
   3725 	}
   3726 	for (i = 0; i < WM_NRXDESC; i++) {
   3727 		rxs = &sc->sc_rxsoft[i];
   3728 		if (rxs->rxs_mbuf == NULL) {
   3729 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   3730 				log(LOG_ERR, "%s: unable to allocate or map "
   3731 				    "rx buffer %d, error = %d\n",
   3732 				    device_xname(sc->sc_dev), i, error);
   3733 				/*
   3734 				 * XXX Should attempt to run with fewer receive
   3735 				 * XXX buffers instead of just failing.
   3736 				 */
   3737 				wm_rxdrain(sc);
   3738 				goto out;
   3739 			}
   3740 		} else {
   3741 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3742 				WM_INIT_RXDESC(sc, i);
   3743 			/*
    3744 			 * For 82575 and newer devices, the RX descriptors
    3745 			 * must be initialized after the setting of RCTL.EN in
    3746 			 * wm_set_filter().
   3747 			 */
   3748 		}
   3749 	}
   3750 	sc->sc_rxptr = 0;
   3751 	sc->sc_rxdiscard = 0;
   3752 	WM_RXCHAIN_RESET(sc);
   3753 
   3754 	/*
   3755 	 * Clear out the VLAN table -- we don't use it (yet).
   3756 	 */
   3757 	CSR_WRITE(sc, WMREG_VET, 0);
   3758 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3759 		trynum = 10; /* Due to hw errata */
   3760 	else
   3761 		trynum = 1;
   3762 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   3763 		for (j = 0; j < trynum; j++)
   3764 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   3765 
   3766 	/*
   3767 	 * Set up flow-control parameters.
   3768 	 *
   3769 	 * XXX Values could probably stand some tuning.
   3770 	 */
   3771 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   3772 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   3773 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   3774 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   3775 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   3776 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   3777 	}
   3778 
   3779 	sc->sc_fcrtl = FCRTL_DFLT;
   3780 	if (sc->sc_type < WM_T_82543) {
   3781 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   3782 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   3783 	} else {
   3784 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   3785 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   3786 	}
   3787 
   3788 	if (sc->sc_type == WM_T_80003)
   3789 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   3790 	else
   3791 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   3792 
   3793 	/* Writes the control register. */
   3794 	wm_set_vlan(sc);
   3795 
   3796 	if (sc->sc_flags & WM_F_HAS_MII) {
   3797 		int val;
   3798 
   3799 		switch (sc->sc_type) {
   3800 		case WM_T_80003:
   3801 		case WM_T_ICH8:
   3802 		case WM_T_ICH9:
   3803 		case WM_T_ICH10:
   3804 		case WM_T_PCH:
   3805 		case WM_T_PCH2:
   3806 		case WM_T_PCH_LPT:
   3807 			/*
   3808 			 * Set the mac to wait the maximum time between each
   3809 			 * iteration and increase the max iterations when
   3810 			 * polling the phy; this fixes erroneous timeouts at
   3811 			 * 10Mbps.
   3812 			 */
   3813 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   3814 			    0xFFFF);
   3815 			val = wm_kmrn_readreg(sc,
   3816 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   3817 			val |= 0x3F;
   3818 			wm_kmrn_writereg(sc,
   3819 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   3820 			break;
   3821 		default:
   3822 			break;
   3823 		}
   3824 
   3825 		if (sc->sc_type == WM_T_80003) {
   3826 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   3827 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   3828 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   3829 
    3830 			/* Bypass RX and TX FIFOs */
   3831 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   3832 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   3833 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   3834 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   3835 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   3836 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   3837 		}
   3838 	}
   3839 #if 0
   3840 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   3841 #endif
   3842 
   3843 	/* Set up checksum offload parameters. */
   3844 	reg = CSR_READ(sc, WMREG_RXCSUM);
   3845 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   3846 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   3847 		reg |= RXCSUM_IPOFL;
   3848 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   3849 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   3850 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   3851 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   3852 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   3853 
   3854 	/* Set up the interrupt registers. */
   3855 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3856 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   3857 	    ICR_RXO | ICR_RXT0;
   3858 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   3859 
   3860 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3861 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3862 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3863 		reg = CSR_READ(sc, WMREG_KABGTXD);
   3864 		reg |= KABGTXD_BGSQLBIAS;
   3865 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   3866 	}
   3867 
   3868 	/* Set up the inter-packet gap. */
   3869 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   3870 
   3871 	if (sc->sc_type >= WM_T_82543) {
   3872 		/*
   3873 		 * Set up the interrupt throttling register (units of 256ns)
   3874 		 * Note that a footnote in Intel's documentation says this
   3875 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   3876 		 * or 10Mbit mode.  Empirically, it appears to be the case
   3877 		 * that that is also true for the 1024ns units of the other
   3878 		 * interrupt-related timer registers -- so, really, we ought
   3879 		 * to divide this value by 4 when the link speed is low.
   3880 		 *
   3881 		 * XXX implement this division at link speed change!
   3882 		 */
   3883 
   3884 		/*
   3885 		 * For N interrupts/sec, set this value to:
   3886 		 * 1000000000 / (N * 256).  Note that we set the
   3887 		 * absolute and packet timer values to this value
   3888 		 * divided by 4 to get "simple timer" behavior.
   3889 		 */
   3890 
   3891 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   3892 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   3893 	}
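         	/*
         	 * Sanity check (illustrative): 1000000000 / (1500 * 256)
         	 * ~= 2604 interrupts/sec as the comment above says, and
         	 * 1500 / 4 = 375 is exactly the value programmed into
         	 * TIDV/TADV and RDTR/RADV earlier in this function.
         	 */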
   3894 
   3895 	/* Set the VLAN ethernetype. */
   3896 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   3897 
   3898 	/*
   3899 	 * Set up the transmit control register; we start out with
    3900 	 * a collision distance suitable for FDX, but update it when
   3901 	 * we resolve the media type.
   3902 	 */
   3903 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   3904 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   3905 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3906 	if (sc->sc_type >= WM_T_82571)
   3907 		sc->sc_tctl |= TCTL_MULR;
   3908 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3909 
   3910 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3911 		/* Write TDT after TCTL.EN is set. See the document. */
   3912 		CSR_WRITE(sc, WMREG_TDT, 0);
   3913 	}
   3914 
   3915 	if (sc->sc_type == WM_T_80003) {
   3916 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   3917 		reg &= ~TCTL_EXT_GCEX_MASK;
   3918 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   3919 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   3920 	}
   3921 
   3922 	/* Set the media. */
   3923 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   3924 		goto out;
   3925 
   3926 	/* Configure for OS presence */
   3927 	wm_init_manageability(sc);
   3928 
   3929 	/*
   3930 	 * Set up the receive control register; we actually program
   3931 	 * the register when we set the receive filter.  Use multicast
   3932 	 * address offset type 0.
   3933 	 *
   3934 	 * Only the i82544 has the ability to strip the incoming
   3935 	 * CRC, so we don't enable that feature.
   3936 	 */
   3937 	sc->sc_mchash_type = 0;
   3938 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   3939 	    | RCTL_MO(sc->sc_mchash_type);
   3940 
   3941 	/*
   3942 	 * The I350 has a bug where it always strips the CRC whether
   3943 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3944 	 */
   3945 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3946 	    || (sc->sc_type == WM_T_I210))
   3947 		sc->sc_rctl |= RCTL_SECRC;
   3948 
   3949 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   3950 	    && (ifp->if_mtu > ETHERMTU)) {
   3951 		sc->sc_rctl |= RCTL_LPE;
   3952 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3953 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   3954 	}
   3955 
   3956 	if (MCLBYTES == 2048) {
   3957 		sc->sc_rctl |= RCTL_2k;
   3958 	} else {
   3959 		if (sc->sc_type >= WM_T_82543) {
   3960 			switch (MCLBYTES) {
   3961 			case 4096:
   3962 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   3963 				break;
   3964 			case 8192:
   3965 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   3966 				break;
   3967 			case 16384:
   3968 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   3969 				break;
   3970 			default:
   3971 				panic("wm_init: MCLBYTES %d unsupported",
   3972 				    MCLBYTES);
   3973 				break;
   3974 			}
   3975 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   3976 	}
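         	/*
         	 * (Illustrative: with the default MCLBYTES of 2048 the
         	 * plain RCTL_2k encoding is used; larger clusters need the
         	 * BSEX buffer-size-extension encodings, which the code
         	 * above only allows on the 82543 and later.)
         	 */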
   3977 
   3978 	/* Set the receive filter. */
   3979 	wm_set_filter(sc);
   3980 
   3981 	/* Enable ECC */
   3982 	switch (sc->sc_type) {
   3983 	case WM_T_82571:
   3984 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   3985 		reg |= PBA_ECC_CORR_EN;
   3986 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   3987 		break;
   3988 	case WM_T_PCH_LPT:
   3989 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   3990 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   3991 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   3992 
   3993 		reg = CSR_READ(sc, WMREG_CTRL);
   3994 		reg |= CTRL_MEHE;
   3995 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3996 		break;
   3997 	default:
   3998 		break;
   3999 	}
   4000 
   4001 	/* On 575 and later set RDT only if RX enabled */
   4002 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4003 		for (i = 0; i < WM_NRXDESC; i++)
   4004 			WM_INIT_RXDESC(sc, i);
   4005 
   4006 	sc->sc_stopping = false;
   4007 
   4008 	/* Start the one second link check clock. */
   4009 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4010 
   4011 	/* ...all done! */
   4012 	ifp->if_flags |= IFF_RUNNING;
   4013 	ifp->if_flags &= ~IFF_OACTIVE;
   4014 
   4015  out:
   4016 	sc->sc_if_flags = ifp->if_flags;
   4017 	if (error)
   4018 		log(LOG_ERR, "%s: interface not running\n",
   4019 		    device_xname(sc->sc_dev));
   4020 	return error;
   4021 }
   4022 
   4023 /*
   4024  * wm_stop:		[ifnet interface function]
   4025  *
   4026  *	Stop transmission on the interface.
   4027  */
   4028 static void
   4029 wm_stop(struct ifnet *ifp, int disable)
   4030 {
   4031 	struct wm_softc *sc = ifp->if_softc;
   4032 
   4033 	WM_BOTH_LOCK(sc);
   4034 	wm_stop_locked(ifp, disable);
   4035 	WM_BOTH_UNLOCK(sc);
   4036 }
   4037 
   4038 static void
   4039 wm_stop_locked(struct ifnet *ifp, int disable)
   4040 {
   4041 	struct wm_softc *sc = ifp->if_softc;
   4042 	struct wm_txsoft *txs;
   4043 	int i;
   4044 
   4045 	KASSERT(WM_BOTH_LOCKED(sc));
   4046 
   4047 	sc->sc_stopping = true;
   4048 
   4049 	/* Stop the one second clock. */
   4050 	callout_stop(&sc->sc_tick_ch);
   4051 
   4052 	/* Stop the 82547 Tx FIFO stall check timer. */
   4053 	if (sc->sc_type == WM_T_82547)
   4054 		callout_stop(&sc->sc_txfifo_ch);
   4055 
   4056 	if (sc->sc_flags & WM_F_HAS_MII) {
   4057 		/* Down the MII. */
   4058 		mii_down(&sc->sc_mii);
   4059 	} else {
   4060 #if 0
   4061 		/* Should we clear PHY's status properly? */
   4062 		wm_reset(sc);
   4063 #endif
   4064 	}
   4065 
   4066 	/* Stop the transmit and receive processes. */
   4067 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4068 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4069 	sc->sc_rctl &= ~RCTL_EN;
   4070 
   4071 	/*
   4072 	 * Clear the interrupt mask to ensure the device cannot assert its
   4073 	 * interrupt line.
   4074 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4075 	 * any currently pending or shared interrupt.
   4076 	 */
   4077 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4078 	sc->sc_icr = 0;
   4079 
   4080 	/* Release any queued transmit buffers. */
   4081 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4082 		txs = &sc->sc_txsoft[i];
   4083 		if (txs->txs_mbuf != NULL) {
   4084 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4085 			m_freem(txs->txs_mbuf);
   4086 			txs->txs_mbuf = NULL;
   4087 		}
   4088 	}
   4089 
   4090 	/* Mark the interface as down and cancel the watchdog timer. */
   4091 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4092 	ifp->if_timer = 0;
   4093 
   4094 	if (disable)
   4095 		wm_rxdrain(sc);
   4096 
   4097 #if 0 /* notyet */
   4098 	if (sc->sc_type >= WM_T_82544)
   4099 		CSR_WRITE(sc, WMREG_WUC, 0);
   4100 #endif
   4101 }
   4102 
   4103 /*
   4104  * wm_tx_offload:
   4105  *
   4106  *	Set up TCP/IP checksumming parameters for the
   4107  *	specified packet.
   4108  */
   4109 static int
   4110 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4111     uint8_t *fieldsp)
   4112 {
   4113 	struct mbuf *m0 = txs->txs_mbuf;
   4114 	struct livengood_tcpip_ctxdesc *t;
   4115 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4116 	uint32_t ipcse;
   4117 	struct ether_header *eh;
   4118 	int offset, iphl;
   4119 	uint8_t fields;
   4120 
   4121 	/*
   4122 	 * XXX It would be nice if the mbuf pkthdr had offset
   4123 	 * fields for the protocol headers.
   4124 	 */
   4125 
   4126 	eh = mtod(m0, struct ether_header *);
   4127 	switch (htons(eh->ether_type)) {
   4128 	case ETHERTYPE_IP:
   4129 	case ETHERTYPE_IPV6:
   4130 		offset = ETHER_HDR_LEN;
   4131 		break;
   4132 
   4133 	case ETHERTYPE_VLAN:
   4134 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4135 		break;
   4136 
   4137 	default:
   4138 		/*
   4139 		 * Don't support this protocol or encapsulation.
   4140 		 */
   4141 		*fieldsp = 0;
   4142 		*cmdp = 0;
   4143 		return 0;
   4144 	}
   4145 
   4146 	if ((m0->m_pkthdr.csum_flags &
   4147 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4148 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4149 	} else {
   4150 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4151 	}
   4152 	ipcse = offset + iphl - 1;
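         	/*
         	 * Illustrative: for an untagged IPv4 packet with no IP
         	 * options, offset = 14 and iphl = 20, so ipcse = 33, the
         	 * inclusive offset of the last IP header byte covered by
         	 * the hardware IP checksum.
         	 */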
   4153 
   4154 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4155 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4156 	seg = 0;
   4157 	fields = 0;
   4158 
   4159 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4160 		int hlen = offset + iphl;
   4161 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4162 
   4163 		if (__predict_false(m0->m_len <
   4164 				    (hlen + sizeof(struct tcphdr)))) {
   4165 			/*
   4166 			 * TCP/IP headers are not in the first mbuf; we need
   4167 			 * to do this the slow and painful way.  Let's just
   4168 			 * hope this doesn't happen very often.
   4169 			 */
   4170 			struct tcphdr th;
   4171 
   4172 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4173 
   4174 			m_copydata(m0, hlen, sizeof(th), &th);
   4175 			if (v4) {
   4176 				struct ip ip;
   4177 
   4178 				m_copydata(m0, offset, sizeof(ip), &ip);
   4179 				ip.ip_len = 0;
   4180 				m_copyback(m0,
   4181 				    offset + offsetof(struct ip, ip_len),
   4182 				    sizeof(ip.ip_len), &ip.ip_len);
   4183 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4184 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4185 			} else {
   4186 				struct ip6_hdr ip6;
   4187 
   4188 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4189 				ip6.ip6_plen = 0;
   4190 				m_copyback(m0,
   4191 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4192 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4193 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4194 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4195 			}
   4196 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4197 			    sizeof(th.th_sum), &th.th_sum);
   4198 
   4199 			hlen += th.th_off << 2;
   4200 		} else {
   4201 			/*
   4202 			 * TCP/IP headers are in the first mbuf; we can do
   4203 			 * this the easy way.
   4204 			 */
   4205 			struct tcphdr *th;
   4206 
   4207 			if (v4) {
   4208 				struct ip *ip =
   4209 				    (void *)(mtod(m0, char *) + offset);
   4210 				th = (void *)(mtod(m0, char *) + hlen);
   4211 
   4212 				ip->ip_len = 0;
   4213 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4214 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4215 			} else {
   4216 				struct ip6_hdr *ip6 =
   4217 				    (void *)(mtod(m0, char *) + offset);
   4218 				th = (void *)(mtod(m0, char *) + hlen);
   4219 
   4220 				ip6->ip6_plen = 0;
   4221 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4222 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4223 			}
   4224 			hlen += th->th_off << 2;
   4225 		}
   4226 
   4227 		if (v4) {
   4228 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4229 			cmdlen |= WTX_TCPIP_CMD_IP;
   4230 		} else {
   4231 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4232 			ipcse = 0;
   4233 		}
   4234 		cmd |= WTX_TCPIP_CMD_TSE;
   4235 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4236 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4237 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4238 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4239 	}
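         	/*
         	 * (Illustrative note on the TSO setup above: the
         	 * pseudo-header checksum stored in th_sum deliberately
         	 * omits the TCP length, and ip_len/ip6_plen are zeroed,
         	 * because the controller fills in per-segment lengths as
         	 * it carves the payload into MSS-sized frames.)
         	 */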
   4240 
   4241 	/*
   4242 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4243 	 * offload feature, if we load the context descriptor, we
   4244 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4245 	 */
   4246 
   4247 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4248 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4249 	    WTX_TCPIP_IPCSE(ipcse);
   4250 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4251 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4252 		fields |= WTX_IXSM;
   4253 	}
   4254 
   4255 	offset += iphl;
   4256 
   4257 	if (m0->m_pkthdr.csum_flags &
   4258 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4259 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4260 		fields |= WTX_TXSM;
   4261 		tucs = WTX_TCPIP_TUCSS(offset) |
   4262 		    WTX_TCPIP_TUCSO(offset +
   4263 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4264 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4265 	} else if ((m0->m_pkthdr.csum_flags &
   4266 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4267 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4268 		fields |= WTX_TXSM;
   4269 		tucs = WTX_TCPIP_TUCSS(offset) |
   4270 		    WTX_TCPIP_TUCSO(offset +
   4271 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4272 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4273 	} else {
   4274 		/* Just initialize it to a valid TCP context. */
   4275 		tucs = WTX_TCPIP_TUCSS(offset) |
   4276 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4277 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4278 	}
   4279 
   4280 	/* Fill in the context descriptor. */
   4281 	t = (struct livengood_tcpip_ctxdesc *)
   4282 	    &sc->sc_txdescs[sc->sc_txnext];
   4283 	t->tcpip_ipcs = htole32(ipcs);
   4284 	t->tcpip_tucs = htole32(tucs);
   4285 	t->tcpip_cmdlen = htole32(cmdlen);
   4286 	t->tcpip_seg = htole32(seg);
   4287 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4288 
   4289 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4290 	txs->txs_ndesc++;
   4291 
   4292 	*cmdp = cmd;
   4293 	*fieldsp = fields;
   4294 
   4295 	return 0;
   4296 }
   4297 
   4298 static void
   4299 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4300 {
   4301 	struct mbuf *m;
   4302 	int i;
   4303 
   4304 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4305 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4306 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4307 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4308 		    m->m_data, m->m_len, m->m_flags);
   4309 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4310 	    i, i == 1 ? "" : "s");
   4311 }
   4312 
   4313 /*
   4314  * wm_82547_txfifo_stall:
   4315  *
   4316  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4317  *	reset the FIFO pointers, and restart packet transmission.
   4318  */
   4319 static void
   4320 wm_82547_txfifo_stall(void *arg)
   4321 {
   4322 	struct wm_softc *sc = arg;
   4323 #ifndef WM_MPSAFE
   4324 	int s;
   4325 
   4326 	s = splnet();
   4327 #endif
   4328 	WM_TX_LOCK(sc);
   4329 
   4330 	if (sc->sc_stopping)
   4331 		goto out;
   4332 
   4333 	if (sc->sc_txfifo_stall) {
   4334 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4335 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4336 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4337 			/*
   4338 			 * Packets have drained.  Stop transmitter, reset
   4339 			 * FIFO pointers, restart transmitter, and kick
   4340 			 * the packet queue.
   4341 			 */
   4342 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4343 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4344 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4345 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4346 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4347 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4348 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4349 			CSR_WRITE_FLUSH(sc);
   4350 
   4351 			sc->sc_txfifo_head = 0;
   4352 			sc->sc_txfifo_stall = 0;
   4353 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4354 		} else {
   4355 			/*
   4356 			 * Still waiting for packets to drain; try again in
   4357 			 * another tick.
   4358 			 */
   4359 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4360 		}
   4361 	}
   4362 
   4363 out:
   4364 	WM_TX_UNLOCK(sc);
   4365 #ifndef WM_MPSAFE
   4366 	splx(s);
   4367 #endif
   4368 }
   4369 
   4370 /*
   4371  * wm_82547_txfifo_bugchk:
   4372  *
   4373  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4374  *	prevent enqueueing a packet that would wrap around the end
    4375  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   4376  *
   4377  *	We do this by checking the amount of space before the end
   4378  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4379  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4380  *	the internal FIFO pointers to the beginning, and restart
   4381  *	transmission on the interface.
   4382  */
   4383 #define	WM_FIFO_HDR		0x10
   4384 #define	WM_82547_PAD_LEN	0x3e0
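         /*
          * Worked example (illustrative): a 1514-byte frame occupies
          * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.  With
          * only 512 bytes left before the end of the FIFO, the check
          * below sees 1536 >= 0x3e0 + 512 = 1504, stalls the FIFO, and
          * lets wm_82547_txfifo_stall() drain and reset it.
          */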
   4385 static int
   4386 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4387 {
   4388 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4389 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4390 
   4391 	/* Just return if already stalled. */
   4392 	if (sc->sc_txfifo_stall)
   4393 		return 1;
   4394 
   4395 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4396 		/* Stall only occurs in half-duplex mode. */
   4397 		goto send_packet;
   4398 	}
   4399 
   4400 	if (len >= WM_82547_PAD_LEN + space) {
   4401 		sc->sc_txfifo_stall = 1;
   4402 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4403 		return 1;
   4404 	}
   4405 
   4406  send_packet:
   4407 	sc->sc_txfifo_head += len;
   4408 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4409 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4410 
   4411 	return 0;
   4412 }
   4413 
   4414 /*
   4415  * wm_start:		[ifnet interface function]
   4416  *
   4417  *	Start packet transmission on the interface.
   4418  */
   4419 static void
   4420 wm_start(struct ifnet *ifp)
   4421 {
   4422 	struct wm_softc *sc = ifp->if_softc;
   4423 
   4424 	WM_TX_LOCK(sc);
   4425 	if (!sc->sc_stopping)
   4426 		wm_start_locked(ifp);
   4427 	WM_TX_UNLOCK(sc);
   4428 }
   4429 
   4430 static void
   4431 wm_start_locked(struct ifnet *ifp)
   4432 {
   4433 	struct wm_softc *sc = ifp->if_softc;
   4434 	struct mbuf *m0;
   4435 	struct m_tag *mtag;
   4436 	struct wm_txsoft *txs;
   4437 	bus_dmamap_t dmamap;
   4438 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4439 	bus_addr_t curaddr;
   4440 	bus_size_t seglen, curlen;
   4441 	uint32_t cksumcmd;
   4442 	uint8_t cksumfields;
   4443 
   4444 	KASSERT(WM_TX_LOCKED(sc));
   4445 
   4446 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4447 		return;
   4448 
   4449 	/* Remember the previous number of free descriptors. */
   4450 	ofree = sc->sc_txfree;
   4451 
   4452 	/*
   4453 	 * Loop through the send queue, setting up transmit descriptors
   4454 	 * until we drain the queue, or use up all available transmit
   4455 	 * descriptors.
   4456 	 */
   4457 	for (;;) {
   4458 		m0 = NULL;
   4459 
   4460 		/* Get a work queue entry. */
   4461 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4462 			wm_txintr(sc);
   4463 			if (sc->sc_txsfree == 0) {
   4464 				DPRINTF(WM_DEBUG_TX,
   4465 				    ("%s: TX: no free job descriptors\n",
   4466 					device_xname(sc->sc_dev)));
   4467 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4468 				break;
   4469 			}
   4470 		}
   4471 
   4472 		/* Grab a packet off the queue. */
   4473 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4474 		if (m0 == NULL)
   4475 			break;
   4476 
   4477 		DPRINTF(WM_DEBUG_TX,
   4478 		    ("%s: TX: have packet to transmit: %p\n",
   4479 		    device_xname(sc->sc_dev), m0));
   4480 
   4481 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4482 		dmamap = txs->txs_dmamap;
   4483 
   4484 		use_tso = (m0->m_pkthdr.csum_flags &
   4485 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4486 
   4487 		/*
   4488 		 * So says the Linux driver:
   4489 		 * The controller does a simple calculation to make sure
   4490 		 * there is enough room in the FIFO before initiating the
   4491 		 * DMA for each buffer.  The calc is:
   4492 		 *	4 = ceil(buffer len / MSS)
   4493 		 * To make sure we don't overrun the FIFO, adjust the max
   4494 		 * buffer len if the MSS drops.
   4495 		 */
   4496 		dmamap->dm_maxsegsz =
   4497 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4498 		    ? m0->m_pkthdr.segsz << 2
   4499 		    : WTX_MAX_LEN;
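         		/*
         		 * Example (illustrative, assuming 4 * MSS stays
         		 * below WTX_MAX_LEN): for a typical TCP MSS of 1448,
         		 * each DMA segment is capped at 1448 << 2 = 5792
         		 * bytes, so no single buffer spans more than four
         		 * MSS-sized chunks.
         		 */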
   4500 
   4501 		/*
   4502 		 * Load the DMA map.  If this fails, the packet either
   4503 		 * didn't fit in the allotted number of segments, or we
   4504 		 * were short on resources.  For the too-many-segments
   4505 		 * case, we simply report an error and drop the packet,
   4506 		 * since we can't sanely copy a jumbo packet to a single
   4507 		 * buffer.
   4508 		 */
   4509 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4510 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4511 		if (error) {
   4512 			if (error == EFBIG) {
   4513 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4514 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4515 				    "DMA segments, dropping...\n",
   4516 				    device_xname(sc->sc_dev));
   4517 				wm_dump_mbuf_chain(sc, m0);
   4518 				m_freem(m0);
   4519 				continue;
   4520 			}
   4521 			/*  Short on resources, just stop for now. */
   4522 			DPRINTF(WM_DEBUG_TX,
   4523 			    ("%s: TX: dmamap load failed: %d\n",
   4524 			    device_xname(sc->sc_dev), error));
   4525 			break;
   4526 		}
   4527 
   4528 		segs_needed = dmamap->dm_nsegs;
   4529 		if (use_tso) {
   4530 			/* For sentinel descriptor; see below. */
   4531 			segs_needed++;
   4532 		}
   4533 
   4534 		/*
   4535 		 * Ensure we have enough descriptors free to describe
   4536 		 * the packet.  Note, we always reserve one descriptor
   4537 		 * at the end of the ring due to the semantics of the
   4538 		 * TDT register, plus one more in the event we need
   4539 		 * to load offload context.
   4540 		 */
   4541 		if (segs_needed > sc->sc_txfree - 2) {
   4542 			/*
   4543 			 * Not enough free descriptors to transmit this
   4544 			 * packet.  We haven't committed anything yet,
   4545 			 * so just unload the DMA map, put the packet
    4546 			 * back on the queue, and punt.  Notify the upper
   4547 			 * layer that there are no more slots left.
   4548 			 */
   4549 			DPRINTF(WM_DEBUG_TX,
   4550 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4551 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4552 			    segs_needed, sc->sc_txfree - 1));
   4553 			ifp->if_flags |= IFF_OACTIVE;
   4554 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4555 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4556 			break;
   4557 		}
   4558 
   4559 		/*
   4560 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4561 		 * once we know we can transmit the packet, since we
   4562 		 * do some internal FIFO space accounting here.
   4563 		 */
   4564 		if (sc->sc_type == WM_T_82547 &&
   4565 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4566 			DPRINTF(WM_DEBUG_TX,
   4567 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4568 			    device_xname(sc->sc_dev)));
   4569 			ifp->if_flags |= IFF_OACTIVE;
   4570 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4571 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   4572 			break;
   4573 		}
   4574 
   4575 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   4576 
   4577 		DPRINTF(WM_DEBUG_TX,
   4578 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   4579 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   4580 
   4581 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   4582 
   4583 		/*
   4584 		 * Store a pointer to the packet so that we can free it
   4585 		 * later.
   4586 		 *
   4587 		 * Initially, we consider the number of descriptors the
    4588 		 * packet uses to be the number of DMA segments.  This may be
   4589 		 * incremented by 1 if we do checksum offload (a descriptor
   4590 		 * is used to set the checksum context).
   4591 		 */
   4592 		txs->txs_mbuf = m0;
   4593 		txs->txs_firstdesc = sc->sc_txnext;
   4594 		txs->txs_ndesc = segs_needed;
   4595 
   4596 		/* Set up offload parameters for this packet. */
   4597 		if (m0->m_pkthdr.csum_flags &
   4598 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   4599 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   4600 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   4601 			if (wm_tx_offload(sc, txs, &cksumcmd,
   4602 					  &cksumfields) != 0) {
   4603 				/* Error message already displayed. */
   4604 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   4605 				continue;
   4606 			}
   4607 		} else {
   4608 			cksumcmd = 0;
   4609 			cksumfields = 0;
   4610 		}
   4611 
   4612 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   4613 
   4614 		/* Sync the DMA map. */
   4615 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   4616 		    BUS_DMASYNC_PREWRITE);
   4617 
   4618 		/* Initialize the transmit descriptor. */
   4619 		for (nexttx = sc->sc_txnext, seg = 0;
   4620 		     seg < dmamap->dm_nsegs; seg++) {
   4621 			for (seglen = dmamap->dm_segs[seg].ds_len,
   4622 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   4623 			     seglen != 0;
   4624 			     curaddr += curlen, seglen -= curlen,
   4625 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   4626 				curlen = seglen;
   4627 
   4628 				/*
   4629 				 * So says the Linux driver:
   4630 				 * Work around for premature descriptor
   4631 				 * write-backs in TSO mode.  Append a
   4632 				 * 4-byte sentinel descriptor.
   4633 				 */
   4634 				if (use_tso &&
   4635 				    seg == dmamap->dm_nsegs - 1 &&
   4636 				    curlen > 8)
   4637 					curlen -= 4;
   4638 
   4639 				wm_set_dma_addr(
   4640 				    &sc->sc_txdescs[nexttx].wtx_addr,
   4641 				    curaddr);
   4642 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   4643 				    htole32(cksumcmd | curlen);
   4644 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   4645 				    0;
   4646 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   4647 				    cksumfields;
   4648 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   4649 				lasttx = nexttx;
   4650 
   4651 				DPRINTF(WM_DEBUG_TX,
   4652 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   4653 				     "len %#04zx\n",
   4654 				    device_xname(sc->sc_dev), nexttx,
   4655 				    (uint64_t)curaddr, curlen));
   4656 			}
   4657 		}
   4658 
   4659 		KASSERT(lasttx != -1);
   4660 
   4661 		/*
   4662 		 * Set up the command byte on the last descriptor of
   4663 		 * the packet.  If we're in the interrupt delay window,
   4664 		 * delay the interrupt.
   4665 		 */
   4666 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4667 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   4668 
   4669 		/*
   4670 		 * If VLANs are enabled and the packet has a VLAN tag, set
   4671 		 * up the descriptor to encapsulate the packet for us.
   4672 		 *
   4673 		 * This is only valid on the last descriptor of the packet.
   4674 		 */
   4675 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4676 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4677 			    htole32(WTX_CMD_VLE);
   4678 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   4679 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   4680 		}
   4681 
   4682 		txs->txs_lastdesc = lasttx;
   4683 
   4684 		DPRINTF(WM_DEBUG_TX,
   4685 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   4686 		    device_xname(sc->sc_dev),
   4687 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   4688 
   4689 		/* Sync the descriptors we're using. */
   4690 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   4691 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4692 
   4693 		/* Give the packet to the chip. */
   4694 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   4695 
   4696 		DPRINTF(WM_DEBUG_TX,
   4697 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   4698 
   4699 		DPRINTF(WM_DEBUG_TX,
   4700 		    ("%s: TX: finished transmitting packet, job %d\n",
   4701 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   4702 
   4703 		/* Advance the tx pointer. */
   4704 		sc->sc_txfree -= txs->txs_ndesc;
   4705 		sc->sc_txnext = nexttx;
   4706 
   4707 		sc->sc_txsfree--;
   4708 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   4709 
   4710 		/* Pass the packet to any BPF listeners. */
   4711 		bpf_mtap(ifp, m0);
   4712 	}
   4713 
   4714 	if (m0 != NULL) {
   4715 		ifp->if_flags |= IFF_OACTIVE;
   4716 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4717 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   4718 		m_freem(m0);
   4719 	}
   4720 
   4721 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   4722 		/* No more slots; notify upper layer. */
   4723 		ifp->if_flags |= IFF_OACTIVE;
   4724 	}
   4725 
   4726 	if (sc->sc_txfree != ofree) {
   4727 		/* Set a watchdog timer in case the chip flakes out. */
   4728 		ifp->if_timer = 5;
   4729 	}
   4730 }
   4731 
   4732 /*
   4733  * wm_nq_tx_offload:
   4734  *
   4735  *	Set up TCP/IP checksumming parameters for the
   4736  *	specified packet, for NEWQUEUE devices
   4737  */
   4738 static int
   4739 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   4740     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   4741 {
   4742 	struct mbuf *m0 = txs->txs_mbuf;
   4743 	struct m_tag *mtag;
   4744 	uint32_t vl_len, mssidx, cmdc;
   4745 	struct ether_header *eh;
   4746 	int offset, iphl;
   4747 
   4748 	/*
   4749 	 * XXX It would be nice if the mbuf pkthdr had offset
   4750 	 * fields for the protocol headers.
   4751 	 */
   4752 	*cmdlenp = 0;
   4753 	*fieldsp = 0;
   4754 
   4755 	eh = mtod(m0, struct ether_header *);
   4756 	switch (htons(eh->ether_type)) {
   4757 	case ETHERTYPE_IP:
   4758 	case ETHERTYPE_IPV6:
   4759 		offset = ETHER_HDR_LEN;
   4760 		break;
   4761 
   4762 	case ETHERTYPE_VLAN:
   4763 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4764 		break;
   4765 
   4766 	default:
   4767 		/* Don't support this protocol or encapsulation. */
   4768 		*do_csum = false;
   4769 		return 0;
   4770 	}
   4771 	*do_csum = true;
   4772 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   4773 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   4774 
   4775 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   4776 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   4777 
   4778 	if ((m0->m_pkthdr.csum_flags &
   4779 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   4780 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4781 	} else {
   4782 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4783 	}
   4784 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   4785 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   4786 
   4787 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4788 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   4789 		     << NQTXC_VLLEN_VLAN_SHIFT);
   4790 		*cmdlenp |= NQTX_CMD_VLE;
   4791 	}
   4792 
   4793 	mssidx = 0;
   4794 
   4795 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4796 		int hlen = offset + iphl;
   4797 		int tcp_hlen;
   4798 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4799 
   4800 		if (__predict_false(m0->m_len <
   4801 				    (hlen + sizeof(struct tcphdr)))) {
   4802 			/*
   4803 			 * TCP/IP headers are not in the first mbuf; we need
   4804 			 * to do this the slow and painful way.  Let's just
   4805 			 * hope this doesn't happen very often.
   4806 			 */
   4807 			struct tcphdr th;
   4808 
   4809 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4810 
   4811 			m_copydata(m0, hlen, sizeof(th), &th);
   4812 			if (v4) {
   4813 				struct ip ip;
   4814 
   4815 				m_copydata(m0, offset, sizeof(ip), &ip);
   4816 				ip.ip_len = 0;
   4817 				m_copyback(m0,
   4818 				    offset + offsetof(struct ip, ip_len),
   4819 				    sizeof(ip.ip_len), &ip.ip_len);
   4820 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4821 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4822 			} else {
   4823 				struct ip6_hdr ip6;
   4824 
   4825 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4826 				ip6.ip6_plen = 0;
   4827 				m_copyback(m0,
   4828 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4829 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4830 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4831 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4832 			}
   4833 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4834 			    sizeof(th.th_sum), &th.th_sum);
   4835 
   4836 			tcp_hlen = th.th_off << 2;
   4837 		} else {
   4838 			/*
   4839 			 * TCP/IP headers are in the first mbuf; we can do
   4840 			 * this the easy way.
   4841 			 */
   4842 			struct tcphdr *th;
   4843 
   4844 			if (v4) {
   4845 				struct ip *ip =
   4846 				    (void *)(mtod(m0, char *) + offset);
   4847 				th = (void *)(mtod(m0, char *) + hlen);
   4848 
   4849 				ip->ip_len = 0;
   4850 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4851 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4852 			} else {
   4853 				struct ip6_hdr *ip6 =
   4854 				    (void *)(mtod(m0, char *) + offset);
   4855 				th = (void *)(mtod(m0, char *) + hlen);
   4856 
   4857 				ip6->ip6_plen = 0;
   4858 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4859 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4860 			}
   4861 			tcp_hlen = th->th_off << 2;
   4862 		}
   4863 		hlen += tcp_hlen;
   4864 		*cmdlenp |= NQTX_CMD_TSE;
   4865 
   4866 		if (v4) {
   4867 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4868 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   4869 		} else {
   4870 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4871 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   4872 		}
   4873 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   4874 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4875 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   4876 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   4877 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   4878 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   4879 	} else {
   4880 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   4881 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4882 	}
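	/*
	 * Worked numbers for the TSO packing above (hypothetical values,
	 * illustration only): a 4434-byte packet with 14 + 20 + 32 = 66
	 * bytes of headers gives PAYLEN = 4368; with an MSS of 1460 the
	 * hardware cuts three segments (1460 + 1460 + 1448), and the MSS
	 * and the 32-byte TCP header length are packed into mssidx.
	 */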
   4883 
   4884 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   4885 		*fieldsp |= NQTXD_FIELDS_IXSM;
   4886 		cmdc |= NQTXC_CMD_IP4;
   4887 	}
   4888 
   4889 	if (m0->m_pkthdr.csum_flags &
   4890 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4891 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4892 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4893 			cmdc |= NQTXC_CMD_TCP;
   4894 		} else {
   4895 			cmdc |= NQTXC_CMD_UDP;
   4896 		}
   4897 		cmdc |= NQTXC_CMD_IP4;
   4898 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4899 	}
   4900 	if (m0->m_pkthdr.csum_flags &
   4901 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4902 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4903 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4904 			cmdc |= NQTXC_CMD_TCP;
   4905 		} else {
   4906 			cmdc |= NQTXC_CMD_UDP;
   4907 		}
   4908 		cmdc |= NQTXC_CMD_IP6;
   4909 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4910 	}
   4911 
   4912 	/* Fill in the context descriptor. */
   4913 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   4914 	    htole32(vl_len);
   4915 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   4916 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   4917 	    htole32(cmdc);
   4918 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   4919 	    htole32(mssidx);
   4920 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4921 	DPRINTF(WM_DEBUG_TX,
   4922 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   4923 	    sc->sc_txnext, 0, vl_len));
   4924 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   4925 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4926 	txs->txs_ndesc++;
   4927 	return 0;
   4928 }
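/*
 * A minimal sketch of the NQTXC_VLLEN packing performed in
 * wm_nq_tx_offload() above, using hypothetical example values
 * (illustration only; compiled out):
 */
#if 0
	uint32_t ex_vl_len = 0;

	ex_vl_len |= 14 << NQTXC_VLLEN_MACLEN_SHIFT;	/* Ethernet header */
	ex_vl_len |= 20 << NQTXC_VLLEN_IPLEN_SHIFT;	/* IPv4 header */
	ex_vl_len |= 5 << NQTXC_VLLEN_VLAN_SHIFT;	/* 802.1q VID (example) */
#endif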
   4929 
   4930 /*
   4931  * wm_nq_start:		[ifnet interface function]
   4932  *
   4933  *	Start packet transmission on the interface for NEWQUEUE devices
   4934  */
   4935 static void
   4936 wm_nq_start(struct ifnet *ifp)
   4937 {
   4938 	struct wm_softc *sc = ifp->if_softc;
   4939 
   4940 	WM_TX_LOCK(sc);
   4941 	if (!sc->sc_stopping)
   4942 		wm_nq_start_locked(ifp);
   4943 	WM_TX_UNLOCK(sc);
   4944 }
   4945 
   4946 static void
   4947 wm_nq_start_locked(struct ifnet *ifp)
   4948 {
   4949 	struct wm_softc *sc = ifp->if_softc;
   4950 	struct mbuf *m0;
   4951 	struct m_tag *mtag;
   4952 	struct wm_txsoft *txs;
   4953 	bus_dmamap_t dmamap;
   4954 	int error, nexttx, lasttx = -1, seg, segs_needed;
   4955 	bool do_csum, sent;
   4956 
   4957 	KASSERT(WM_TX_LOCKED(sc));
   4958 
   4959 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4960 		return;
   4961 
   4962 	sent = false;
   4963 
   4964 	/*
   4965 	 * Loop through the send queue, setting up transmit descriptors
   4966 	 * until we drain the queue, or use up all available transmit
   4967 	 * descriptors.
   4968 	 */
   4969 	for (;;) {
   4970 		m0 = NULL;
   4971 
   4972 		/* Get a work queue entry. */
   4973 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4974 			wm_txintr(sc);
   4975 			if (sc->sc_txsfree == 0) {
   4976 				DPRINTF(WM_DEBUG_TX,
   4977 				    ("%s: TX: no free job descriptors\n",
   4978 					device_xname(sc->sc_dev)));
   4979 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4980 				break;
   4981 			}
   4982 		}
   4983 
   4984 		/* Grab a packet off the queue. */
   4985 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4986 		if (m0 == NULL)
   4987 			break;
   4988 
   4989 		DPRINTF(WM_DEBUG_TX,
   4990 		    ("%s: TX: have packet to transmit: %p\n",
   4991 		    device_xname(sc->sc_dev), m0));
   4992 
   4993 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4994 		dmamap = txs->txs_dmamap;
   4995 
   4996 		/*
   4997 		 * Load the DMA map.  If this fails, the packet either
   4998 		 * didn't fit in the allotted number of segments, or we
   4999 		 * were short on resources.  For the too-many-segments
   5000 		 * case, we simply report an error and drop the packet,
   5001 		 * since we can't sanely copy a jumbo packet to a single
   5002 		 * buffer.
   5003 		 */
   5004 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5005 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5006 		if (error) {
   5007 			if (error == EFBIG) {
   5008 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5009 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5010 				    "DMA segments, dropping...\n",
   5011 				    device_xname(sc->sc_dev));
   5012 				wm_dump_mbuf_chain(sc, m0);
   5013 				m_freem(m0);
   5014 				continue;
   5015 			}
   5016 			/* Short on resources, just stop for now. */
   5017 			DPRINTF(WM_DEBUG_TX,
   5018 			    ("%s: TX: dmamap load failed: %d\n",
   5019 			    device_xname(sc->sc_dev), error));
   5020 			break;
   5021 		}
   5022 
   5023 		segs_needed = dmamap->dm_nsegs;
   5024 
   5025 		/*
   5026 		 * Ensure we have enough descriptors free to describe
   5027 		 * the packet.  Note, we always reserve one descriptor
   5028 		 * at the end of the ring due to the semantics of the
   5029 		 * TDT register, plus one more in the event we need
   5030 		 * to load offload context.
   5031 		 */
   5032 		if (segs_needed > sc->sc_txfree - 2) {
   5033 			/*
   5034 			 * Not enough free descriptors to transmit this
   5035 			 * packet.  We haven't committed anything yet,
    5036 			 * so just unload the DMA map, drop the packet, and
    5037 			 * punt.  Notify the upper
   5038 			 * layer that there are no more slots left.
   5039 			 */
   5040 			DPRINTF(WM_DEBUG_TX,
   5041 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5042 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   5043 			    segs_needed, sc->sc_txfree - 1));
   5044 			ifp->if_flags |= IFF_OACTIVE;
   5045 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5046 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5047 			break;
   5048 		}
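		/*
		 * The check above, restated with hypothetical numbers: with
		 * sc_txfree = 5, a packet of 4 DMA segments is refused,
		 * since one descriptor stays unused for the TDT full/empty
		 * convention and one may be consumed by an offload context
		 * descriptor, leaving only 3 usable slots.
		 */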
   5049 
   5050 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5051 
   5052 		DPRINTF(WM_DEBUG_TX,
   5053 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5054 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5055 
   5056 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5057 
   5058 		/*
   5059 		 * Store a pointer to the packet so that we can free it
   5060 		 * later.
   5061 		 *
    5062 		 * Initially, we take the number of descriptors the
    5063 		 * packet uses to be the number of DMA segments.  This may be
   5064 		 * incremented by 1 if we do checksum offload (a descriptor
   5065 		 * is used to set the checksum context).
   5066 		 */
   5067 		txs->txs_mbuf = m0;
   5068 		txs->txs_firstdesc = sc->sc_txnext;
   5069 		txs->txs_ndesc = segs_needed;
   5070 
   5071 		/* Set up offload parameters for this packet. */
   5072 		uint32_t cmdlen, fields, dcmdlen;
   5073 		if (m0->m_pkthdr.csum_flags &
   5074 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5075 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5076 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5077 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5078 			    &do_csum) != 0) {
   5079 				/* Error message already displayed. */
   5080 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5081 				continue;
   5082 			}
   5083 		} else {
   5084 			do_csum = false;
   5085 			cmdlen = 0;
   5086 			fields = 0;
   5087 		}
   5088 
   5089 		/* Sync the DMA map. */
   5090 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5091 		    BUS_DMASYNC_PREWRITE);
   5092 
   5093 		/* Initialize the first transmit descriptor. */
   5094 		nexttx = sc->sc_txnext;
   5095 		if (!do_csum) {
   5096 			/* setup a legacy descriptor */
   5097 			wm_set_dma_addr(
   5098 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5099 			    dmamap->dm_segs[0].ds_addr);
   5100 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5101 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5102 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5103 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5104 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5105 			    NULL) {
   5106 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5107 				    htole32(WTX_CMD_VLE);
   5108 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5109 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5110 			} else {
   5111 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5112 			}
   5113 			dcmdlen = 0;
   5114 		} else {
   5115 			/* setup an advanced data descriptor */
   5116 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5117 			    htole64(dmamap->dm_segs[0].ds_addr);
   5118 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5119 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
    5120 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5121 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5122 			    htole32(fields);
   5123 			DPRINTF(WM_DEBUG_TX,
   5124 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5125 			    device_xname(sc->sc_dev), nexttx,
   5126 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5127 			DPRINTF(WM_DEBUG_TX,
   5128 			    ("\t 0x%08x%08x\n", fields,
   5129 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5130 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5131 		}
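		/*
		 * The dcmdlen chosen above selects the format of the
		 * remaining descriptors: 0 leaves them in legacy layout,
		 * while NQTX_DTYP_D | NQTX_CMD_DEXT marks them as advanced
		 * data descriptors, so every descriptor of a packet shares
		 * one format.
		 */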
   5132 
   5133 		lasttx = nexttx;
   5134 		nexttx = WM_NEXTTX(sc, nexttx);
   5135 		/*
    5136 		 * Fill in the remaining descriptors.  The legacy and advanced
    5137 		 * formats are the same from here on.
   5138 		 */
   5139 		for (seg = 1; seg < dmamap->dm_nsegs;
   5140 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5141 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5142 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5143 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5144 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5145 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5146 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5147 			lasttx = nexttx;
   5148 
   5149 			DPRINTF(WM_DEBUG_TX,
   5150 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5151 			     "len %#04zx\n",
   5152 			    device_xname(sc->sc_dev), nexttx,
   5153 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5154 			    dmamap->dm_segs[seg].ds_len));
   5155 		}
   5156 
   5157 		KASSERT(lasttx != -1);
   5158 
   5159 		/*
   5160 		 * Set up the command byte on the last descriptor of
   5161 		 * the packet.  If we're in the interrupt delay window,
   5162 		 * delay the interrupt.
   5163 		 */
   5164 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5165 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   5166 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5167 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5168 
   5169 		txs->txs_lastdesc = lasttx;
   5170 
   5171 		DPRINTF(WM_DEBUG_TX,
   5172 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5173 		    device_xname(sc->sc_dev),
   5174 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5175 
   5176 		/* Sync the descriptors we're using. */
   5177 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5178 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5179 
   5180 		/* Give the packet to the chip. */
   5181 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5182 		sent = true;
   5183 
   5184 		DPRINTF(WM_DEBUG_TX,
   5185 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5186 
   5187 		DPRINTF(WM_DEBUG_TX,
   5188 		    ("%s: TX: finished transmitting packet, job %d\n",
   5189 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5190 
   5191 		/* Advance the tx pointer. */
   5192 		sc->sc_txfree -= txs->txs_ndesc;
   5193 		sc->sc_txnext = nexttx;
   5194 
   5195 		sc->sc_txsfree--;
   5196 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5197 
   5198 		/* Pass the packet to any BPF listeners. */
   5199 		bpf_mtap(ifp, m0);
   5200 	}
   5201 
   5202 	if (m0 != NULL) {
   5203 		ifp->if_flags |= IFF_OACTIVE;
   5204 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5205 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5206 		m_freem(m0);
   5207 	}
   5208 
   5209 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5210 		/* No more slots; notify upper layer. */
   5211 		ifp->if_flags |= IFF_OACTIVE;
   5212 	}
   5213 
   5214 	if (sent) {
   5215 		/* Set a watchdog timer in case the chip flakes out. */
   5216 		ifp->if_timer = 5;
   5217 	}
   5218 }
   5219 
   5220 /* Interrupt */
   5221 
   5222 /*
   5223  * wm_txintr:
   5224  *
   5225  *	Helper; handle transmit interrupts.
   5226  */
   5227 static void
   5228 wm_txintr(struct wm_softc *sc)
   5229 {
   5230 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5231 	struct wm_txsoft *txs;
   5232 	uint8_t status;
   5233 	int i;
   5234 
   5235 	if (sc->sc_stopping)
   5236 		return;
   5237 
   5238 	ifp->if_flags &= ~IFF_OACTIVE;
   5239 
   5240 	/*
   5241 	 * Go through the Tx list and free mbufs for those
   5242 	 * frames which have been transmitted.
   5243 	 */
   5244 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5245 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5246 		txs = &sc->sc_txsoft[i];
   5247 
   5248 		DPRINTF(WM_DEBUG_TX,
   5249 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5250 
   5251 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5252 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5253 
   5254 		status =
   5255 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   5256 		if ((status & WTX_ST_DD) == 0) {
   5257 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5258 			    BUS_DMASYNC_PREREAD);
   5259 			break;
   5260 		}
   5261 
   5262 		DPRINTF(WM_DEBUG_TX,
   5263 		    ("%s: TX: job %d done: descs %d..%d\n",
   5264 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5265 		    txs->txs_lastdesc));
   5266 
   5267 		/*
   5268 		 * XXX We should probably be using the statistics
   5269 		 * XXX registers, but I don't know if they exist
   5270 		 * XXX on chips before the i82544.
   5271 		 */
   5272 
   5273 #ifdef WM_EVENT_COUNTERS
   5274 		if (status & WTX_ST_TU)
   5275 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5276 #endif /* WM_EVENT_COUNTERS */
   5277 
   5278 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5279 			ifp->if_oerrors++;
   5280 			if (status & WTX_ST_LC)
   5281 				log(LOG_WARNING, "%s: late collision\n",
   5282 				    device_xname(sc->sc_dev));
   5283 			else if (status & WTX_ST_EC) {
   5284 				ifp->if_collisions += 16;
   5285 				log(LOG_WARNING, "%s: excessive collisions\n",
   5286 				    device_xname(sc->sc_dev));
   5287 			}
   5288 		} else
   5289 			ifp->if_opackets++;
   5290 
   5291 		sc->sc_txfree += txs->txs_ndesc;
   5292 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5293 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5294 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5295 		m_freem(txs->txs_mbuf);
   5296 		txs->txs_mbuf = NULL;
   5297 	}
   5298 
   5299 	/* Update the dirty transmit buffer pointer. */
   5300 	sc->sc_txsdirty = i;
   5301 	DPRINTF(WM_DEBUG_TX,
   5302 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5303 
   5304 	/*
   5305 	 * If there are no more pending transmissions, cancel the watchdog
   5306 	 * timer.
   5307 	 */
   5308 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5309 		ifp->if_timer = 0;
   5310 }
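/*
 * Condensed sketch of the reclaim rule wm_txintr() applies per job
 * (illustration only, compiled out; the real loop above also handles
 * descriptor syncs and error statistics): a job is finished once the
 * DD bit of its last descriptor is set.
 */
#if 0
	status = sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
	if ((status & WTX_ST_DD) == 0)
		return;				/* still owned by hardware */
	sc->sc_txfree += txs->txs_ndesc;	/* hand the slots back */
#endif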
   5311 
   5312 /*
   5313  * wm_rxintr:
   5314  *
   5315  *	Helper; handle receive interrupts.
   5316  */
   5317 static void
   5318 wm_rxintr(struct wm_softc *sc)
   5319 {
   5320 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5321 	struct wm_rxsoft *rxs;
   5322 	struct mbuf *m;
   5323 	int i, len;
   5324 	uint8_t status, errors;
   5325 	uint16_t vlantag;
   5326 
   5327 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5328 		rxs = &sc->sc_rxsoft[i];
   5329 
   5330 		DPRINTF(WM_DEBUG_RX,
   5331 		    ("%s: RX: checking descriptor %d\n",
   5332 		    device_xname(sc->sc_dev), i));
   5333 
   5334 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5335 
   5336 		status = sc->sc_rxdescs[i].wrx_status;
   5337 		errors = sc->sc_rxdescs[i].wrx_errors;
   5338 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5339 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5340 
   5341 		if ((status & WRX_ST_DD) == 0) {
   5342 			/* We have processed all of the receive descriptors. */
   5343 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5344 			break;
   5345 		}
   5346 
   5347 		if (__predict_false(sc->sc_rxdiscard)) {
   5348 			DPRINTF(WM_DEBUG_RX,
   5349 			    ("%s: RX: discarding contents of descriptor %d\n",
   5350 			    device_xname(sc->sc_dev), i));
   5351 			WM_INIT_RXDESC(sc, i);
   5352 			if (status & WRX_ST_EOP) {
   5353 				/* Reset our state. */
   5354 				DPRINTF(WM_DEBUG_RX,
   5355 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5356 				    device_xname(sc->sc_dev)));
   5357 				sc->sc_rxdiscard = 0;
   5358 			}
   5359 			continue;
   5360 		}
   5361 
   5362 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5363 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5364 
   5365 		m = rxs->rxs_mbuf;
   5366 
   5367 		/*
   5368 		 * Add a new receive buffer to the ring, unless of
   5369 		 * course the length is zero. Treat the latter as a
   5370 		 * failed mapping.
   5371 		 */
   5372 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5373 			/*
   5374 			 * Failed, throw away what we've done so
   5375 			 * far, and discard the rest of the packet.
   5376 			 */
   5377 			ifp->if_ierrors++;
   5378 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5379 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5380 			WM_INIT_RXDESC(sc, i);
   5381 			if ((status & WRX_ST_EOP) == 0)
   5382 				sc->sc_rxdiscard = 1;
   5383 			if (sc->sc_rxhead != NULL)
   5384 				m_freem(sc->sc_rxhead);
   5385 			WM_RXCHAIN_RESET(sc);
   5386 			DPRINTF(WM_DEBUG_RX,
   5387 			    ("%s: RX: Rx buffer allocation failed, "
   5388 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5389 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5390 			continue;
   5391 		}
   5392 
   5393 		m->m_len = len;
   5394 		sc->sc_rxlen += len;
   5395 		DPRINTF(WM_DEBUG_RX,
   5396 		    ("%s: RX: buffer at %p len %d\n",
   5397 		    device_xname(sc->sc_dev), m->m_data, len));
   5398 
   5399 		/* If this is not the end of the packet, keep looking. */
   5400 		if ((status & WRX_ST_EOP) == 0) {
   5401 			WM_RXCHAIN_LINK(sc, m);
   5402 			DPRINTF(WM_DEBUG_RX,
   5403 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5404 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5405 			continue;
   5406 		}
   5407 
   5408 		/*
    5409 		 * Okay, we have the entire packet now.  The chip is
    5410 		 * configured to include the FCS except on the I350, I354
    5411 		 * and I21[01] (not all chips can be configured to strip
    5412 		 * it), so we need to trim it here.  We may also need to
    5413 		 * adjust the length of the previous mbuf in the chain
    5414 		 * if the current mbuf is too short.
    5415 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    5416 		 * register is always set on the I350, so we don't trim it.
   5417 		 */
   5418 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5419 		    && (sc->sc_type != WM_T_I210)
   5420 		    && (sc->sc_type != WM_T_I211)) {
   5421 			if (m->m_len < ETHER_CRC_LEN) {
   5422 				sc->sc_rxtail->m_len
   5423 				    -= (ETHER_CRC_LEN - m->m_len);
   5424 				m->m_len = 0;
   5425 			} else
   5426 				m->m_len -= ETHER_CRC_LEN;
   5427 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5428 		} else
   5429 			len = sc->sc_rxlen;
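		/*
		 * FCS trim above, with hypothetical lengths: if the final
		 * mbuf carries only 2 of the 4 CRC bytes, the other 2 sit
		 * at the end of the previous mbuf, so we zero this mbuf
		 * and shorten the tail instead:
		 *
		 *	sc_rxtail->m_len -= (ETHER_CRC_LEN - 2);
		 *	m->m_len = 0;
		 */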
   5430 
   5431 		WM_RXCHAIN_LINK(sc, m);
   5432 
   5433 		*sc->sc_rxtailp = NULL;
   5434 		m = sc->sc_rxhead;
   5435 
   5436 		WM_RXCHAIN_RESET(sc);
   5437 
   5438 		DPRINTF(WM_DEBUG_RX,
   5439 		    ("%s: RX: have entire packet, len -> %d\n",
   5440 		    device_xname(sc->sc_dev), len));
   5441 
   5442 		/* If an error occurred, update stats and drop the packet. */
   5443 		if (errors &
   5444 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5445 			if (errors & WRX_ER_SE)
   5446 				log(LOG_WARNING, "%s: symbol error\n",
   5447 				    device_xname(sc->sc_dev));
   5448 			else if (errors & WRX_ER_SEQ)
   5449 				log(LOG_WARNING, "%s: receive sequence error\n",
   5450 				    device_xname(sc->sc_dev));
   5451 			else if (errors & WRX_ER_CE)
   5452 				log(LOG_WARNING, "%s: CRC error\n",
   5453 				    device_xname(sc->sc_dev));
   5454 			m_freem(m);
   5455 			continue;
   5456 		}
   5457 
   5458 		/* No errors.  Receive the packet. */
   5459 		m->m_pkthdr.rcvif = ifp;
   5460 		m->m_pkthdr.len = len;
   5461 
   5462 		/*
   5463 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5464 		 * for us.  Associate the tag with the packet.
   5465 		 */
   5466 		/* XXXX should check for i350 and i354 */
   5467 		if ((status & WRX_ST_VP) != 0) {
   5468 			VLAN_INPUT_TAG(ifp, m,
   5469 			    le16toh(vlantag),
   5470 			    continue);
   5471 		}
   5472 
   5473 		/* Set up checksum info for this packet. */
   5474 		if ((status & WRX_ST_IXSM) == 0) {
   5475 			if (status & WRX_ST_IPCS) {
   5476 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5477 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5478 				if (errors & WRX_ER_IPE)
   5479 					m->m_pkthdr.csum_flags |=
   5480 					    M_CSUM_IPv4_BAD;
   5481 			}
   5482 			if (status & WRX_ST_TCPCS) {
   5483 				/*
   5484 				 * Note: we don't know if this was TCP or UDP,
   5485 				 * so we just set both bits, and expect the
   5486 				 * upper layers to deal.
   5487 				 */
   5488 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5489 				m->m_pkthdr.csum_flags |=
   5490 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5491 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5492 				if (errors & WRX_ER_TCPE)
   5493 					m->m_pkthdr.csum_flags |=
   5494 					    M_CSUM_TCP_UDP_BAD;
   5495 			}
   5496 		}
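		/*
		 * Note: when WRX_ST_IXSM is set, the chip asks us to ignore
		 * its checksum indications for this frame, so we leave
		 * csum_flags clear and let the stack verify in software.
		 */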
   5497 
   5498 		ifp->if_ipackets++;
   5499 
   5500 		WM_RX_UNLOCK(sc);
   5501 
   5502 		/* Pass this up to any BPF listeners. */
   5503 		bpf_mtap(ifp, m);
   5504 
   5505 		/* Pass it on. */
   5506 		(*ifp->if_input)(ifp, m);
   5507 
   5508 		WM_RX_LOCK(sc);
   5509 
   5510 		if (sc->sc_stopping)
   5511 			break;
   5512 	}
   5513 
   5514 	/* Update the receive pointer. */
   5515 	sc->sc_rxptr = i;
   5516 
   5517 	DPRINTF(WM_DEBUG_RX,
   5518 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5519 }
   5520 
   5521 /*
   5522  * wm_linkintr_gmii:
   5523  *
   5524  *	Helper; handle link interrupts for GMII.
   5525  */
   5526 static void
   5527 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5528 {
   5529 
   5530 	KASSERT(WM_TX_LOCKED(sc));
   5531 
   5532 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5533 		__func__));
   5534 
   5535 	if (icr & ICR_LSC) {
   5536 		DPRINTF(WM_DEBUG_LINK,
   5537 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5538 			device_xname(sc->sc_dev)));
   5539 		mii_pollstat(&sc->sc_mii);
   5540 		if (sc->sc_type == WM_T_82543) {
   5541 			int miistatus, active;
   5542 
   5543 			/*
   5544 			 * With 82543, we need to force speed and
   5545 			 * duplex on the MAC equal to what the PHY
   5546 			 * speed and duplex configuration is.
   5547 			 */
   5548 			miistatus = sc->sc_mii.mii_media_status;
   5549 
   5550 			if (miistatus & IFM_ACTIVE) {
   5551 				active = sc->sc_mii.mii_media_active;
   5552 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5553 				switch (IFM_SUBTYPE(active)) {
   5554 				case IFM_10_T:
   5555 					sc->sc_ctrl |= CTRL_SPEED_10;
   5556 					break;
   5557 				case IFM_100_TX:
   5558 					sc->sc_ctrl |= CTRL_SPEED_100;
   5559 					break;
   5560 				case IFM_1000_T:
   5561 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5562 					break;
   5563 				default:
   5564 					/*
   5565 					 * fiber?
    5566 					 * Should not enter here.
   5567 					 */
   5568 					printf("unknown media (%x)\n",
   5569 					    active);
   5570 					break;
   5571 				}
   5572 				if (active & IFM_FDX)
   5573 					sc->sc_ctrl |= CTRL_FD;
   5574 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5575 			}
   5576 		} else if ((sc->sc_type == WM_T_ICH8)
   5577 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   5578 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   5579 		} else if (sc->sc_type == WM_T_PCH) {
   5580 			wm_k1_gig_workaround_hv(sc,
   5581 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   5582 		}
   5583 
   5584 		if ((sc->sc_phytype == WMPHY_82578)
   5585 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   5586 			== IFM_1000_T)) {
   5587 
   5588 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   5589 				delay(200*1000); /* XXX too big */
   5590 
   5591 				/* Link stall fix for link up */
   5592 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5593 				    HV_MUX_DATA_CTRL,
   5594 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   5595 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   5596 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5597 				    HV_MUX_DATA_CTRL,
   5598 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   5599 			}
   5600 		}
   5601 	} else if (icr & ICR_RXSEQ) {
   5602 		DPRINTF(WM_DEBUG_LINK,
   5603 		    ("%s: LINK Receive sequence error\n",
   5604 			device_xname(sc->sc_dev)));
   5605 	}
   5606 }
   5607 
   5608 /*
   5609  * wm_linkintr_tbi:
   5610  *
   5611  *	Helper; handle link interrupts for TBI mode.
   5612  */
   5613 static void
   5614 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   5615 {
   5616 	uint32_t status;
   5617 
   5618 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5619 		__func__));
   5620 
   5621 	status = CSR_READ(sc, WMREG_STATUS);
   5622 	if (icr & ICR_LSC) {
   5623 		if (status & STATUS_LU) {
   5624 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   5625 			    device_xname(sc->sc_dev),
   5626 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   5627 			/*
   5628 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   5629 			 * so we should update sc->sc_ctrl
   5630 			 */
   5631 
   5632 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5633 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   5634 			sc->sc_fcrtl &= ~FCRTL_XONE;
   5635 			if (status & STATUS_FD)
   5636 				sc->sc_tctl |=
   5637 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5638 			else
   5639 				sc->sc_tctl |=
   5640 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   5641 			if (sc->sc_ctrl & CTRL_TFCE)
   5642 				sc->sc_fcrtl |= FCRTL_XONE;
   5643 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5644 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   5645 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   5646 				      sc->sc_fcrtl);
   5647 			sc->sc_tbi_linkup = 1;
   5648 		} else {
   5649 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   5650 			    device_xname(sc->sc_dev)));
   5651 			sc->sc_tbi_linkup = 0;
   5652 		}
   5653 		wm_tbi_set_linkled(sc);
   5654 	} else if (icr & ICR_RXSEQ) {
   5655 		DPRINTF(WM_DEBUG_LINK,
   5656 		    ("%s: LINK: Receive sequence error\n",
   5657 		    device_xname(sc->sc_dev)));
   5658 	}
   5659 }
   5660 
   5661 /*
   5662  * wm_linkintr:
   5663  *
   5664  *	Helper; handle link interrupts.
   5665  */
   5666 static void
   5667 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   5668 {
   5669 
   5670 	if (sc->sc_flags & WM_F_HAS_MII)
   5671 		wm_linkintr_gmii(sc, icr);
   5672 	else
   5673 		wm_linkintr_tbi(sc, icr);
   5674 }
   5675 
   5676 /*
   5677  * wm_intr:
   5678  *
   5679  *	Interrupt service routine.
   5680  */
   5681 static int
   5682 wm_intr(void *arg)
   5683 {
   5684 	struct wm_softc *sc = arg;
   5685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5686 	uint32_t icr;
   5687 	int handled = 0;
   5688 
   5689 	while (1 /* CONSTCOND */) {
   5690 		icr = CSR_READ(sc, WMREG_ICR);
   5691 		if ((icr & sc->sc_icr) == 0)
   5692 			break;
   5693 		rnd_add_uint32(&sc->rnd_source, icr);
   5694 
   5695 		WM_RX_LOCK(sc);
   5696 
   5697 		if (sc->sc_stopping) {
   5698 			WM_RX_UNLOCK(sc);
   5699 			break;
   5700 		}
   5701 
   5702 		handled = 1;
   5703 
   5704 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5705 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   5706 			DPRINTF(WM_DEBUG_RX,
   5707 			    ("%s: RX: got Rx intr 0x%08x\n",
   5708 			    device_xname(sc->sc_dev),
   5709 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   5710 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   5711 		}
   5712 #endif
   5713 		wm_rxintr(sc);
   5714 
   5715 		WM_RX_UNLOCK(sc);
   5716 		WM_TX_LOCK(sc);
   5717 
   5718 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5719 		if (icr & ICR_TXDW) {
   5720 			DPRINTF(WM_DEBUG_TX,
   5721 			    ("%s: TX: got TXDW interrupt\n",
   5722 			    device_xname(sc->sc_dev)));
   5723 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   5724 		}
   5725 #endif
   5726 		wm_txintr(sc);
   5727 
   5728 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   5729 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   5730 			wm_linkintr(sc, icr);
   5731 		}
   5732 
   5733 		WM_TX_UNLOCK(sc);
   5734 
   5735 		if (icr & ICR_RXO) {
   5736 #if defined(WM_DEBUG)
   5737 			log(LOG_WARNING, "%s: Receive overrun\n",
   5738 			    device_xname(sc->sc_dev));
   5739 #endif /* defined(WM_DEBUG) */
   5740 		}
   5741 	}
   5742 
   5743 	if (handled) {
   5744 		/* Try to get more packets going. */
   5745 		ifp->if_start(ifp);
   5746 	}
   5747 
   5748 	return handled;
   5749 }
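/*
 * Note on the loop in wm_intr() above: WMREG_ICR is clear-on-read on
 * these controllers, so the CSR_READ() both fetches and acknowledges
 * the pending causes; the loop exits once no cause we enabled via
 * sc->sc_icr remains asserted.
 */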
   5750 
   5751 /*
   5752  * Media related.
   5753  * GMII, SGMII, TBI (and SERDES)
   5754  */
   5755 
   5756 /* GMII related */
   5757 
   5758 /*
   5759  * wm_gmii_reset:
   5760  *
   5761  *	Reset the PHY.
   5762  */
   5763 static void
   5764 wm_gmii_reset(struct wm_softc *sc)
   5765 {
   5766 	uint32_t reg;
   5767 	int rv;
   5768 
   5769 	/* get phy semaphore */
   5770 	switch (sc->sc_type) {
   5771 	case WM_T_82571:
   5772 	case WM_T_82572:
   5773 	case WM_T_82573:
   5774 	case WM_T_82574:
   5775 	case WM_T_82583:
   5776 		 /* XXX should get sw semaphore, too */
   5777 		rv = wm_get_swsm_semaphore(sc);
   5778 		break;
   5779 	case WM_T_82575:
   5780 	case WM_T_82576:
   5781 	case WM_T_82580:
   5782 	case WM_T_82580ER:
   5783 	case WM_T_I350:
   5784 	case WM_T_I354:
   5785 	case WM_T_I210:
   5786 	case WM_T_I211:
   5787 	case WM_T_80003:
   5788 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5789 		break;
   5790 	case WM_T_ICH8:
   5791 	case WM_T_ICH9:
   5792 	case WM_T_ICH10:
   5793 	case WM_T_PCH:
   5794 	case WM_T_PCH2:
   5795 	case WM_T_PCH_LPT:
   5796 		rv = wm_get_swfwhw_semaphore(sc);
   5797 		break;
   5798 	default:
    5799 		/* nothing to do */
   5800 		rv = 0;
   5801 		break;
   5802 	}
   5803 	if (rv != 0) {
   5804 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   5805 		    __func__);
   5806 		return;
   5807 	}
   5808 
   5809 	switch (sc->sc_type) {
   5810 	case WM_T_82542_2_0:
   5811 	case WM_T_82542_2_1:
   5812 		/* null */
   5813 		break;
   5814 	case WM_T_82543:
   5815 		/*
   5816 		 * With 82543, we need to force speed and duplex on the MAC
   5817 		 * equal to what the PHY speed and duplex configuration is.
   5818 		 * In addition, we need to perform a hardware reset on the PHY
   5819 		 * to take it out of reset.
   5820 		 */
   5821 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   5822 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5823 
   5824 		/* The PHY reset pin is active-low. */
   5825 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5826 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   5827 		    CTRL_EXT_SWDPIN(4));
   5828 		reg |= CTRL_EXT_SWDPIO(4);
   5829 
   5830 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5831 		CSR_WRITE_FLUSH(sc);
   5832 		delay(10*1000);
   5833 
   5834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   5835 		CSR_WRITE_FLUSH(sc);
   5836 		delay(150);
   5837 #if 0
   5838 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   5839 #endif
   5840 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   5841 		break;
   5842 	case WM_T_82544:	/* reset 10000us */
   5843 	case WM_T_82540:
   5844 	case WM_T_82545:
   5845 	case WM_T_82545_3:
   5846 	case WM_T_82546:
   5847 	case WM_T_82546_3:
   5848 	case WM_T_82541:
   5849 	case WM_T_82541_2:
   5850 	case WM_T_82547:
   5851 	case WM_T_82547_2:
   5852 	case WM_T_82571:	/* reset 100us */
   5853 	case WM_T_82572:
   5854 	case WM_T_82573:
   5855 	case WM_T_82574:
   5856 	case WM_T_82575:
   5857 	case WM_T_82576:
   5858 	case WM_T_82580:
   5859 	case WM_T_82580ER:
   5860 	case WM_T_I350:
   5861 	case WM_T_I354:
   5862 	case WM_T_I210:
   5863 	case WM_T_I211:
   5864 	case WM_T_82583:
   5865 	case WM_T_80003:
   5866 		/* generic reset */
   5867 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5868 		CSR_WRITE_FLUSH(sc);
   5869 		delay(20000);
   5870 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5871 		CSR_WRITE_FLUSH(sc);
   5872 		delay(20000);
   5873 
   5874 		if ((sc->sc_type == WM_T_82541)
   5875 		    || (sc->sc_type == WM_T_82541_2)
   5876 		    || (sc->sc_type == WM_T_82547)
   5877 		    || (sc->sc_type == WM_T_82547_2)) {
    5878 			/* workarounds for igp are done in igp_reset() */
   5879 			/* XXX add code to set LED after phy reset */
   5880 		}
   5881 		break;
   5882 	case WM_T_ICH8:
   5883 	case WM_T_ICH9:
   5884 	case WM_T_ICH10:
   5885 	case WM_T_PCH:
   5886 	case WM_T_PCH2:
   5887 	case WM_T_PCH_LPT:
   5888 		/* generic reset */
   5889 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5890 		CSR_WRITE_FLUSH(sc);
   5891 		delay(100);
   5892 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5893 		CSR_WRITE_FLUSH(sc);
   5894 		delay(150);
   5895 		break;
   5896 	default:
   5897 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   5898 		    __func__);
   5899 		break;
   5900 	}
   5901 
   5902 	/* release PHY semaphore */
   5903 	switch (sc->sc_type) {
   5904 	case WM_T_82571:
   5905 	case WM_T_82572:
   5906 	case WM_T_82573:
   5907 	case WM_T_82574:
   5908 	case WM_T_82583:
   5909 		 /* XXX should put sw semaphore, too */
   5910 		wm_put_swsm_semaphore(sc);
   5911 		break;
   5912 	case WM_T_82575:
   5913 	case WM_T_82576:
   5914 	case WM_T_82580:
   5915 	case WM_T_82580ER:
   5916 	case WM_T_I350:
   5917 	case WM_T_I354:
   5918 	case WM_T_I210:
   5919 	case WM_T_I211:
   5920 	case WM_T_80003:
   5921 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5922 		break;
   5923 	case WM_T_ICH8:
   5924 	case WM_T_ICH9:
   5925 	case WM_T_ICH10:
   5926 	case WM_T_PCH:
   5927 	case WM_T_PCH2:
   5928 	case WM_T_PCH_LPT:
   5929 		wm_put_swfwhw_semaphore(sc);
   5930 		break;
   5931 	default:
    5932 		/* nothing to do */
   5933 		rv = 0;
   5934 		break;
   5935 	}
   5936 
   5937 	/* get_cfg_done */
   5938 	wm_get_cfg_done(sc);
   5939 
   5940 	/* extra setup */
   5941 	switch (sc->sc_type) {
   5942 	case WM_T_82542_2_0:
   5943 	case WM_T_82542_2_1:
   5944 	case WM_T_82543:
   5945 	case WM_T_82544:
   5946 	case WM_T_82540:
   5947 	case WM_T_82545:
   5948 	case WM_T_82545_3:
   5949 	case WM_T_82546:
   5950 	case WM_T_82546_3:
   5951 	case WM_T_82541_2:
   5952 	case WM_T_82547_2:
   5953 	case WM_T_82571:
   5954 	case WM_T_82572:
   5955 	case WM_T_82573:
   5956 	case WM_T_82574:
   5957 	case WM_T_82575:
   5958 	case WM_T_82576:
   5959 	case WM_T_82580:
   5960 	case WM_T_82580ER:
   5961 	case WM_T_I350:
   5962 	case WM_T_I354:
   5963 	case WM_T_I210:
   5964 	case WM_T_I211:
   5965 	case WM_T_82583:
   5966 	case WM_T_80003:
   5967 		/* null */
   5968 		break;
   5969 	case WM_T_82541:
   5970 	case WM_T_82547:
    5971 		/* XXX Configure the activity LED after PHY reset */
   5972 		break;
   5973 	case WM_T_ICH8:
   5974 	case WM_T_ICH9:
   5975 	case WM_T_ICH10:
   5976 	case WM_T_PCH:
   5977 	case WM_T_PCH2:
   5978 	case WM_T_PCH_LPT:
    5979 		/* Allow time for h/w to get to a quiescent state after reset */
   5980 		delay(10*1000);
   5981 
   5982 		if (sc->sc_type == WM_T_PCH)
   5983 			wm_hv_phy_workaround_ich8lan(sc);
   5984 
   5985 		if (sc->sc_type == WM_T_PCH2)
   5986 			wm_lv_phy_workaround_ich8lan(sc);
   5987 
   5988 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   5989 			/*
   5990 			 * dummy read to clear the phy wakeup bit after lcd
   5991 			 * reset
   5992 			 */
   5993 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   5994 		}
   5995 
   5996 		/*
    5997 		 * XXX Configure the LCD with the extended configuration region
   5998 		 * in NVM
   5999 		 */
   6000 
   6001 		/* Configure the LCD with the OEM bits in NVM */
   6002 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6003 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6004 			/*
   6005 			 * Disable LPLU.
   6006 			 * XXX It seems that 82567 has LPLU, too.
   6007 			 */
   6008 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    6009 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   6010 			reg |= HV_OEM_BITS_ANEGNOW;
   6011 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6012 		}
   6013 		break;
   6014 	default:
   6015 		panic("%s: unknown type\n", __func__);
   6016 		break;
   6017 	}
   6018 }
   6019 
   6020 /*
   6021  * wm_get_phy_id_82575:
   6022  *
   6023  * Return PHY ID. Return -1 if it failed.
   6024  */
   6025 static int
   6026 wm_get_phy_id_82575(struct wm_softc *sc)
   6027 {
   6028 	uint32_t reg;
   6029 	int phyid = -1;
   6030 
   6031 	/* XXX */
   6032 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6033 		return -1;
   6034 
   6035 	if (wm_sgmii_uses_mdio(sc)) {
   6036 		switch (sc->sc_type) {
   6037 		case WM_T_82575:
   6038 		case WM_T_82576:
   6039 			reg = CSR_READ(sc, WMREG_MDIC);
   6040 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6041 			break;
   6042 		case WM_T_82580:
   6043 		case WM_T_I350:
   6044 		case WM_T_I354:
   6045 		case WM_T_I210:
   6046 		case WM_T_I211:
   6047 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6048 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6049 			break;
   6050 		default:
   6051 			return -1;
   6052 		}
   6053 	}
   6054 
   6055 	return phyid;
   6056 }
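/*
 * Field extraction used in wm_get_phy_id_82575(), shown with a
 * hypothetical register value (illustration only, compiled out):
 */
#if 0
	uint32_t mdicnfg = 0x00200000;	/* example bits, not read from h/w */
	int addr = (mdicnfg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
#endif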
   6057 
   6058 
   6059 /*
   6060  * wm_gmii_mediainit:
   6061  *
   6062  *	Initialize media for use on 1000BASE-T devices.
   6063  */
   6064 static void
   6065 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6066 {
   6067 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6068 	struct mii_data *mii = &sc->sc_mii;
   6069 	uint32_t reg;
   6070 
   6071 	/* We have MII. */
   6072 	sc->sc_flags |= WM_F_HAS_MII;
   6073 
   6074 	if (sc->sc_type == WM_T_80003)
   6075 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6076 	else
   6077 		sc->sc_tipg = TIPG_1000T_DFLT;
   6078 
   6079 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6080 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6081 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6082 	    || (sc->sc_type == WM_T_I211)) {
   6083 		reg = CSR_READ(sc, WMREG_PHPM);
   6084 		reg &= ~PHPM_GO_LINK_D;
   6085 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6086 	}
   6087 
   6088 	/*
   6089 	 * Let the chip set speed/duplex on its own based on
   6090 	 * signals from the PHY.
   6091 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6092 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6093 	 */
   6094 	sc->sc_ctrl |= CTRL_SLU;
   6095 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6096 
   6097 	/* Initialize our media structures and probe the GMII. */
   6098 	mii->mii_ifp = ifp;
   6099 
   6100 	/*
   6101 	 * Determine the PHY access method.
   6102 	 *
    6103 	 *  For SGMII, use the SGMII-specific method.
   6104 	 *
   6105 	 *  For some devices, we can determine the PHY access method
   6106 	 * from sc_type.
   6107 	 *
    6108 	 *  For ICH8 variants, it's difficult to determine the PHY access
    6109 	 * method from sc_type, so use the PCI product ID for some devices.
    6110 	 * For other ICH8 variants, try igp's method first; if no PHY
    6111 	 * can be detected, fall back to bm's method.
   6112 	 */
   6113 	switch (prodid) {
   6114 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6115 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6116 		/* 82577 */
   6117 		sc->sc_phytype = WMPHY_82577;
   6118 		mii->mii_readreg = wm_gmii_hv_readreg;
   6119 		mii->mii_writereg = wm_gmii_hv_writereg;
   6120 		break;
   6121 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6122 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6123 		/* 82578 */
   6124 		sc->sc_phytype = WMPHY_82578;
   6125 		mii->mii_readreg = wm_gmii_hv_readreg;
   6126 		mii->mii_writereg = wm_gmii_hv_writereg;
   6127 		break;
   6128 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6129 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6130 		/* 82579 */
   6131 		sc->sc_phytype = WMPHY_82579;
   6132 		mii->mii_readreg = wm_gmii_hv_readreg;
   6133 		mii->mii_writereg = wm_gmii_hv_writereg;
   6134 		break;
   6135 	case PCI_PRODUCT_INTEL_I217_LM:
   6136 	case PCI_PRODUCT_INTEL_I217_V:
   6137 	case PCI_PRODUCT_INTEL_I218_LM:
   6138 	case PCI_PRODUCT_INTEL_I218_V:
   6139 		/* I21[78] */
   6140 		mii->mii_readreg = wm_gmii_hv_readreg;
   6141 		mii->mii_writereg = wm_gmii_hv_writereg;
   6142 		break;
   6143 	case PCI_PRODUCT_INTEL_82801I_BM:
   6144 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6145 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6146 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6147 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6148 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6149 		/* 82567 */
   6150 		sc->sc_phytype = WMPHY_BM;
   6151 		mii->mii_readreg = wm_gmii_bm_readreg;
   6152 		mii->mii_writereg = wm_gmii_bm_writereg;
   6153 		break;
   6154 	default:
   6155 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    6156 		    && !wm_sgmii_uses_mdio(sc)) {
   6157 			mii->mii_readreg = wm_sgmii_readreg;
   6158 			mii->mii_writereg = wm_sgmii_writereg;
   6159 		} else if (sc->sc_type >= WM_T_80003) {
   6160 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6161 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6162 		} else if (sc->sc_type >= WM_T_I210) {
   6163 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6164 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6165 		} else if (sc->sc_type >= WM_T_82580) {
   6166 			sc->sc_phytype = WMPHY_82580;
   6167 			mii->mii_readreg = wm_gmii_82580_readreg;
   6168 			mii->mii_writereg = wm_gmii_82580_writereg;
   6169 		} else if (sc->sc_type >= WM_T_82544) {
   6170 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6171 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6172 		} else {
   6173 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6174 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6175 		}
   6176 		break;
   6177 	}
   6178 	mii->mii_statchg = wm_gmii_statchg;
   6179 
   6180 	wm_gmii_reset(sc);
   6181 
   6182 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6183 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6184 	    wm_gmii_mediastatus);
   6185 
   6186 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6187 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6188 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6189 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6190 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6191 			/* Attach only one port */
   6192 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6193 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6194 		} else {
   6195 			int i, id;
   6196 			uint32_t ctrl_ext;
   6197 
   6198 			id = wm_get_phy_id_82575(sc);
   6199 			if (id != -1) {
   6200 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6201 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6202 			}
   6203 			if ((id == -1)
   6204 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6205 				/* Power on sgmii phy if it is disabled */
   6206 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6207 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6208 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6209 				CSR_WRITE_FLUSH(sc);
   6210 				delay(300*1000); /* XXX too long */
   6211 
    6212 				/* try PHY addresses 1 through 7 */
   6213 				for (i = 1; i < 8; i++)
   6214 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6215 					    0xffffffff, i, MII_OFFSET_ANY,
   6216 					    MIIF_DOPAUSE);
   6217 
   6218 				/* restore previous sfp cage power state */
   6219 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6220 			}
   6221 		}
   6222 	} else {
   6223 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6224 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6225 	}
   6226 
   6227 	/*
    6228 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
   6229 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6230 	 */
   6231 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6232 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6233 		wm_set_mdio_slow_mode_hv(sc);
   6234 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6235 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6236 	}
   6237 
   6238 	/*
   6239 	 * (For ICH8 variants)
   6240 	 * If PHY detection failed, use BM's r/w function and retry.
   6241 	 */
   6242 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6243 		/* if failed, retry with *_bm_* */
   6244 		mii->mii_readreg = wm_gmii_bm_readreg;
   6245 		mii->mii_writereg = wm_gmii_bm_writereg;
   6246 
   6247 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6248 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6249 	}
   6250 
   6251 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6252 		/* No PHY was found */
   6253 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6254 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6255 		sc->sc_phytype = WMPHY_NONE;
   6256 	} else {
   6257 		/*
   6258 		 * PHY Found!
   6259 		 * Check PHY type.
   6260 		 */
   6261 		uint32_t model;
   6262 		struct mii_softc *child;
   6263 
   6264 		child = LIST_FIRST(&mii->mii_phys);
   6265 		if (device_is_a(child->mii_dev, "igphy")) {
   6266 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6267 
   6268 			model = isc->sc_mii.mii_mpd_model;
   6269 			if (model == MII_MODEL_yyINTEL_I82566)
   6270 				sc->sc_phytype = WMPHY_IGP_3;
   6271 		}
   6272 
   6273 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6274 	}
   6275 }
   6276 
   6277 /*
   6278  * wm_gmii_mediastatus:	[ifmedia interface function]
   6279  *
   6280  *	Get the current interface media status on a 1000BASE-T device.
   6281  */
   6282 static void
   6283 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6284 {
   6285 	struct wm_softc *sc = ifp->if_softc;
   6286 
   6287 	ether_mediastatus(ifp, ifmr);
   6288 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6289 	    | sc->sc_flowflags;
   6290 }
   6291 
   6292 /*
   6293  * wm_gmii_mediachange:	[ifmedia interface function]
   6294  *
   6295  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6296  */
   6297 static int
   6298 wm_gmii_mediachange(struct ifnet *ifp)
   6299 {
   6300 	struct wm_softc *sc = ifp->if_softc;
   6301 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6302 	int rc;
   6303 
   6304 	if ((ifp->if_flags & IFF_UP) == 0)
   6305 		return 0;
   6306 
   6307 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6308 	sc->sc_ctrl |= CTRL_SLU;
   6309 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6310 	    || (sc->sc_type > WM_T_82543)) {
   6311 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6312 	} else {
   6313 		sc->sc_ctrl &= ~CTRL_ASDE;
   6314 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6315 		if (ife->ifm_media & IFM_FDX)
   6316 			sc->sc_ctrl |= CTRL_FD;
   6317 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6318 		case IFM_10_T:
   6319 			sc->sc_ctrl |= CTRL_SPEED_10;
   6320 			break;
   6321 		case IFM_100_TX:
   6322 			sc->sc_ctrl |= CTRL_SPEED_100;
   6323 			break;
   6324 		case IFM_1000_T:
   6325 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6326 			break;
   6327 		default:
   6328 			panic("wm_gmii_mediachange: bad media 0x%x",
   6329 			    ife->ifm_media);
   6330 		}
   6331 	}
   6332 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6333 	if (sc->sc_type <= WM_T_82543)
   6334 		wm_gmii_reset(sc);
   6335 
   6336 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6337 		return 0;
   6338 	return rc;
   6339 }
   6340 
   6341 #define	MDI_IO		CTRL_SWDPIN(2)
   6342 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6343 #define	MDI_CLK		CTRL_SWDPIN(3)
   6344 
   6345 static void
   6346 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6347 {
   6348 	uint32_t i, v;
   6349 
   6350 	v = CSR_READ(sc, WMREG_CTRL);
   6351 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6352 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6353 
   6354 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6355 		if (data & i)
   6356 			v |= MDI_IO;
   6357 		else
   6358 			v &= ~MDI_IO;
   6359 		CSR_WRITE(sc, WMREG_CTRL, v);
   6360 		CSR_WRITE_FLUSH(sc);
   6361 		delay(10);
   6362 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6363 		CSR_WRITE_FLUSH(sc);
   6364 		delay(10);
   6365 		CSR_WRITE(sc, WMREG_CTRL, v);
   6366 		CSR_WRITE_FLUSH(sc);
   6367 		delay(10);
   6368 	}
   6369 }
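/*
 * Timing sketch for the bit-bang above: each bit costs three 10us
 * phases (data setup, clock high, clock low), i.e. about 30us per bit,
 * so the 32-bit preamble plus a 14-bit read command takes roughly
 * 1.4ms before the PHY even starts answering.
 */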
   6370 
   6371 static uint32_t
   6372 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6373 {
   6374 	uint32_t v, i, data = 0;
   6375 
   6376 	v = CSR_READ(sc, WMREG_CTRL);
   6377 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6378 	v |= CTRL_SWDPIO(3);
   6379 
   6380 	CSR_WRITE(sc, WMREG_CTRL, v);
   6381 	CSR_WRITE_FLUSH(sc);
   6382 	delay(10);
   6383 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6384 	CSR_WRITE_FLUSH(sc);
   6385 	delay(10);
   6386 	CSR_WRITE(sc, WMREG_CTRL, v);
   6387 	CSR_WRITE_FLUSH(sc);
   6388 	delay(10);
   6389 
   6390 	for (i = 0; i < 16; i++) {
   6391 		data <<= 1;
   6392 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6393 		CSR_WRITE_FLUSH(sc);
   6394 		delay(10);
   6395 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6396 			data |= 1;
   6397 		CSR_WRITE(sc, WMREG_CTRL, v);
   6398 		CSR_WRITE_FLUSH(sc);
   6399 		delay(10);
   6400 	}
   6401 
   6402 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6403 	CSR_WRITE_FLUSH(sc);
   6404 	delay(10);
   6405 	CSR_WRITE(sc, WMREG_CTRL, v);
   6406 	CSR_WRITE_FLUSH(sc);
   6407 	delay(10);
   6408 
   6409 	return data;
   6410 }
   6411 
   6412 #undef MDI_IO
   6413 #undef MDI_DIR
   6414 #undef MDI_CLK
   6415 
   6416 /*
   6417  * wm_gmii_i82543_readreg:	[mii interface function]
   6418  *
   6419  *	Read a PHY register on the GMII (i82543 version).
   6420  */
   6421 static int
   6422 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6423 {
   6424 	struct wm_softc *sc = device_private(self);
   6425 	int rv;
   6426 
   6427 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6428 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6429 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6430 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6431 
   6432 	DPRINTF(WM_DEBUG_GMII,
   6433 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6434 	    device_xname(sc->sc_dev), phy, reg, rv));
   6435 
   6436 	return rv;
   6437 }
   6438 
   6439 /*
   6440  * wm_gmii_i82543_writereg:	[mii interface function]
   6441  *
   6442  *	Write a PHY register on the GMII (i82543 version).
   6443  */
   6444 static void
   6445 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6446 {
   6447 	struct wm_softc *sc = device_private(self);
   6448 
   6449 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6450 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6451 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6452 	    (MII_COMMAND_START << 30), 32);
   6453 }
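
/*
 * For reference, the two helpers above bit-bang standard IEEE 802.3
 * clause 22 MDIO frames: a 32-bit preamble of ones, then ST, OP, the
 * 5-bit PHY address, the 5-bit register address and 16 data bits.
 * The sketch below is illustrative only (not used by the driver) and
 * shows the same 14-bit read command word that
 * wm_gmii_i82543_readreg() builds inline.
 */
#if 0
static inline uint32_t
wm_i82543_mii_read_cmd(int phy, int reg)
{

	/* ST at bits 13:12, OP at 11:10, PHYAD at 9:5, REGAD at 4:0 */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif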
   6454 
   6455 /*
   6456  * wm_gmii_i82544_readreg:	[mii interface function]
   6457  *
   6458  *	Read a PHY register on the GMII.
   6459  */
   6460 static int
   6461 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6462 {
   6463 	struct wm_softc *sc = device_private(self);
   6464 	uint32_t mdic = 0;
   6465 	int i, rv;
   6466 
   6467 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6468 	    MDIC_REGADD(reg));
   6469 
   6470 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6471 		mdic = CSR_READ(sc, WMREG_MDIC);
   6472 		if (mdic & MDIC_READY)
   6473 			break;
   6474 		delay(50);
   6475 	}
   6476 
   6477 	if ((mdic & MDIC_READY) == 0) {
   6478 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6479 		    device_xname(sc->sc_dev), phy, reg);
   6480 		rv = 0;
   6481 	} else if (mdic & MDIC_E) {
   6482 #if 0 /* This is normal if no PHY is present. */
   6483 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6484 		    device_xname(sc->sc_dev), phy, reg);
   6485 #endif
   6486 		rv = 0;
   6487 	} else {
   6488 		rv = MDIC_DATA(mdic);
   6489 		if (rv == 0xffff)
   6490 			rv = 0;
   6491 	}
   6492 
   6493 	return rv;
   6494 }
   6495 
   6496 /*
   6497  * wm_gmii_i82544_writereg:	[mii interface function]
   6498  *
   6499  *	Write a PHY register on the GMII.
   6500  */
   6501 static void
   6502 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   6503 {
   6504 	struct wm_softc *sc = device_private(self);
   6505 	uint32_t mdic = 0;
   6506 	int i;
   6507 
   6508 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   6509 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   6510 
   6511 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6512 		mdic = CSR_READ(sc, WMREG_MDIC);
   6513 		if (mdic & MDIC_READY)
   6514 			break;
   6515 		delay(50);
   6516 	}
   6517 
   6518 	if ((mdic & MDIC_READY) == 0)
   6519 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   6520 		    device_xname(sc->sc_dev), phy, reg);
   6521 	else if (mdic & MDIC_E)
   6522 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   6523 		    device_xname(sc->sc_dev), phy, reg);
   6524 }
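
/*
 * Usage sketch (illustrative only): these accessors implement the MII
 * read/write interface, so the MII layer normally calls them
 * indirectly, but a direct read of a standard register (MII_BMSR from
 * dev/mii/mii.h) would look like:
 *
 *	int bmsr = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_BMSR);
 */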
   6525 
   6526 /*
   6527  * wm_gmii_i80003_readreg:	[mii interface function]
   6528  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6532  */
   6533 static int
   6534 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   6535 {
   6536 	struct wm_softc *sc = device_private(self);
   6537 	int sem;
   6538 	int rv;
   6539 
	if (phy != 1) /* only one PHY on Kumeran bus */
   6541 		return 0;
   6542 
   6543 	sem = swfwphysem[sc->sc_funcid];
   6544 	if (wm_get_swfw_semaphore(sc, sem)) {
   6545 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6546 		    __func__);
   6547 		return 0;
   6548 	}
   6549 
   6550 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6551 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6552 		    reg >> GG82563_PAGE_SHIFT);
   6553 	} else {
   6554 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6555 		    reg >> GG82563_PAGE_SHIFT);
   6556 	}
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   6558 	delay(200);
   6559 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6560 	delay(200);
   6561 
   6562 	wm_put_swfw_semaphore(sc, sem);
   6563 	return rv;
   6564 }
   6565 
   6566 /*
   6567  * wm_gmii_i80003_writereg:	[mii interface function]
   6568  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6572  */
   6573 static void
   6574 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   6575 {
   6576 	struct wm_softc *sc = device_private(self);
   6577 	int sem;
   6578 
	if (phy != 1) /* only one PHY on Kumeran bus */
   6580 		return;
   6581 
   6582 	sem = swfwphysem[sc->sc_funcid];
   6583 	if (wm_get_swfw_semaphore(sc, sem)) {
   6584 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6585 		    __func__);
   6586 		return;
   6587 	}
   6588 
   6589 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6590 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6591 		    reg >> GG82563_PAGE_SHIFT);
   6592 	} else {
   6593 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6594 		    reg >> GG82563_PAGE_SHIFT);
   6595 	}
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   6597 	delay(200);
   6598 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6599 	delay(200);
   6600 
   6601 	wm_put_swfw_semaphore(sc, sem);
   6602 }
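
/*
 * Note on the encoding used above (a sketch following the macros): a
 * GG82563 "reg" argument packs the page number in the bits above
 * GG82563_PAGE_SHIFT and the in-page offset in the low bits:
 *
 *	page   = reg >> GG82563_PAGE_SHIFT;
 *	offset = reg & GG82563_MAX_REG_ADDRESS;
 */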
   6603 
   6604 /*
   6605  * wm_gmii_bm_readreg:	[mii interface function]
   6606  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6610  */
   6611 static int
   6612 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   6613 {
   6614 	struct wm_softc *sc = device_private(self);
   6615 	int sem;
   6616 	int rv;
   6617 
   6618 	sem = swfwphysem[sc->sc_funcid];
   6619 	if (wm_get_swfw_semaphore(sc, sem)) {
   6620 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6621 		    __func__);
   6622 		return 0;
   6623 	}
   6624 
   6625 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6626 		if (phy == 1)
   6627 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6628 			    reg);
   6629 		else
   6630 			wm_gmii_i82544_writereg(self, phy,
   6631 			    GG82563_PHY_PAGE_SELECT,
   6632 			    reg >> GG82563_PAGE_SHIFT);
   6633 	}
   6634 
   6635 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6636 	wm_put_swfw_semaphore(sc, sem);
   6637 	return rv;
   6638 }
   6639 
   6640 /*
   6641  * wm_gmii_bm_writereg:	[mii interface function]
   6642  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6646  */
   6647 static void
   6648 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   6649 {
   6650 	struct wm_softc *sc = device_private(self);
   6651 	int sem;
   6652 
   6653 	sem = swfwphysem[sc->sc_funcid];
   6654 	if (wm_get_swfw_semaphore(sc, sem)) {
   6655 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6656 		    __func__);
   6657 		return;
   6658 	}
   6659 
   6660 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6661 		if (phy == 1)
   6662 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6663 			    reg);
   6664 		else
   6665 			wm_gmii_i82544_writereg(self, phy,
   6666 			    GG82563_PHY_PAGE_SELECT,
   6667 			    reg >> GG82563_PAGE_SHIFT);
   6668 	}
   6669 
   6670 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6671 	wm_put_swfw_semaphore(sc, sem);
   6672 }
   6673 
   6674 static void
   6675 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   6676 {
   6677 	struct wm_softc *sc = device_private(self);
   6678 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   6679 	uint16_t wuce;
   6680 
   6681 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   6682 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   6684 	}
   6685 
   6686 	/* Set page 769 */
   6687 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6688 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6689 
   6690 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   6691 
   6692 	wuce &= ~BM_WUC_HOST_WU_BIT;
   6693 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   6694 	    wuce | BM_WUC_ENABLE_BIT);
   6695 
   6696 	/* Select page 800 */
   6697 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6698 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   6699 
   6700 	/* Write page 800 */
   6701 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   6702 
   6703 	if (rd)
   6704 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   6705 	else
   6706 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   6707 
   6708 	/* Set page 769 */
   6709 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6710 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6711 
   6712 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   6713 }
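
/*
 * In short, the sequence above is: (1) select page 769 and set the
 * WUC enable bit, (2) select page 800 and write the target register
 * number to the address opcode register, (3) read or write the data
 * opcode register, and (4) restore the page 769 enable register.
 * An illustrative read (rd = 1) of one wakeup register would be:
 *
 *	int16_t val;
 *	wm_access_phy_wakeup_reg_bm(self, offset, &val, 1);
 */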
   6714 
   6715 /*
   6716  * wm_gmii_hv_readreg:	[mii interface function]
   6717  *
 *	Read a PHY register on the HV PHY (82577 and similar).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6721  */
   6722 static int
   6723 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   6724 {
   6725 	struct wm_softc *sc = device_private(self);
   6726 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6727 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6728 	uint16_t val;
   6729 	int rv;
   6730 
   6731 	if (wm_get_swfwhw_semaphore(sc)) {
   6732 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6733 		    __func__);
   6734 		return 0;
   6735 	}
   6736 
   6737 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6738 	if (sc->sc_phytype == WMPHY_82577) {
   6739 		/* XXX must write */
   6740 	}
   6741 
	/* Page 800 works differently from the rest; it has its own function */
   6743 	if (page == BM_WUC_PAGE) {
   6744 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   6745 		return val;
   6746 	}
   6747 
   6748 	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function (not implemented here)
   6751 	 */
   6752 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6753 		printf("gmii_hv_readreg!!!\n");
   6754 		return 0;
   6755 	}
   6756 
   6757 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6758 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6759 		    page << BME1000_PAGE_SHIFT);
   6760 	}
   6761 
   6762 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   6763 	wm_put_swfwhw_semaphore(sc);
   6764 	return rv;
   6765 }
   6766 
   6767 /*
   6768  * wm_gmii_hv_writereg:	[mii interface function]
   6769  *
 *	Write a PHY register on the HV PHY (82577 and similar).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6773  */
   6774 static void
   6775 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   6776 {
   6777 	struct wm_softc *sc = device_private(self);
   6778 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6779 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6780 
   6781 	if (wm_get_swfwhw_semaphore(sc)) {
   6782 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6783 		    __func__);
   6784 		return;
   6785 	}
   6786 
   6787 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6788 
	/* Page 800 works differently from the rest; it has its own function */
   6790 	if (page == BM_WUC_PAGE) {
   6791 		uint16_t tmp;
   6792 
   6793 		tmp = val;
   6794 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   6795 		return;
   6796 	}
   6797 
   6798 	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function (not implemented here)
   6801 	 */
   6802 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6803 		printf("gmii_hv_writereg!!!\n");
   6804 		return;
   6805 	}
   6806 
   6807 	/*
   6808 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   6809 	 * Power Down (whenever bit 11 of the PHY control register is set)
   6810 	 */
   6811 
   6812 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6813 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6814 		    page << BME1000_PAGE_SHIFT);
   6815 	}
   6816 
   6817 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   6818 	wm_put_swfwhw_semaphore(sc);
   6819 }
   6820 
   6821 /*
   6822  * wm_gmii_82580_readreg:	[mii interface function]
   6823  *
   6824  *	Read a PHY register on the 82580 and I350.
   6825  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6827  */
   6828 static int
   6829 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   6830 {
   6831 	struct wm_softc *sc = device_private(self);
   6832 	int sem;
   6833 	int rv;
   6834 
   6835 	sem = swfwphysem[sc->sc_funcid];
   6836 	if (wm_get_swfw_semaphore(sc, sem)) {
   6837 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6838 		    __func__);
   6839 		return 0;
   6840 	}
   6841 
   6842 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   6843 
   6844 	wm_put_swfw_semaphore(sc, sem);
   6845 	return rv;
   6846 }
   6847 
   6848 /*
   6849  * wm_gmii_82580_writereg:	[mii interface function]
   6850  *
   6851  *	Write a PHY register on the 82580 and I350.
   6852  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   6854  */
   6855 static void
   6856 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   6857 {
   6858 	struct wm_softc *sc = device_private(self);
   6859 	int sem;
   6860 
   6861 	sem = swfwphysem[sc->sc_funcid];
   6862 	if (wm_get_swfw_semaphore(sc, sem)) {
   6863 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6864 		    __func__);
   6865 		return;
   6866 	}
   6867 
   6868 	wm_gmii_i82544_writereg(self, phy, reg, val);
   6869 
   6870 	wm_put_swfw_semaphore(sc, sem);
   6871 }
   6872 
   6873 /*
   6874  * wm_gmii_statchg:	[mii interface function]
   6875  *
   6876  *	Callback from MII layer when media changes.
   6877  */
   6878 static void
   6879 wm_gmii_statchg(struct ifnet *ifp)
   6880 {
   6881 	struct wm_softc *sc = ifp->if_softc;
   6882 	struct mii_data *mii = &sc->sc_mii;
   6883 
   6884 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   6885 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6886 	sc->sc_fcrtl &= ~FCRTL_XONE;
   6887 
   6888 	/*
   6889 	 * Get flow control negotiation result.
   6890 	 */
   6891 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   6892 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   6893 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   6894 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   6895 	}
   6896 
   6897 	if (sc->sc_flowflags & IFM_FLOW) {
   6898 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   6899 			sc->sc_ctrl |= CTRL_TFCE;
   6900 			sc->sc_fcrtl |= FCRTL_XONE;
   6901 		}
   6902 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   6903 			sc->sc_ctrl |= CTRL_RFCE;
   6904 	}
   6905 
   6906 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6907 		DPRINTF(WM_DEBUG_LINK,
   6908 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   6909 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6910 	} else {
   6911 		DPRINTF(WM_DEBUG_LINK,
   6912 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   6913 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6914 	}
   6915 
   6916 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6917 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6918 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   6919 						 : WMREG_FCRTL, sc->sc_fcrtl);
   6920 	if (sc->sc_type == WM_T_80003) {
   6921 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   6922 		case IFM_1000_T:
   6923 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6924 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   6925 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6926 			break;
   6927 		default:
   6928 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6929 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   6930 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   6931 			break;
   6932 		}
   6933 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6934 	}
   6935 }
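
/*
 * To summarize the flow-control mapping above: IFM_ETH_TXPAUSE turns
 * on CTRL_TFCE and FCRTL_XONE (we may send PAUSE frames), and
 * IFM_ETH_RXPAUSE turns on CTRL_RFCE (we honor received PAUSE
 * frames).
 */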
   6936 
   6937 /*
   6938  * wm_kmrn_readreg:
   6939  *
 *	Read a Kumeran register
   6941  */
   6942 static int
   6943 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   6944 {
   6945 	int rv;
   6946 
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   6948 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   6949 			aprint_error_dev(sc->sc_dev,
   6950 			    "%s: failed to get semaphore\n", __func__);
   6951 			return 0;
   6952 		}
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   6954 		if (wm_get_swfwhw_semaphore(sc)) {
   6955 			aprint_error_dev(sc->sc_dev,
   6956 			    "%s: failed to get semaphore\n", __func__);
   6957 			return 0;
   6958 		}
   6959 	}
   6960 
   6961 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   6962 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   6963 	    KUMCTRLSTA_REN);
   6964 	CSR_WRITE_FLUSH(sc);
   6965 	delay(2);
   6966 
   6967 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   6968 
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   6972 		wm_put_swfwhw_semaphore(sc);
   6973 
   6974 	return rv;
   6975 }
   6976 
   6977 /*
   6978  * wm_kmrn_writereg:
   6979  *
 *	Write a Kumeran register
   6981  */
   6982 static void
   6983 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   6984 {
   6985 
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   6987 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   6988 			aprint_error_dev(sc->sc_dev,
   6989 			    "%s: failed to get semaphore\n", __func__);
   6990 			return;
   6991 		}
	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   6993 		if (wm_get_swfwhw_semaphore(sc)) {
   6994 			aprint_error_dev(sc->sc_dev,
   6995 			    "%s: failed to get semaphore\n", __func__);
   6996 			return;
   6997 		}
   6998 	}
   6999 
   7000 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7001 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7002 	    (val & KUMCTRLSTA_MASK));
   7003 
	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   7007 		wm_put_swfwhw_semaphore(sc);
   7008 }
   7009 
   7010 /* SGMII related */
   7011 
   7012 /*
   7013  * wm_sgmii_uses_mdio
   7014  *
   7015  * Check whether the transaction is to the internal PHY or the external
   7016  * MDIO interface. Return true if it's MDIO.
   7017  */
   7018 static bool
   7019 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7020 {
   7021 	uint32_t reg;
   7022 	bool ismdio = false;
   7023 
   7024 	switch (sc->sc_type) {
   7025 	case WM_T_82575:
   7026 	case WM_T_82576:
   7027 		reg = CSR_READ(sc, WMREG_MDIC);
   7028 		ismdio = ((reg & MDIC_DEST) != 0);
   7029 		break;
   7030 	case WM_T_82580:
   7031 	case WM_T_82580ER:
   7032 	case WM_T_I350:
   7033 	case WM_T_I354:
   7034 	case WM_T_I210:
   7035 	case WM_T_I211:
   7036 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7037 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7038 		break;
   7039 	default:
   7040 		break;
   7041 	}
   7042 
   7043 	return ismdio;
   7044 }
   7045 
   7046 /*
   7047  * wm_sgmii_readreg:	[mii interface function]
   7048  *
   7049  *	Read a PHY register on the SGMII
   7050  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7052  */
   7053 static int
   7054 wm_sgmii_readreg(device_t self, int phy, int reg)
   7055 {
   7056 	struct wm_softc *sc = device_private(self);
   7057 	uint32_t i2ccmd;
   7058 	int i, rv;
   7059 
   7060 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7061 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7062 		    __func__);
   7063 		return 0;
   7064 	}
   7065 
   7066 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7067 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7068 	    | I2CCMD_OPCODE_READ;
   7069 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7070 
   7071 	/* Poll the ready bit */
   7072 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7073 		delay(50);
   7074 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7075 		if (i2ccmd & I2CCMD_READY)
   7076 			break;
   7077 	}
   7078 	if ((i2ccmd & I2CCMD_READY) == 0)
   7079 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7080 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7081 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7082 
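	/* The 16-bit data comes back byte-swapped in I2CCMD; swap it back. */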
   7083 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7084 
   7085 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7086 	return rv;
   7087 }
   7088 
   7089 /*
   7090  * wm_sgmii_writereg:	[mii interface function]
   7091  *
   7092  *	Write a PHY register on the SGMII.
   7093  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   7095  */
   7096 static void
   7097 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7098 {
   7099 	struct wm_softc *sc = device_private(self);
   7100 	uint32_t i2ccmd;
   7101 	int i;
   7102 
   7103 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7104 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7105 		    __func__);
   7106 		return;
   7107 	}
   7108 
   7109 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7110 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7111 	    | I2CCMD_OPCODE_WRITE;
   7112 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7113 
   7114 	/* Poll the ready bit */
   7115 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7116 		delay(50);
   7117 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7118 		if (i2ccmd & I2CCMD_READY)
   7119 			break;
   7120 	}
   7121 	if ((i2ccmd & I2CCMD_READY) == 0)
   7122 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7123 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7124 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7125 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7127 }
   7128 
   7129 /* TBI related */
   7130 
   7131 /* XXX Currently TBI only */
   7132 static int
   7133 wm_check_for_link(struct wm_softc *sc)
   7134 {
   7135 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7136 	uint32_t rxcw;
   7137 	uint32_t ctrl;
   7138 	uint32_t status;
   7139 	uint32_t sig;
   7140 
   7141 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7142 		sc->sc_tbi_linkup = 1;
   7143 		return 0;
   7144 	}
   7145 
   7146 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7147 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7148 	status = CSR_READ(sc, WMREG_STATUS);
   7149 
   7150 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7151 
   7152 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7153 		device_xname(sc->sc_dev), __func__,
   7154 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7155 		((status & STATUS_LU) != 0),
   7156 		((rxcw & RXCW_C) != 0)
   7157 		    ));
   7158 
   7159 	/*
   7160 	 * SWDPIN   LU RXCW
   7161 	 *      0    0    0
   7162 	 *      0    0    1	(should not happen)
   7163 	 *      0    1    0	(should not happen)
   7164 	 *      0    1    1	(should not happen)
   7165 	 *      1    0    0	Disable autonego and force linkup
   7166 	 *      1    0    1	got /C/ but not linkup yet
   7167 	 *      1    1    0	(linkup)
   7168 	 *      1    1    1	If IFM_AUTO, back to autonego
   7169 	 *
   7170 	 */
   7171 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7172 	    && ((status & STATUS_LU) == 0)
   7173 	    && ((rxcw & RXCW_C) == 0)) {
   7174 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7175 			__func__));
   7176 		sc->sc_tbi_linkup = 0;
   7177 		/* Disable auto-negotiation in the TXCW register */
   7178 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7179 
   7180 		/*
   7181 		 * Force link-up and also force full-duplex.
   7182 		 *
		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
		 * automatically, so refresh sc->sc_ctrl from the register
   7185 		 */
   7186 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7187 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7188 	} else if (((status & STATUS_LU) != 0)
   7189 	    && ((rxcw & RXCW_C) != 0)
   7190 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   7191 		sc->sc_tbi_linkup = 1;
   7192 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   7193 			__func__));
   7194 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7195 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   7196 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7197 	    && ((rxcw & RXCW_C) != 0)) {
   7198 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   7199 	} else {
   7200 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   7201 			status));
   7202 	}
   7203 
   7204 	return 0;
   7205 }
   7206 
   7207 /*
   7208  * wm_tbi_mediainit:
   7209  *
   7210  *	Initialize media for use on 1000BASE-X devices.
   7211  */
   7212 static void
   7213 wm_tbi_mediainit(struct wm_softc *sc)
   7214 {
   7215 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7216 	const char *sep = "";
   7217 
   7218 	if (sc->sc_type < WM_T_82543)
   7219 		sc->sc_tipg = TIPG_WM_DFLT;
   7220 	else
   7221 		sc->sc_tipg = TIPG_LG_DFLT;
   7222 
   7223 	sc->sc_tbi_anegticks = 5;
   7224 
   7225 	/* Initialize our media structures */
   7226 	sc->sc_mii.mii_ifp = ifp;
   7227 
   7228 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7229 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   7230 	    wm_tbi_mediastatus);
   7231 
   7232 	/*
   7233 	 * SWD Pins:
   7234 	 *
   7235 	 *	0 = Link LED (output)
   7236 	 *	1 = Loss Of Signal (input)
   7237 	 */
   7238 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7239 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7240 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7241 		sc->sc_ctrl &= ~CTRL_LRST;
   7242 
   7243 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7244 
   7245 #define	ADD(ss, mm, dd)							\
   7246 do {									\
   7247 	aprint_normal("%s%s", sep, ss);					\
   7248 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7249 	sep = ", ";							\
   7250 } while (/*CONSTCOND*/0)
   7251 
   7252 	aprint_normal_dev(sc->sc_dev, "");
   7253 
   7254 	/* Only 82545 is LX */
   7255 	if (sc->sc_type == WM_T_82545) {
   7256 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   7257 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   7258 	} else {
   7259 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7260 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7261 	}
   7262 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7263 	aprint_normal("\n");
   7264 
   7265 #undef ADD
   7266 
   7267 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7268 }
   7269 
   7270 /*
   7271  * wm_tbi_mediastatus:	[ifmedia interface function]
   7272  *
   7273  *	Get the current interface media status on a 1000BASE-X device.
   7274  */
   7275 static void
   7276 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7277 {
   7278 	struct wm_softc *sc = ifp->if_softc;
   7279 	uint32_t ctrl, status;
   7280 
   7281 	ifmr->ifm_status = IFM_AVALID;
   7282 	ifmr->ifm_active = IFM_ETHER;
   7283 
   7284 	status = CSR_READ(sc, WMREG_STATUS);
   7285 	if ((status & STATUS_LU) == 0) {
   7286 		ifmr->ifm_active |= IFM_NONE;
   7287 		return;
   7288 	}
   7289 
   7290 	ifmr->ifm_status |= IFM_ACTIVE;
   7291 	/* Only 82545 is LX */
   7292 	if (sc->sc_type == WM_T_82545)
   7293 		ifmr->ifm_active |= IFM_1000_LX;
   7294 	else
   7295 		ifmr->ifm_active |= IFM_1000_SX;
   7296 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   7297 		ifmr->ifm_active |= IFM_FDX;
   7298 	else
   7299 		ifmr->ifm_active |= IFM_HDX;
   7300 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7301 	if (ctrl & CTRL_RFCE)
   7302 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7303 	if (ctrl & CTRL_TFCE)
   7304 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7305 }
   7306 
   7307 /*
   7308  * wm_tbi_mediachange:	[ifmedia interface function]
   7309  *
   7310  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7311  */
   7312 static int
   7313 wm_tbi_mediachange(struct ifnet *ifp)
   7314 {
   7315 	struct wm_softc *sc = ifp->if_softc;
   7316 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7317 	uint32_t status;
   7318 	int i;
   7319 
   7320 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7321 		return 0;
   7322 
   7323 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   7324 	    || (sc->sc_type >= WM_T_82575))
   7325 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   7326 
   7327 	/* XXX power_up_serdes_link_82575() */
   7328 
   7329 	sc->sc_ctrl &= ~CTRL_LRST;
   7330 	sc->sc_txcw = TXCW_ANE;
   7331 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7332 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   7333 	else if (ife->ifm_media & IFM_FDX)
   7334 		sc->sc_txcw |= TXCW_FD;
   7335 	else
   7336 		sc->sc_txcw |= TXCW_HD;
   7337 
   7338 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7339 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7340 
   7341 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7342 		    device_xname(sc->sc_dev), sc->sc_txcw));
   7343 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7345 	CSR_WRITE_FLUSH(sc);
   7346 	delay(1000);
   7347 
   7348 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7349 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7350 
   7351 	/*
	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
	 * optics detect a signal; on older chips, 0 indicates a signal.
   7354 	 */
   7355 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7356 		/* Have signal; wait for the link to come up. */
   7357 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7358 			delay(10000);
   7359 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7360 				break;
   7361 		}
   7362 
   7363 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7364 			    device_xname(sc->sc_dev),i));
   7365 
   7366 		status = CSR_READ(sc, WMREG_STATUS);
   7367 		DPRINTF(WM_DEBUG_LINK,
   7368 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7369 			device_xname(sc->sc_dev),status, STATUS_LU));
   7370 		if (status & STATUS_LU) {
   7371 			/* Link is up. */
   7372 			DPRINTF(WM_DEBUG_LINK,
   7373 			    ("%s: LINK: set media -> link up %s\n",
   7374 			    device_xname(sc->sc_dev),
   7375 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7376 
   7377 			/*
   7378 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7379 			 * so we should update sc->sc_ctrl
   7380 			 */
   7381 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7382 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7383 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7384 			if (status & STATUS_FD)
   7385 				sc->sc_tctl |=
   7386 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7387 			else
   7388 				sc->sc_tctl |=
   7389 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7390 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7391 				sc->sc_fcrtl |= FCRTL_XONE;
   7392 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7393 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7394 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7395 				      sc->sc_fcrtl);
   7396 			sc->sc_tbi_linkup = 1;
   7397 		} else {
   7398 			if (i == WM_LINKUP_TIMEOUT)
   7399 				wm_check_for_link(sc);
   7400 			/* Link is down. */
   7401 			DPRINTF(WM_DEBUG_LINK,
   7402 			    ("%s: LINK: set media -> link down\n",
   7403 			    device_xname(sc->sc_dev)));
   7404 			sc->sc_tbi_linkup = 0;
   7405 		}
   7406 	} else {
   7407 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7408 		    device_xname(sc->sc_dev)));
   7409 		sc->sc_tbi_linkup = 0;
   7410 	}
   7411 
   7412 	wm_tbi_set_linkled(sc);
   7413 
   7414 	return 0;
   7415 }
   7416 
   7417 /*
   7418  * wm_tbi_set_linkled:
   7419  *
   7420  *	Update the link LED on 1000BASE-X devices.
   7421  */
   7422 static void
   7423 wm_tbi_set_linkled(struct wm_softc *sc)
   7424 {
   7425 
   7426 	if (sc->sc_tbi_linkup)
   7427 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7428 	else
   7429 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7430 
   7431 	/* 82540 or newer devices are active low */
   7432 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7433 
   7434 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7435 }
   7436 
   7437 /*
   7438  * wm_tbi_check_link:
   7439  *
   7440  *	Check the link on 1000BASE-X devices.
   7441  */
   7442 static void
   7443 wm_tbi_check_link(struct wm_softc *sc)
   7444 {
   7445 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7446 	uint32_t status;
   7447 
   7448 	KASSERT(WM_TX_LOCKED(sc));
   7449 
   7450 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7451 		sc->sc_tbi_linkup = 1;
   7452 		return;
   7453 	}
   7454 
   7455 	status = CSR_READ(sc, WMREG_STATUS);
   7456 
   7457 	/* XXX is this needed? */
   7458 	(void)CSR_READ(sc, WMREG_RXCW);
   7459 	(void)CSR_READ(sc, WMREG_CTRL);
   7460 
   7461 	/* set link status */
   7462 	if ((status & STATUS_LU) == 0) {
   7463 		DPRINTF(WM_DEBUG_LINK,
   7464 		    ("%s: LINK: checklink -> down\n",
   7465 			device_xname(sc->sc_dev)));
   7466 		sc->sc_tbi_linkup = 0;
   7467 	} else if (sc->sc_tbi_linkup == 0) {
   7468 		DPRINTF(WM_DEBUG_LINK,
   7469 		    ("%s: LINK: checklink -> up %s\n",
   7470 			device_xname(sc->sc_dev),
   7471 			(status & STATUS_FD) ? "FDX" : "HDX"));
   7472 		sc->sc_tbi_linkup = 1;
   7473 	}
   7474 
   7475 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
   7476 	    && ((status & STATUS_LU) == 0)) {
   7477 		sc->sc_tbi_linkup = 0;
   7478 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7479 			/* If the timer expired, retry autonegotiation */
   7480 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
   7481 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   7482 				sc->sc_tbi_ticks = 0;
   7483 				/*
   7484 				 * Reset the link, and let autonegotiation do
   7485 				 * its thing
   7486 				 */
   7487 				sc->sc_ctrl |= CTRL_LRST;
   7488 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7489 				CSR_WRITE_FLUSH(sc);
   7490 				delay(1000);
   7491 				sc->sc_ctrl &= ~CTRL_LRST;
   7492 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7493 				CSR_WRITE_FLUSH(sc);
   7494 				delay(1000);
   7495 				CSR_WRITE(sc, WMREG_TXCW,
   7496 				    sc->sc_txcw & ~TXCW_ANE);
   7497 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7498 			}
   7499 		}
   7500 	}
   7501 
   7502 	wm_tbi_set_linkled(sc);
   7503 }
   7504 
   7505 /*
   7506  * NVM related.
   7507  * Microwire, SPI (w/wo EERD) and Flash.
   7508  */
   7509 
/* Both SPI and Microwire */
   7511 
   7512 /*
   7513  * wm_eeprom_sendbits:
   7514  *
   7515  *	Send a series of bits to the EEPROM.
   7516  */
   7517 static void
   7518 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   7519 {
   7520 	uint32_t reg;
   7521 	int x;
   7522 
   7523 	reg = CSR_READ(sc, WMREG_EECD);
   7524 
   7525 	for (x = nbits; x > 0; x--) {
   7526 		if (bits & (1U << (x - 1)))
   7527 			reg |= EECD_DI;
   7528 		else
   7529 			reg &= ~EECD_DI;
   7530 		CSR_WRITE(sc, WMREG_EECD, reg);
   7531 		CSR_WRITE_FLUSH(sc);
   7532 		delay(2);
   7533 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7534 		CSR_WRITE_FLUSH(sc);
   7535 		delay(2);
   7536 		CSR_WRITE(sc, WMREG_EECD, reg);
   7537 		CSR_WRITE_FLUSH(sc);
   7538 		delay(2);
   7539 	}
   7540 }
   7541 
   7542 /*
   7543  * wm_eeprom_recvbits:
   7544  *
   7545  *	Receive a series of bits from the EEPROM.
   7546  */
   7547 static void
   7548 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   7549 {
   7550 	uint32_t reg, val;
   7551 	int x;
   7552 
   7553 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   7554 
   7555 	val = 0;
   7556 	for (x = nbits; x > 0; x--) {
   7557 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7558 		CSR_WRITE_FLUSH(sc);
   7559 		delay(2);
   7560 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   7561 			val |= (1U << (x - 1));
   7562 		CSR_WRITE(sc, WMREG_EECD, reg);
   7563 		CSR_WRITE_FLUSH(sc);
   7564 		delay(2);
   7565 	}
   7566 	*valp = val;
   7567 }
   7568 
   7569 /* Microwire */
   7570 
   7571 /*
   7572  * wm_nvm_read_uwire:
   7573  *
   7574  *	Read a word from the EEPROM using the MicroWire protocol.
   7575  */
   7576 static int
   7577 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7578 {
   7579 	uint32_t reg, val;
   7580 	int i;
   7581 
   7582 	for (i = 0; i < wordcnt; i++) {
   7583 		/* Clear SK and DI. */
   7584 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   7585 		CSR_WRITE(sc, WMREG_EECD, reg);
   7586 
   7587 		/*
		 * XXX: workaround for a bug in qemu 0.12.x and earlier,
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
   7593 		 */
   7594 		if (sc->sc_type == WM_T_82540) {
   7595 			reg |= EECD_SK;
   7596 			CSR_WRITE(sc, WMREG_EECD, reg);
   7597 			reg &= ~EECD_SK;
   7598 			CSR_WRITE(sc, WMREG_EECD, reg);
   7599 			CSR_WRITE_FLUSH(sc);
   7600 			delay(2);
   7601 		}
   7602 		/* XXX: end of workaround */
   7603 
   7604 		/* Set CHIP SELECT. */
   7605 		reg |= EECD_CS;
   7606 		CSR_WRITE(sc, WMREG_EECD, reg);
   7607 		CSR_WRITE_FLUSH(sc);
   7608 		delay(2);
   7609 
   7610 		/* Shift in the READ command. */
   7611 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   7612 
   7613 		/* Shift in address. */
   7614 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   7615 
   7616 		/* Shift out the data. */
   7617 		wm_eeprom_recvbits(sc, &val, 16);
   7618 		data[i] = val & 0xffff;
   7619 
   7620 		/* Clear CHIP SELECT. */
   7621 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   7622 		CSR_WRITE(sc, WMREG_EECD, reg);
   7623 		CSR_WRITE_FLUSH(sc);
   7624 		delay(2);
   7625 	}
   7626 
   7627 	return 0;
   7628 }
   7629 
   7630 /* SPI */
   7631 
   7632 /* Set SPI related information */
   7633 static void
   7634 wm_set_spiaddrbits(struct wm_softc *sc)
   7635 {
   7636 	uint32_t reg;
   7637 
   7638 	sc->sc_flags |= WM_F_EEPROM_SPI;
   7639 	reg = CSR_READ(sc, WMREG_EECD);
   7640 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   7641 }
   7642 
   7643 /*
   7644  * wm_nvm_ready_spi:
   7645  *
   7646  *	Wait for a SPI EEPROM to be ready for commands.
   7647  */
   7648 static int
   7649 wm_nvm_ready_spi(struct wm_softc *sc)
   7650 {
   7651 	uint32_t val;
   7652 	int usec;
   7653 
   7654 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   7655 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   7656 		wm_eeprom_recvbits(sc, &val, 8);
   7657 		if ((val & SPI_SR_RDY) == 0)
   7658 			break;
   7659 	}
   7660 	if (usec >= SPI_MAX_RETRIES) {
   7661 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   7662 		return 1;
   7663 	}
   7664 	return 0;
   7665 }
   7666 
   7667 /*
   7668  * wm_nvm_read_spi:
   7669  *
 *	Read a word from the EEPROM using the SPI protocol.
   7671  */
   7672 static int
   7673 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7674 {
   7675 	uint32_t reg, val;
   7676 	int i;
   7677 	uint8_t opc;
   7678 
   7679 	/* Clear SK and CS. */
   7680 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   7681 	CSR_WRITE(sc, WMREG_EECD, reg);
   7682 	CSR_WRITE_FLUSH(sc);
   7683 	delay(2);
   7684 
   7685 	if (wm_nvm_ready_spi(sc))
   7686 		return 1;
   7687 
   7688 	/* Toggle CS to flush commands. */
   7689 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   7690 	CSR_WRITE_FLUSH(sc);
   7691 	delay(2);
   7692 	CSR_WRITE(sc, WMREG_EECD, reg);
   7693 	CSR_WRITE_FLUSH(sc);
   7694 	delay(2);
   7695 
   7696 	opc = SPI_OPC_READ;
   7697 	if (sc->sc_ee_addrbits == 8 && word >= 128)
   7698 		opc |= SPI_OPC_A8;
   7699 
   7700 	wm_eeprom_sendbits(sc, opc, 8);
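	/* SPI EEPROM addresses count bytes, so shift the word index by 1 */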
   7701 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
   7702 
   7703 	for (i = 0; i < wordcnt; i++) {
   7704 		wm_eeprom_recvbits(sc, &val, 16);
   7705 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   7706 	}
   7707 
   7708 	/* Raise CS and clear SK. */
   7709 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   7710 	CSR_WRITE(sc, WMREG_EECD, reg);
   7711 	CSR_WRITE_FLUSH(sc);
   7712 	delay(2);
   7713 
   7714 	return 0;
   7715 }
   7716 
/* Reading with the EERD register */
   7718 
   7719 static int
   7720 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   7721 {
   7722 	uint32_t attempts = 100000;
   7723 	uint32_t i, reg = 0;
   7724 	int32_t done = -1;
   7725 
   7726 	for (i = 0; i < attempts; i++) {
   7727 		reg = CSR_READ(sc, rw);
   7728 
   7729 		if (reg & EERD_DONE) {
   7730 			done = 0;
   7731 			break;
   7732 		}
   7733 		delay(5);
   7734 	}
   7735 
   7736 	return done;
   7737 }
   7738 
   7739 static int
   7740 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   7741     uint16_t *data)
   7742 {
   7743 	int i, eerd = 0;
   7744 	int error = 0;
   7745 
   7746 	for (i = 0; i < wordcnt; i++) {
   7747 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   7748 
   7749 		CSR_WRITE(sc, WMREG_EERD, eerd);
   7750 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   7751 		if (error != 0)
   7752 			break;
   7753 
   7754 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   7755 	}
   7756 
   7757 	return error;
   7758 }
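
/*
 * Usage sketch (illustrative only): each EERD transaction above
 * writes the word address plus EERD_START, polls for EERD_DONE, then
 * extracts the data field.  Reading two words starting at word 0:
 *
 *	uint16_t buf[2];
 *	(void)wm_nvm_read_eerd(sc, 0, 2, buf);
 */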
   7759 
   7760 /* Flash */
   7761 
   7762 static int
   7763 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   7764 {
   7765 	uint32_t eecd;
   7766 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   7767 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   7768 	uint8_t sig_byte = 0;
   7769 
   7770 	switch (sc->sc_type) {
   7771 	case WM_T_ICH8:
   7772 	case WM_T_ICH9:
   7773 		eecd = CSR_READ(sc, WMREG_EECD);
   7774 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   7775 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   7776 			return 0;
   7777 		}
   7778 		/* FALLTHROUGH */
   7779 	default:
   7780 		/* Default to 0 */
   7781 		*bank = 0;
   7782 
   7783 		/* Check bank 0 */
   7784 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   7785 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7786 			*bank = 0;
   7787 			return 0;
   7788 		}
   7789 
   7790 		/* Check bank 1 */
   7791 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   7792 		    &sig_byte);
   7793 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7794 			*bank = 1;
   7795 			return 0;
   7796 		}
   7797 	}
   7798 
   7799 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   7800 		device_xname(sc->sc_dev)));
   7801 	return -1;
   7802 }
   7803 
   7804 /******************************************************************************
   7805  * This function does initial flash setup so that a new read/write/erase cycle
   7806  * can be started.
   7807  *
   7808  * sc - The pointer to the hw structure
   7809  ****************************************************************************/
   7810 static int32_t
   7811 wm_ich8_cycle_init(struct wm_softc *sc)
   7812 {
   7813 	uint16_t hsfsts;
   7814 	int32_t error = 1;
   7815 	int32_t i     = 0;
   7816 
   7817 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7818 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
   7820 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   7821 		return error;
   7822 	}
   7823 
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   7826 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   7827 
   7828 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7829 
   7830 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then indicate whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that two threads' accesses to those bits are serialized, or
	 * some way to keep two threads from starting a cycle at once.
   7839 	 */
   7840 
   7841 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7842 		/*
   7843 		 * There is no cycle running at present, so we can start a
   7844 		 * cycle
   7845 		 */
   7846 
   7847 		/* Begin by setting Flash Cycle Done. */
   7848 		hsfsts |= HSFSTS_DONE;
   7849 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7850 		error = 0;
   7851 	} else {
   7852 		/*
		 * Otherwise, poll for some time so the current cycle has a
   7854 		 * chance to end before giving up.
   7855 		 */
   7856 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   7857 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7858 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7859 				error = 0;
   7860 				break;
   7861 			}
   7862 			delay(1);
   7863 		}
   7864 		if (error == 0) {
   7865 			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done bit.
   7868 			 */
   7869 			hsfsts |= HSFSTS_DONE;
   7870 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7871 		}
   7872 	}
   7873 	return error;
   7874 }
   7875 
   7876 /******************************************************************************
   7877  * This function starts a flash cycle and waits for its completion
   7878  *
   7879  * sc - The pointer to the hw structure
   7880  ****************************************************************************/
   7881 static int32_t
   7882 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   7883 {
   7884 	uint16_t hsflctl;
   7885 	uint16_t hsfsts;
   7886 	int32_t error = 1;
   7887 	uint32_t i = 0;
   7888 
   7889 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   7890 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   7891 	hsflctl |= HSFCTL_GO;
   7892 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7893 
   7894 	/* Wait till FDONE bit is set to 1 */
   7895 	do {
   7896 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7897 		if (hsfsts & HSFSTS_DONE)
   7898 			break;
   7899 		delay(1);
   7900 		i++;
   7901 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   7903 		error = 0;
   7904 
   7905 	return error;
   7906 }
   7907 
   7908 /******************************************************************************
   7909  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   7910  *
   7911  * sc - The pointer to the hw structure
   7912  * index - The index of the byte or word to read.
   7913  * size - Size of data to read, 1=byte 2=word
   7914  * data - Pointer to the word to store the value read.
   7915  *****************************************************************************/
   7916 static int32_t
   7917 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   7918     uint32_t size, uint16_t *data)
   7919 {
   7920 	uint16_t hsfsts;
   7921 	uint16_t hsflctl;
   7922 	uint32_t flash_linear_address;
   7923 	uint32_t flash_data = 0;
   7924 	int32_t error = 1;
   7925 	int32_t count = 0;
   7926 
	if (size < 1 || size > 2 || data == NULL ||
   7928 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   7929 		return error;
   7930 
   7931 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   7932 	    sc->sc_ich8_flash_base;
   7933 
   7934 	do {
   7935 		delay(1);
   7936 		/* Steps */
   7937 		error = wm_ich8_cycle_init(sc);
   7938 		if (error)
   7939 			break;
   7940 
   7941 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to a 1- or 2-byte transfer, respectively. */
   7943 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   7944 		    & HSFCTL_BCOUNT_MASK;
   7945 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   7946 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7947 
   7948 		/*
   7949 		 * Write the last 24 bits of index into Flash Linear address
   7950 		 * field in Flash Address
   7951 		 */
		/* TODO: maybe check the index against the size of the flash */
   7953 
   7954 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   7955 
   7956 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   7957 
   7958 		/*
		 * Check if FCERR is set to 1; if so, clear it and retry the
		 * whole sequence a few more times.  Otherwise read the data
		 * out of Flash Data0, least significant byte first.
   7963 		 */
   7964 		if (error == 0) {
   7965 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   7966 			if (size == 1)
   7967 				*data = (uint8_t)(flash_data & 0x000000FF);
   7968 			else if (size == 2)
   7969 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   7970 			break;
   7971 		} else {
   7972 			/*
   7973 			 * If we've gotten here, then things are probably
   7974 			 * completely hosed, but if the error condition is
   7975 			 * detected, it won't hurt to give it another try...
   7976 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   7977 			 */
   7978 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7979 			if (hsfsts & HSFSTS_ERR) {
   7980 				/* Repeat for some time before giving up. */
   7981 				continue;
   7982 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   7983 				break;
   7984 		}
   7985 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   7986 
   7987 	return error;
   7988 }
   7989 
   7990 /******************************************************************************
   7991  * Reads a single byte from the NVM using the ICH8 flash access registers.
   7992  *
   7993  * sc - pointer to wm_hw structure
   7994  * index - The index of the byte to read.
   7995  * data - Pointer to a byte to store the value read.
   7996  *****************************************************************************/
   7997 static int32_t
   7998 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   7999 {
   8000 	int32_t status;
   8001 	uint16_t word = 0;
   8002 
   8003 	status = wm_read_ich8_data(sc, index, 1, &word);
   8004 	if (status == 0)
   8005 		*data = (uint8_t)word;
   8006 	else
   8007 		*data = 0;
   8008 
   8009 	return status;
   8010 }
   8011 
   8012 /******************************************************************************
   8013  * Reads a word from the NVM using the ICH8 flash access registers.
   8014  *
   8015  * sc - pointer to wm_hw structure
   8016  * index - The starting byte index of the word to read.
   8017  * data - Pointer to a word to store the value read.
   8018  *****************************************************************************/
   8019 static int32_t
   8020 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8021 {
   8022 	int32_t status;
   8023 
   8024 	status = wm_read_ich8_data(sc, index, 2, data);
   8025 	return status;
   8026 }
   8027 
   8028 /******************************************************************************
   8029  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8030  * register.
   8031  *
   8032  * sc - Struct containing variables accessed by shared code
   8033  * offset - offset of word in the EEPROM to read
   8034  * data - word read from the EEPROM
   8035  * words - number of words to read
   8036  *****************************************************************************/
   8037 static int
   8038 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8039 {
   8040 	int32_t  error = 0;
   8041 	uint32_t flash_bank = 0;
   8042 	uint32_t act_offset = 0;
   8043 	uint32_t bank_offset = 0;
   8044 	uint16_t word = 0;
   8045 	uint16_t i = 0;
   8046 
   8047 	/*
   8048 	 * We need to know which is the valid flash bank.  In the event
   8049 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8050 	 * managing flash_bank.  So it cannot be trusted and needs
   8051 	 * to be updated with each read.
   8052 	 */
   8053 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8054 	if (error) {
   8055 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
   8056 		    __func__);
   8057 		flash_bank = 0;
   8058 	}
   8059 
   8060 	/*
	 * Adjust the offset if we're on bank 1, converting the bank size
	 * from words to bytes
   8063 	 */
   8064 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8065 
   8066 	error = wm_get_swfwhw_semaphore(sc);
   8067 	if (error) {
   8068 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8069 		    __func__);
   8070 		return error;
   8071 	}
   8072 
   8073 	for (i = 0; i < words; i++) {
   8074 		/* The NVM part needs a byte offset, hence * 2 */
   8075 		act_offset = bank_offset + ((offset + i) * 2);
   8076 		error = wm_read_ich8_word(sc, act_offset, &word);
   8077 		if (error) {
   8078 			aprint_error_dev(sc->sc_dev,
   8079 			    "%s: failed to read NVM\n", __func__);
   8080 			break;
   8081 		}
   8082 		data[i] = word;
   8083 	}
   8084 
   8085 	wm_put_swfwhw_semaphore(sc);
   8086 	return error;
   8087 }
   8088 
/* Locking, NVM type detection, checksum validation and reading */
   8090 
   8091 /*
   8092  * wm_nvm_acquire:
   8093  *
   8094  *	Perform the EEPROM handshake required on some chips.
   8095  */
   8096 static int
   8097 wm_nvm_acquire(struct wm_softc *sc)
   8098 {
   8099 	uint32_t reg;
   8100 	int x;
   8101 	int ret = 0;
   8102 
	/* Flash-type NVM needs no handshake; always succeeds */
   8104 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8105 		return 0;
   8106 
   8107 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8108 		ret = wm_get_swfwhw_semaphore(sc);
   8109 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8110 		/* This will also do wm_get_swsm_semaphore() if needed */
   8111 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   8112 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8113 		ret = wm_get_swsm_semaphore(sc);
   8114 	}
   8115 
   8116 	if (ret) {
   8117 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8118 			__func__);
   8119 		return 1;
   8120 	}
   8121 
   8122 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8123 		reg = CSR_READ(sc, WMREG_EECD);
   8124 
   8125 		/* Request EEPROM access. */
   8126 		reg |= EECD_EE_REQ;
   8127 		CSR_WRITE(sc, WMREG_EECD, reg);
   8128 
   8129 		/* ..and wait for it to be granted. */
   8130 		for (x = 0; x < 1000; x++) {
   8131 			reg = CSR_READ(sc, WMREG_EECD);
   8132 			if (reg & EECD_EE_GNT)
   8133 				break;
   8134 			delay(5);
   8135 		}
   8136 		if ((reg & EECD_EE_GNT) == 0) {
   8137 			aprint_error_dev(sc->sc_dev,
   8138 			    "could not acquire EEPROM GNT\n");
   8139 			reg &= ~EECD_EE_REQ;
   8140 			CSR_WRITE(sc, WMREG_EECD, reg);
   8141 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8142 				wm_put_swfwhw_semaphore(sc);
   8143 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   8144 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8145 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8146 				wm_put_swsm_semaphore(sc);
   8147 			return 1;
   8148 		}
   8149 	}
   8150 
   8151 	return 0;
   8152 }
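         /*
          * A minimal usage sketch (illustrative, not driver code): every
          * raw EEPROM access must be bracketed by the handshake:
          *
          *	if (wm_nvm_acquire(sc))
          *		return 1;
          *	... raw EECD/EERD access here ...
          *	wm_nvm_release(sc);
          *
          * wm_nvm_read() below performs exactly this pairing.
          */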
   8153 
   8154 /*
   8155  * wm_nvm_release:
   8156  *
   8157  *	Release the EEPROM mutex.
   8158  */
   8159 static void
   8160 wm_nvm_release(struct wm_softc *sc)
   8161 {
   8162 	uint32_t reg;
   8163 
    8164 	/* Flash-type NVM needs no handshake; nothing to release */
   8165 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8166 		return;
   8167 
   8168 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8169 		reg = CSR_READ(sc, WMREG_EECD);
   8170 		reg &= ~EECD_EE_REQ;
   8171 		CSR_WRITE(sc, WMREG_EECD, reg);
   8172 	}
   8173 
   8174 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8175 		wm_put_swfwhw_semaphore(sc);
   8176 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8177 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8178 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8179 		wm_put_swsm_semaphore(sc);
   8180 }
   8181 
   8182 static int
   8183 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   8184 {
   8185 	uint32_t eecd = 0;
   8186 
   8187 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   8188 	    || sc->sc_type == WM_T_82583) {
   8189 		eecd = CSR_READ(sc, WMREG_EECD);
   8190 
   8191 		/* Isolate bits 15 & 16 */
   8192 		eecd = ((eecd >> 15) & 0x03);
   8193 
   8194 		/* If both bits are set, device is Flash type */
   8195 		if (eecd == 0x03)
   8196 			return 0;
   8197 	}
   8198 	return 1;
   8199 }
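         /*
          * Worked example (value chosen for illustration): on an 82573
          * with EECD = 0x00018000, ((eecd >> 15) & 0x03) == 0x03, i.e.
          * EECD bits 15 and 16 are both set, so the NVM is Flash and the
          * function returns 0 ("not an on-board EEPROM").
          */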
   8200 
   8201 #define NVM_CHECKSUM			0xBABA
   8202 #define EEPROM_SIZE			0x0040
   8203 #define NVM_COMPAT			0x0003
   8204 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
   8205 #define NVM_FUTURE_INIT_WORD1			0x0019
   8206 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
   8207 
   8208 /*
   8209  * wm_nvm_validate_checksum
   8210  *
    8211  * The checksum is the sum of the first 64 16-bit words (must be NVM_CHECKSUM).
   8212  */
   8213 static int
   8214 wm_nvm_validate_checksum(struct wm_softc *sc)
   8215 {
   8216 	uint16_t checksum;
   8217 	uint16_t eeprom_data;
   8218 #ifdef WM_DEBUG
   8219 	uint16_t csum_wordaddr, valid_checksum;
   8220 #endif
   8221 	int i;
   8222 
   8223 	checksum = 0;
   8224 
   8225 	/* Don't check for I211 */
   8226 	if (sc->sc_type == WM_T_I211)
   8227 		return 0;
   8228 
   8229 #ifdef WM_DEBUG
   8230 	if (sc->sc_type == WM_T_PCH_LPT) {
   8231 		csum_wordaddr = NVM_COMPAT;
   8232 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   8233 	} else {
   8234 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
   8235 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   8236 	}
   8237 
   8238 	/* Dump EEPROM image for debug */
   8239 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8240 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8241 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   8242 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   8243 		if ((eeprom_data & valid_checksum) == 0) {
   8244 			DPRINTF(WM_DEBUG_NVM,
   8245 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   8246 				device_xname(sc->sc_dev), eeprom_data,
   8247 				    valid_checksum));
   8248 		}
   8249 	}
   8250 
   8251 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   8252 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   8253 		for (i = 0; i < EEPROM_SIZE; i++) {
   8254 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8255 				printf("XX ");
   8256 			else
   8257 				printf("%04x ", eeprom_data);
   8258 			if (i % 8 == 7)
   8259 				printf("\n");
   8260 		}
   8261 	}
   8262 
   8263 #endif /* WM_DEBUG */
   8264 
   8265 	for (i = 0; i < EEPROM_SIZE; i++) {
   8266 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8267 			return 1;
   8268 		checksum += eeprom_data;
   8269 	}
   8270 
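         	/*
         	 * Note (added commentary): a mismatch below is only reported
         	 * under WM_DEBUG, not treated as fatal, presumably to
         	 * tolerate boards that ship with a bad checksum word.
         	 */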
   8271 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   8272 #ifdef WM_DEBUG
   8273 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   8274 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   8275 #endif
   8276 	}
   8277 
   8278 	return 0;
   8279 }
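         /*
          * Illustrative sketch (hypothetical tool code, not part of the
          * driver): computing the compensating word stored at offset 0x3f
          * so that the 64-word sum equals NVM_CHECKSUM:
          *
          *	uint16_t sum = 0, word;
          *	for (i = 0; i < EEPROM_SIZE - 1; i++)
          *		if (wm_nvm_read(sc, i, 1, &word) == 0)
          *			sum += word;
          *	word = NVM_CHECKSUM - sum;	(value for word 0x3f)
          */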
   8280 
   8281 /*
   8282  * wm_nvm_read:
   8283  *
   8284  *	Read data from the serial EEPROM.
   8285  */
   8286 static int
   8287 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8288 {
   8289 	int rv;
   8290 
   8291 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   8292 		return 1;
   8293 
   8294 	if (wm_nvm_acquire(sc))
   8295 		return 1;
   8296 
   8297 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8298 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8299 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   8300 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   8301 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   8302 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   8303 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   8304 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   8305 	else
   8306 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   8307 
   8308 	wm_nvm_release(sc);
   8309 	return rv;
   8310 }
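         /*
          * Typical usage (sketch; EEPROM_OFF_MACADDR is assumed to name
          * the word offset of the Ethernet address in this NVM layout):
          *
          *	uint16_t myea[ETHER_ADDR_LEN / 2];
          *	if (wm_nvm_read(sc, EEPROM_OFF_MACADDR, 3, myea) != 0)
          *		aprint_error_dev(sc->sc_dev,
          *		    "unable to read Ethernet address\n");
          */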
   8311 
   8312 /*
   8313  * Hardware semaphores.
   8314  * Very complexed...
   8315  */
   8316 
   8317 static int
   8318 wm_get_swsm_semaphore(struct wm_softc *sc)
   8319 {
   8320 	int32_t timeout;
   8321 	uint32_t swsm;
   8322 
   8323 	/* Get the SW semaphore. */
   8324 	timeout = 1000 + 1; /* XXX */
   8325 	while (timeout) {
   8326 		swsm = CSR_READ(sc, WMREG_SWSM);
   8327 
   8328 		if ((swsm & SWSM_SMBI) == 0)
   8329 			break;
   8330 
   8331 		delay(50);
   8332 		timeout--;
   8333 	}
   8334 
   8335 	if (timeout == 0) {
   8336 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   8337 		return 1;
   8338 	}
   8339 
   8340 	/* Get the FW semaphore. */
   8341 	timeout = 1000 + 1; /* XXX */
   8342 	while (timeout) {
   8343 		swsm = CSR_READ(sc, WMREG_SWSM);
   8344 		swsm |= SWSM_SWESMBI;
   8345 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   8346 		/* If we managed to set the bit we got the semaphore. */
   8347 		swsm = CSR_READ(sc, WMREG_SWSM);
   8348 		if (swsm & SWSM_SWESMBI)
   8349 			break;
   8350 
   8351 		delay(50);
   8352 		timeout--;
   8353 	}
   8354 
   8355 	if (timeout == 0) {
   8356 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   8357 		/* Release semaphores */
   8358 		wm_put_swsm_semaphore(sc);
   8359 		return 1;
   8360 	}
   8361 	return 0;
   8362 }
   8363 
   8364 static void
   8365 wm_put_swsm_semaphore(struct wm_softc *sc)
   8366 {
   8367 	uint32_t swsm;
   8368 
   8369 	swsm = CSR_READ(sc, WMREG_SWSM);
   8370 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   8371 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   8372 }
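         /*
          * Illustrative note: SWSM arbitration is a two-stage handshake:
          * SMBI serializes software agents, then SWESMBI arbitrates
          * between software and firmware.  Sketch of the pairing:
          *
          *	if (wm_get_swsm_semaphore(sc) == 0) {
          *		... touch resources shared with firmware ...
          *		wm_put_swsm_semaphore(sc);
          *	}
          */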
   8373 
   8374 static int
   8375 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8376 {
   8377 	uint32_t swfw_sync;
   8378 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   8379 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    8380 	int timeout;
   8381 
   8382 	for (timeout = 0; timeout < 200; timeout++) {
   8383 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8384 			if (wm_get_swsm_semaphore(sc)) {
   8385 				aprint_error_dev(sc->sc_dev,
   8386 				    "%s: failed to get semaphore\n",
   8387 				    __func__);
   8388 				return 1;
   8389 			}
   8390 		}
   8391 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8392 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   8393 			swfw_sync |= swmask;
   8394 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8395 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   8396 				wm_put_swsm_semaphore(sc);
   8397 			return 0;
   8398 		}
   8399 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   8400 			wm_put_swsm_semaphore(sc);
   8401 		delay(5000);
   8402 	}
   8403 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   8404 	    device_xname(sc->sc_dev), mask, swfw_sync);
   8405 	return 1;
   8406 }
   8407 
   8408 static void
   8409 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8410 {
   8411 	uint32_t swfw_sync;
   8412 
   8413 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8414 		while (wm_get_swsm_semaphore(sc) != 0)
   8415 			continue;
   8416 	}
   8417 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8418 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   8419 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8420 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   8421 		wm_put_swsm_semaphore(sc);
   8422 }
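         /*
          * Usage sketch (illustrative): the same get/put pairing guards
          * the other SWFW-protected resources, e.g. PHY register access:
          *
          *	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
          *		... MDIO access to PHY 0 ...
          *		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
          *	}
          */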
   8423 
   8424 static int
   8425 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   8426 {
   8427 	uint32_t ext_ctrl;
    8428 	int timeout;
   8429 
   8430 	for (timeout = 0; timeout < 200; timeout++) {
   8431 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8432 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   8433 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8434 
   8435 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8436 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   8437 			return 0;
   8438 		delay(5000);
   8439 	}
   8440 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   8441 	    device_xname(sc->sc_dev), ext_ctrl);
   8442 	return 1;
   8443 }
   8444 
   8445 static void
   8446 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   8447 {
   8448 	uint32_t ext_ctrl;
   8449 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8450 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   8451 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8452 }
   8453 
   8454 static int
   8455 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   8456 {
   8457 	int i = 0;
   8458 	uint32_t reg;
   8459 
   8460 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8461 	do {
   8462 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   8463 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   8464 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8465 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   8466 			break;
   8467 		delay(2*1000);
   8468 		i++;
   8469 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   8470 
   8471 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   8472 		wm_put_hw_semaphore_82573(sc);
   8473 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   8474 		    device_xname(sc->sc_dev));
   8475 		return -1;
   8476 	}
   8477 
   8478 	return 0;
   8479 }
   8480 
   8481 static void
   8482 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   8483 {
   8484 	uint32_t reg;
   8485 
   8486 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8487 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   8488 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8489 }
   8490 
   8491 /*
   8492  * Management mode and power management related subroutines.
   8493  * BMC, AMT, suspend/resume and EEE.
   8494  */
   8495 
   8496 static int
   8497 wm_check_mng_mode(struct wm_softc *sc)
   8498 {
   8499 	int rv;
   8500 
   8501 	switch (sc->sc_type) {
   8502 	case WM_T_ICH8:
   8503 	case WM_T_ICH9:
   8504 	case WM_T_ICH10:
   8505 	case WM_T_PCH:
   8506 	case WM_T_PCH2:
   8507 	case WM_T_PCH_LPT:
   8508 		rv = wm_check_mng_mode_ich8lan(sc);
   8509 		break;
   8510 	case WM_T_82574:
   8511 	case WM_T_82583:
   8512 		rv = wm_check_mng_mode_82574(sc);
   8513 		break;
   8514 	case WM_T_82571:
   8515 	case WM_T_82572:
   8516 	case WM_T_82573:
   8517 	case WM_T_80003:
   8518 		rv = wm_check_mng_mode_generic(sc);
   8519 		break;
   8520 	default:
    8521 		/* nothing to do */
   8522 		rv = 0;
   8523 		break;
   8524 	}
   8525 
   8526 	return rv;
   8527 }
   8528 
   8529 static int
   8530 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   8531 {
   8532 	uint32_t fwsm;
   8533 
   8534 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8535 
   8536 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   8537 		return 1;
   8538 
   8539 	return 0;
   8540 }
   8541 
   8542 static int
   8543 wm_check_mng_mode_82574(struct wm_softc *sc)
   8544 {
   8545 	uint16_t data;
   8546 
   8547 	wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8548 
   8549 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
   8550 		return 1;
   8551 
   8552 	return 0;
   8553 }
   8554 
   8555 static int
   8556 wm_check_mng_mode_generic(struct wm_softc *sc)
   8557 {
   8558 	uint32_t fwsm;
   8559 
   8560 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8561 
   8562 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   8563 		return 1;
   8564 
   8565 	return 0;
   8566 }
   8567 
   8568 static int
   8569 wm_enable_mng_pass_thru(struct wm_softc *sc)
   8570 {
   8571 	uint32_t manc, fwsm, factps;
   8572 
   8573 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   8574 		return 0;
   8575 
   8576 	manc = CSR_READ(sc, WMREG_MANC);
   8577 
   8578 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   8579 		device_xname(sc->sc_dev), manc));
   8580 	if ((manc & MANC_RECV_TCO_EN) == 0)
   8581 		return 0;
   8582 
   8583 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   8584 		fwsm = CSR_READ(sc, WMREG_FWSM);
   8585 		factps = CSR_READ(sc, WMREG_FACTPS);
   8586 		if (((factps & FACTPS_MNGCG) == 0)
   8587 		    && ((fwsm & FWSM_MODE_MASK)
   8588 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   8589 			return 1;
    8590 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   8591 		uint16_t data;
   8592 
   8593 		factps = CSR_READ(sc, WMREG_FACTPS);
   8594 		wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8595 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   8596 			device_xname(sc->sc_dev), factps, data));
   8597 		if (((factps & FACTPS_MNGCG) == 0)
   8598 		    && ((data & EEPROM_CFG2_MNGM_MASK)
   8599 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
   8600 			return 1;
   8601 	} else if (((manc & MANC_SMBUS_EN) != 0)
   8602 	    && ((manc & MANC_ASF_EN) == 0))
   8603 		return 1;
   8604 
   8605 	return 0;
   8606 }
   8607 
   8608 static int
   8609 wm_check_reset_block(struct wm_softc *sc)
   8610 {
   8611 	uint32_t reg;
   8612 
   8613 	switch (sc->sc_type) {
   8614 	case WM_T_ICH8:
   8615 	case WM_T_ICH9:
   8616 	case WM_T_ICH10:
   8617 	case WM_T_PCH:
   8618 	case WM_T_PCH2:
   8619 	case WM_T_PCH_LPT:
   8620 		reg = CSR_READ(sc, WMREG_FWSM);
   8621 		if ((reg & FWSM_RSPCIPHY) != 0)
   8622 			return 0;
   8623 		else
   8624 			return -1;
   8625 		break;
   8626 	case WM_T_82571:
   8627 	case WM_T_82572:
   8628 	case WM_T_82573:
   8629 	case WM_T_82574:
   8630 	case WM_T_82583:
   8631 	case WM_T_80003:
   8632 		reg = CSR_READ(sc, WMREG_MANC);
   8633 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8634 			return -1;
   8635 		else
   8636 			return 0;
   8637 		break;
   8638 	default:
    8639 		/* no reset block on these chips */
   8640 		break;
   8641 	}
   8642 
   8643 	return 0;
   8644 }
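         /*
          * Usage sketch (illustrative): callers are expected to test this
          * before resetting the PHY, since firmware may be using it:
          *
          *	if (wm_check_reset_block(sc) == 0)
          *		wm_gmii_reset(sc);	(PHY reset not blocked)
          */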
   8645 
   8646 static void
   8647 wm_get_hw_control(struct wm_softc *sc)
   8648 {
   8649 	uint32_t reg;
   8650 
   8651 	switch (sc->sc_type) {
   8652 	case WM_T_82573:
   8653 		reg = CSR_READ(sc, WMREG_SWSM);
   8654 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8655 		break;
   8656 	case WM_T_82571:
   8657 	case WM_T_82572:
   8658 	case WM_T_82574:
   8659 	case WM_T_82583:
   8660 	case WM_T_80003:
   8661 	case WM_T_ICH8:
   8662 	case WM_T_ICH9:
   8663 	case WM_T_ICH10:
   8664 	case WM_T_PCH:
   8665 	case WM_T_PCH2:
   8666 	case WM_T_PCH_LPT:
   8667 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8668 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8669 		break;
   8670 	default:
   8671 		break;
   8672 	}
   8673 }
   8674 
   8675 static void
   8676 wm_release_hw_control(struct wm_softc *sc)
   8677 {
   8678 	uint32_t reg;
   8679 
   8680 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8681 		return;
   8682 
   8683 	if (sc->sc_type == WM_T_82573) {
   8684 		reg = CSR_READ(sc, WMREG_SWSM);
   8685 		reg &= ~SWSM_DRV_LOAD;
    8686 		CSR_WRITE(sc, WMREG_SWSM, reg);
   8687 	} else {
   8688 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8690 	}
   8691 }
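         /*
          * Illustrative note: these two routines toggle the DRV_LOAD bit,
          * which tells the management firmware whether the host driver
          * currently owns the NIC.  Sketch of the pairing:
          *
          *	wm_get_hw_control(sc);		(init: driver owns the NIC)
          *	...
          *	wm_release_hw_control(sc);	(detach/stop: back to firmware)
          */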
   8692 
   8693 static void
   8694 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   8695 {
   8696 	uint32_t reg;
   8697 
   8698 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8699 
   8700 	if (on != 0)
   8701 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   8702 	else
   8703 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   8704 
   8705 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8706 }
   8707 
   8708 static void
   8709 wm_smbustopci(struct wm_softc *sc)
   8710 {
   8711 	uint32_t fwsm;
   8712 
   8713 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8714 	if (((fwsm & FWSM_FW_VALID) == 0)
   8715 	    && ((wm_check_reset_block(sc) == 0))) {
   8716 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   8717 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   8718 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8719 		CSR_WRITE_FLUSH(sc);
   8720 		delay(10);
   8721 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   8722 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8723 		CSR_WRITE_FLUSH(sc);
   8724 		delay(50*1000);
   8725 
   8726 		/*
   8727 		 * Gate automatic PHY configuration by hardware on non-managed
   8728 		 * 82579
   8729 		 */
   8730 		if (sc->sc_type == WM_T_PCH2)
   8731 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   8732 	}
   8733 }
   8734 
   8735 static void
   8736 wm_init_manageability(struct wm_softc *sc)
   8737 {
   8738 
   8739 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8740 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8741 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8742 
   8743 		/* Disable hardware interception of ARP */
   8744 		manc &= ~MANC_ARP_EN;
   8745 
   8746 		/* Enable receiving management packets to the host */
   8747 		if (sc->sc_type >= WM_T_82571) {
   8748 			manc |= MANC_EN_MNG2HOST;
    8749 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   8750 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   8751 
   8752 		}
   8753 
   8754 		CSR_WRITE(sc, WMREG_MANC, manc);
   8755 	}
   8756 }
   8757 
   8758 static void
   8759 wm_release_manageability(struct wm_softc *sc)
   8760 {
   8761 
   8762 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8763 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8764 
   8765 		manc |= MANC_ARP_EN;
   8766 		if (sc->sc_type >= WM_T_82571)
   8767 			manc &= ~MANC_EN_MNG2HOST;
   8768 
   8769 		CSR_WRITE(sc, WMREG_MANC, manc);
   8770 	}
   8771 }
   8772 
   8773 static void
   8774 wm_get_wakeup(struct wm_softc *sc)
   8775 {
   8776 
   8777 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   8778 	switch (sc->sc_type) {
   8779 	case WM_T_82573:
   8780 	case WM_T_82583:
   8781 		sc->sc_flags |= WM_F_HAS_AMT;
   8782 		/* FALLTHROUGH */
   8783 	case WM_T_80003:
   8784 	case WM_T_82541:
   8785 	case WM_T_82547:
   8786 	case WM_T_82571:
   8787 	case WM_T_82572:
   8788 	case WM_T_82574:
   8789 	case WM_T_82575:
   8790 	case WM_T_82576:
   8791 	case WM_T_82580:
   8792 	case WM_T_82580ER:
   8793 	case WM_T_I350:
   8794 	case WM_T_I354:
   8795 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   8796 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   8797 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8798 		break;
   8799 	case WM_T_ICH8:
   8800 	case WM_T_ICH9:
   8801 	case WM_T_ICH10:
   8802 	case WM_T_PCH:
   8803 	case WM_T_PCH2:
   8804 	case WM_T_PCH_LPT:
   8805 		sc->sc_flags |= WM_F_HAS_AMT;
   8806 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8807 		break;
   8808 	default:
   8809 		break;
   8810 	}
   8811 
   8812 	/* 1: HAS_MANAGE */
   8813 	if (wm_enable_mng_pass_thru(sc) != 0)
   8814 		sc->sc_flags |= WM_F_HAS_MANAGE;
   8815 
   8816 #ifdef WM_DEBUG
   8817 	printf("\n");
   8818 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   8819 		printf("HAS_AMT,");
   8820 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   8821 		printf("ARC_SUBSYS_VALID,");
   8822 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   8823 		printf("ASF_FIRMWARE_PRES,");
   8824 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   8825 		printf("HAS_MANAGE,");
   8826 	printf("\n");
   8827 #endif
   8828 	/*
    8829 	 * Note that the WOL flags are set after the EEPROM reset code
    8830 	 * has run.
   8831 	 */
   8832 }
   8833 
   8834 #ifdef WM_WOL
   8835 /* WOL in the newer chipset interfaces (pchlan) */
   8836 static void
   8837 wm_enable_phy_wakeup(struct wm_softc *sc)
   8838 {
   8839 #if 0
   8840 	uint16_t preg;
   8841 
   8842 	/* Copy MAC RARs to PHY RARs */
   8843 
   8844 	/* Copy MAC MTA to PHY MTA */
   8845 
   8846 	/* Configure PHY Rx Control register */
   8847 
   8848 	/* Enable PHY wakeup in MAC register */
   8849 
   8850 	/* Configure and enable PHY wakeup in PHY registers */
   8851 
   8852 	/* Activate PHY wakeup */
   8853 
   8854 	/* XXX */
   8855 #endif
   8856 }
   8857 
   8858 /* Power down workaround on D3 */
   8859 static void
   8860 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   8861 {
   8862 	uint32_t reg;
   8863 	int i;
   8864 
   8865 	for (i = 0; i < 2; i++) {
   8866 		/* Disable link */
   8867 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8868 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   8869 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8870 
   8871 		/*
   8872 		 * Call gig speed drop workaround on Gig disable before
   8873 		 * accessing any PHY registers
   8874 		 */
   8875 		if (sc->sc_type == WM_T_ICH8)
   8876 			wm_gig_downshift_workaround_ich8lan(sc);
   8877 
   8878 		/* Write VR power-down enable */
   8879 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8880 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8881 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   8882 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   8883 
   8884 		/* Read it back and test */
   8885 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8886 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8887 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   8888 			break;
   8889 
   8890 		/* Issue PHY reset and repeat at most one more time */
   8891 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8892 	}
   8893 }
   8894 
   8895 static void
   8896 wm_enable_wakeup(struct wm_softc *sc)
   8897 {
   8898 	uint32_t reg, pmreg;
   8899 	pcireg_t pmode;
   8900 
   8901 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   8902 		&pmreg, NULL) == 0)
   8903 		return;
   8904 
   8905 	/* Advertise the wakeup capability */
   8906 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   8907 	    | CTRL_SWDPIN(3));
   8908 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   8909 
   8910 	/* ICH workaround */
   8911 	switch (sc->sc_type) {
   8912 	case WM_T_ICH8:
   8913 	case WM_T_ICH9:
   8914 	case WM_T_ICH10:
   8915 	case WM_T_PCH:
   8916 	case WM_T_PCH2:
   8917 	case WM_T_PCH_LPT:
   8918 		/* Disable gig during WOL */
   8919 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8920 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   8921 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8922 		if (sc->sc_type == WM_T_PCH)
   8923 			wm_gmii_reset(sc);
   8924 
   8925 		/* Power down workaround */
   8926 		if (sc->sc_phytype == WMPHY_82577) {
   8927 			struct mii_softc *child;
   8928 
   8929 			/* Assume that the PHY is copper */
   8930 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   8931 			if (child->mii_mpd_rev <= 2)
   8932 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   8933 				    (768 << 5) | 25, 0x0444); /* magic num */
   8934 		}
   8935 		break;
   8936 	default:
   8937 		break;
   8938 	}
   8939 
   8940 	/* Keep the laser running on fiber adapters */
    8941 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
    8942 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
   8943 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8944 		reg |= CTRL_EXT_SWDPIN(3);
   8945 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8946 	}
   8947 
   8948 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   8949 #if 0	/* for the multicast packet */
   8950 	reg |= WUFC_MC;
   8951 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   8952 #endif
   8953 
   8954 	if (sc->sc_type == WM_T_PCH) {
   8955 		wm_enable_phy_wakeup(sc);
   8956 	} else {
   8957 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   8958 		CSR_WRITE(sc, WMREG_WUFC, reg);
   8959 	}
   8960 
   8961 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8962 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8963 		|| (sc->sc_type == WM_T_PCH2))
   8964 		    && (sc->sc_phytype == WMPHY_IGP_3))
   8965 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   8966 
   8967 	/* Request PME */
   8968 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   8969 #if 0
   8970 	/* Disable WOL */
   8971 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   8972 #else
   8973 	/* For WOL */
   8974 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   8975 #endif
   8976 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   8977 }
   8978 #endif /* WM_WOL */
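         /*
          * A minimal sketch (assumptions: a pmf-style suspend hook named
          * wm_suspend; details of the real hook-up are not shown here) of
          * how the WOL path above would be reached:
          *
          *	static bool
          *	wm_suspend(device_t self, const pmf_qual_t *qual)
          *	{
          *		struct wm_softc *sc = device_private(self);
          *
          *	#ifdef WM_WOL
          *		wm_enable_wakeup(sc);
          *	#endif
          *		return true;
          *	}
          */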
   8979 
   8980 /* EEE */
   8981 
   8982 static void
   8983 wm_set_eee_i350(struct wm_softc *sc)
   8984 {
   8985 	uint32_t ipcnfg, eeer;
   8986 
   8987 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   8988 	eeer = CSR_READ(sc, WMREG_EEER);
   8989 
   8990 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   8991 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   8992 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   8993 		    | EEER_LPI_FC);
   8994 	} else {
   8995 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   8996 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   8997 		    | EEER_LPI_FC);
   8998 	}
   8999 
   9000 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   9001 	CSR_WRITE(sc, WMREG_EEER, eeer);
   9002 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   9003 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   9004 }
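         /*
          * Illustrative sketch (hypothetical caller, not in this section):
          * toggling EEE at run time means flipping the flag and re-running
          * the routine above:
          *
          *	sc->sc_flags |= WM_F_EEE;	(or &= ~WM_F_EEE to disable)
          *	wm_set_eee_i350(sc);
          */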
   9005 
   9006 /*
   9007  * Workarounds (mainly PHY related).
    9008  * Basically, PHY workarounds belong in the PHY drivers.
   9009  */
   9010 
   9011 /* Work-around for 82566 Kumeran PCS lock loss */
   9012 static void
   9013 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   9014 {
   9015 	int miistatus, active, i;
   9016 	int reg;
   9017 
   9018 	miistatus = sc->sc_mii.mii_media_status;
   9019 
   9020 	/* If the link is not up, do nothing */
    9021 	if ((miistatus & IFM_ACTIVE) == 0)
   9022 		return;
   9023 
   9024 	active = sc->sc_mii.mii_media_active;
   9025 
    9026 	/* Nothing to do if the link speed is anything other than 1Gbps */
   9027 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   9028 		return;
   9029 
   9030 	for (i = 0; i < 10; i++) {
   9031 		/* read twice */
   9032 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   9033 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    9034 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   9035 			goto out;	/* GOOD! */
   9036 
   9037 		/* Reset the PHY */
   9038 		wm_gmii_reset(sc);
   9039 		delay(5*1000);
   9040 	}
   9041 
   9042 	/* Disable GigE link negotiation */
   9043 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9044 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9045 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9046 
   9047 	/*
   9048 	 * Call gig speed drop workaround on Gig disable before accessing
   9049 	 * any PHY registers.
   9050 	 */
   9051 	wm_gig_downshift_workaround_ich8lan(sc);
   9052 
   9053 out:
   9054 	return;
   9055 }
   9056 
   9057 /* WOL from S5 stops working */
   9058 static void
   9059 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   9060 {
   9061 	uint16_t kmrn_reg;
   9062 
   9063 	/* Only for igp3 */
   9064 	if (sc->sc_phytype == WMPHY_IGP_3) {
   9065 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   9066 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   9067 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9068 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   9069 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9070 	}
   9071 }
   9072 
   9073 /*
   9074  * Workaround for pch's PHYs
   9075  * XXX should be moved to new PHY driver?
   9076  */
   9077 static void
   9078 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   9079 {
   9080 	if (sc->sc_phytype == WMPHY_82577)
   9081 		wm_set_mdio_slow_mode_hv(sc);
   9082 
   9083 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   9084 
   9085 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   9086 
   9087 	/* 82578 */
   9088 	if (sc->sc_phytype == WMPHY_82578) {
   9089 		/* PCH rev. < 3 */
   9090 		if (sc->sc_rev < 3) {
   9091 			/* XXX 6 bit shift? Why? Is it page2? */
   9092 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   9093 			    0x66c0);
   9094 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   9095 			    0xffff);
   9096 		}
   9097 
   9098 		/* XXX phy rev. < 2 */
   9099 	}
   9100 
   9101 	/* Select page 0 */
   9102 
   9103 	/* XXX acquire semaphore */
   9104 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   9105 	/* XXX release semaphore */
   9106 
   9107 	/*
    9108 	 * Configure the K1 Si workaround during PHY reset, assuming there
    9109 	 * is a link, so that K1 is disabled if the link runs at 1Gbps.
   9110 	 */
   9111 	wm_k1_gig_workaround_hv(sc, 1);
   9112 }
   9113 
   9114 static void
   9115 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   9116 {
   9117 
   9118 	wm_set_mdio_slow_mode_hv(sc);
   9119 }
   9120 
   9121 static void
   9122 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   9123 {
   9124 	int k1_enable = sc->sc_nvm_k1_enabled;
   9125 
   9126 	/* XXX acquire semaphore */
   9127 
   9128 	if (link) {
   9129 		k1_enable = 0;
   9130 
   9131 		/* Link stall fix for link up */
   9132 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   9133 	} else {
   9134 		/* Link stall fix for link down */
   9135 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   9136 	}
   9137 
   9138 	wm_configure_k1_ich8lan(sc, k1_enable);
   9139 
   9140 	/* XXX release semaphore */
   9141 }
   9142 
   9143 static void
   9144 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   9145 {
   9146 	uint32_t reg;
   9147 
   9148 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   9149 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   9150 	    reg | HV_KMRN_MDIO_SLOW);
   9151 }
   9152 
   9153 static void
   9154 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   9155 {
   9156 	uint32_t ctrl, ctrl_ext, tmp;
   9157 	uint16_t kmrn_reg;
   9158 
   9159 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   9160 
   9161 	if (k1_enable)
   9162 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   9163 	else
   9164 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   9165 
   9166 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   9167 
   9168 	delay(20);
   9169 
   9170 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9171 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9172 
   9173 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   9174 	tmp |= CTRL_FRCSPD;
   9175 
   9176 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   9177 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   9178 	CSR_WRITE_FLUSH(sc);
   9179 	delay(20);
   9180 
   9181 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   9182 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9183 	CSR_WRITE_FLUSH(sc);
   9184 	delay(20);
   9185 }
   9186 
    9187 /* Special case - the 82575 needs manual init ... */
   9188 static void
   9189 wm_reset_init_script_82575(struct wm_softc *sc)
   9190 {
   9191 	/*
    9192 	 * Remark: this is untested code - we have no board without EEPROM.
    9193 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   9194 	 */
   9195 
   9196 	/* SerDes configuration via SERDESCTRL */
   9197 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   9198 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   9199 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   9200 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   9201 
   9202 	/* CCM configuration via CCMCTL register */
   9203 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   9204 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   9205 
   9206 	/* PCIe lanes configuration */
   9207 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   9208 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   9209 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   9210 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   9211 
   9212 	/* PCIe PLL Configuration */
   9213 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   9214 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   9215 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   9216 }
   9217