/*	$NetBSD: if_wm.c,v 1.297 2014/09/11 17:09:04 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.297 2014/09/11 17:09:04 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
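
/*
 * DPRINTF()'s second argument is a parenthesized printf() argument
 * list, so calls take double parentheses, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */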

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
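
/*
 * A worked example of the mask arithmetic above: both ring sizes are
 * powers of two, so "& (size - 1)" is a cheap modulo.  With
 * WM_NTXDESC(sc) == 4096 the mask is 0xfff, and WM_NEXTTX(sc, 4095)
 * yields (4096 & 0xfff) == 0, wrapping to the start of the ring
 * without a division or a branch.  This is why sc_ntxdesc and
 * sc_txnum must be powers of two.
 */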

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
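
/*
 * To spell out the arithmetic in the comment above: a ~9k jumbo frame
 * spans ceil(9018 / 2048) = 5 of the 2k (MCLBYTES) buffers, and
 * 256 descriptors / 5 buffers per frame is roughly 51 frames, stated
 * conservatively as "room for 50".
 */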

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		do { if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock); } while (0)
#define WM_TX_UNLOCK(_sc)	do { if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock); } while (0)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		do { if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock); } while (0)
#define WM_RX_UNLOCK(_sc)	do { if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock); } while (0)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
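
/*
 * How the chaining above works: sc_rxtailp always points at the
 * pointer to update next -- &sc_rxhead when the chain is empty, and
 * &(last mbuf)->m_next afterwards.  WM_RXCHAIN_LINK() can therefore
 * append an mbuf in O(1) without walking the chain, which matters
 * for jumbo frames spread over several 2k buffers.
 */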

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
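
/*
 * Example of the 64-bit split above: a descriptor DMA address of
 * 0x0000000123456000 is programmed into the chip as HI = 0x00000001
 * and LO = 0x23456000.  With a 32-bit bus_addr_t the HI half is a
 * compile-time zero.
 */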

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
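
/*
 * Worked example for WM_CDTXSYNC(): syncing 4 descriptors starting at
 * index 4094 in a 4096-entry ring wraps, so the macro issues two
 * bus_dmamap_sync() calls -- one for descriptors 4094-4095 and one
 * for descriptors 0-1 -- since each dmamap sync can only cover a
 * contiguous byte range of the map.
 */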

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
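
/*
 * The 2-byte "scoot" arithmetic: an Ethernet header is 14 bytes, so
 * with the payload starting at buffer offset 2 the header ends at
 * offset 16 and the IP header that follows lands on a 4-byte
 * boundary, which is what strict-alignment platforms require.
 */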

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		0x00
#define	WMP_F_FIBER		0x01
#define	WMP_F_COPPER		0x02
#define	WMP_F_SERDES		0x03 /* Internal SERDES */
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

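/*
 * Indirect I/O access: the I/O BAR exposes a two-register window.
 * The register offset is first written at BAR offset 0 and the data
 * is then read or written at BAR offset 4, which is exactly what the
 * accessors above and below do.
 */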
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
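
/*
 * Usage sketch for wm_82575_write_8bit_ctlr_reg() (illustrative only;
 * the register macro named here is hypothetical): writing value 0x8f
 * to 8-bit register 0 of an embedded controller behind an SCTL-style
 * command register would look like
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0, 0x8f);
 *
 * The function then polls, up to SCTL_CTL_POLL_TIMEOUT iterations
 * 5us apart, for the READY bit and warns if it times out.
 */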

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

   1329 	/*
    1330 	 * Map the device.  All devices support memory-mapped access,
   1331 	 * and it is really required for normal operation.
   1332 	 */
   1333 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1334 	switch (memtype) {
   1335 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1336 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1337 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1338 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1339 		break;
   1340 	default:
   1341 		memh_valid = 0;
   1342 		break;
   1343 	}
   1344 
   1345 	if (memh_valid) {
   1346 		sc->sc_st = memt;
   1347 		sc->sc_sh = memh;
   1348 		sc->sc_ss = memsize;
   1349 	} else {
   1350 		aprint_error_dev(sc->sc_dev,
   1351 		    "unable to map device registers\n");
   1352 		return;
   1353 	}
   1354 
   1355 	/*
   1356 	 * In addition, i82544 and later support I/O mapped indirect
   1357 	 * register access.  It is not desirable (nor supported in
   1358 	 * this driver) to use it for normal operation, though it is
   1359 	 * required to work around bugs in some chip versions.
   1360 	 */
   1361 	if (sc->sc_type >= WM_T_82544) {
   1362 		/* First we have to find the I/O BAR. */
   1363 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1364 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1365 			if (memtype == PCI_MAPREG_TYPE_IO)
   1366 				break;
   1367 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1368 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1369 				i += 4;	/* skip high bits, too */
   1370 		}
   1371 		if (i < PCI_MAPREG_END) {
   1372 			/*
    1373 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1374 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1375 			 * That's no problem, because the newer chips don't
    1376 			 * have this bug.
    1377 			 *
    1378 			 * The i8254x apparently doesn't respond when the
    1379 			 * I/O BAR is 0, which suggests that it hasn't
    1380 			 * been configured.
   1381 			 */
   1382 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1383 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1384 				aprint_error_dev(sc->sc_dev,
   1385 				    "WARNING: I/O BAR at zero.\n");
   1386 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1387 					0, &sc->sc_iot, &sc->sc_ioh,
   1388 					NULL, &sc->sc_ios) == 0) {
   1389 				sc->sc_flags |= WM_F_IOH_VALID;
   1390 			} else {
   1391 				aprint_error_dev(sc->sc_dev,
   1392 				    "WARNING: unable to map I/O space\n");
   1393 			}
   1394 		}
   1395 
   1396 	}
   1397 
   1398 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1399 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1400 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1401 	if (sc->sc_type < WM_T_82542_2_1)
   1402 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1403 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1404 
   1405 	/* power up chip */
   1406 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1407 	    NULL)) && error != EOPNOTSUPP) {
   1408 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1409 		return;
   1410 	}
   1411 
   1412 	/*
   1413 	 * Map and establish our interrupt.
   1414 	 */
   1415 	if (pci_intr_map(pa, &ih)) {
   1416 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1417 		return;
   1418 	}
   1419 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1420 #ifdef WM_MPSAFE
   1421 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1422 #endif
   1423 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1424 	if (sc->sc_ih == NULL) {
   1425 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1426 		if (intrstr != NULL)
   1427 			aprint_error(" at %s", intrstr);
   1428 		aprint_error("\n");
   1429 		return;
   1430 	}
   1431 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1432 
   1433 	/*
   1434 	 * Check the function ID (unit number of the chip).
   1435 	 */
   1436 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1437 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1438 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1439 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1440 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1441 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1442 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1443 	else
   1444 		sc->sc_funcid = 0;
   1445 
   1446 	/*
   1447 	 * Determine a few things about the bus we're connected to.
   1448 	 */
   1449 	if (sc->sc_type < WM_T_82543) {
   1450 		/* We don't really know the bus characteristics here. */
   1451 		sc->sc_bus_speed = 33;
   1452 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1453 		/*
    1454 		 * CSA (Communication Streaming Architecture) is about as
    1455 		 * fast as a 32-bit 66MHz PCI bus.
   1456 		 */
   1457 		sc->sc_flags |= WM_F_CSA;
   1458 		sc->sc_bus_speed = 66;
   1459 		aprint_verbose_dev(sc->sc_dev,
   1460 		    "Communication Streaming Architecture\n");
   1461 		if (sc->sc_type == WM_T_82547) {
   1462 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1463 			callout_setfunc(&sc->sc_txfifo_ch,
   1464 					wm_82547_txfifo_stall, sc);
   1465 			aprint_verbose_dev(sc->sc_dev,
   1466 			    "using 82547 Tx FIFO stall work-around\n");
   1467 		}
   1468 	} else if (sc->sc_type >= WM_T_82571) {
   1469 		sc->sc_flags |= WM_F_PCIE;
   1470 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1471 		    && (sc->sc_type != WM_T_ICH10)
   1472 		    && (sc->sc_type != WM_T_PCH)
   1473 		    && (sc->sc_type != WM_T_PCH2)
   1474 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1475 			/* ICH* and PCH* have no PCIe capability registers */
   1476 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1477 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1478 				NULL) == 0)
   1479 				aprint_error_dev(sc->sc_dev,
   1480 				    "unable to find PCIe capability\n");
   1481 		}
   1482 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1483 	} else {
   1484 		reg = CSR_READ(sc, WMREG_STATUS);
   1485 		if (reg & STATUS_BUS64)
   1486 			sc->sc_flags |= WM_F_BUS64;
   1487 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1488 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1489 
   1490 			sc->sc_flags |= WM_F_PCIX;
   1491 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1492 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1493 				aprint_error_dev(sc->sc_dev,
   1494 				    "unable to find PCIX capability\n");
   1495 			else if (sc->sc_type != WM_T_82545_3 &&
   1496 				 sc->sc_type != WM_T_82546_3) {
   1497 				/*
   1498 				 * Work around a problem caused by the BIOS
   1499 				 * setting the max memory read byte count
   1500 				 * incorrectly.
   1501 				 */
   1502 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1503 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1504 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1505 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1506 
   1507 				bytecnt =
   1508 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1509 				    PCIX_CMD_BYTECNT_SHIFT;
   1510 				maxb =
   1511 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1512 				    PCIX_STATUS_MAXB_SHIFT;
   1513 				if (bytecnt > maxb) {
   1514 					aprint_verbose_dev(sc->sc_dev,
   1515 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1516 					    512 << bytecnt, 512 << maxb);
   1517 					pcix_cmd = (pcix_cmd &
   1518 					    ~PCIX_CMD_BYTECNT_MASK) |
   1519 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1520 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1521 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1522 					    pcix_cmd);
   1523 				}
   1524 			}
   1525 		}
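		/*
		 * Illustrative arithmetic for the workaround above: the
		 * MMRBC fields encode 512 << n bytes, so if the BIOS left
		 * bytecnt = 3 (4096 bytes) while the device only supports
		 * maxb = 1 (1024 bytes), PCIX_CMD is rewritten so that the
		 * byte count field matches the 1024-byte maximum.
		 */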
   1526 		/*
   1527 		 * The quad port adapter is special; it has a PCIX-PCIX
   1528 		 * bridge on the board, and can run the secondary bus at
   1529 		 * a higher speed.
   1530 		 */
   1531 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1532 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1533 								      : 66;
   1534 		} else if (sc->sc_flags & WM_F_PCIX) {
   1535 			switch (reg & STATUS_PCIXSPD_MASK) {
   1536 			case STATUS_PCIXSPD_50_66:
   1537 				sc->sc_bus_speed = 66;
   1538 				break;
   1539 			case STATUS_PCIXSPD_66_100:
   1540 				sc->sc_bus_speed = 100;
   1541 				break;
   1542 			case STATUS_PCIXSPD_100_133:
   1543 				sc->sc_bus_speed = 133;
   1544 				break;
   1545 			default:
   1546 				aprint_error_dev(sc->sc_dev,
   1547 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1548 				    reg & STATUS_PCIXSPD_MASK);
   1549 				sc->sc_bus_speed = 66;
   1550 				break;
   1551 			}
   1552 		} else
   1553 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1554 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1555 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1556 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1557 	}
   1558 
   1559 	/*
   1560 	 * Allocate the control data structures, and create and load the
   1561 	 * DMA map for it.
   1562 	 *
   1563 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1564 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1565 	 * both sets within the same 4G segment.
   1566 	 */
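	/*
	 * The constraint is enforced below by passing a 4GB boundary
	 * (0x100000000ULL) to bus_dmamem_alloc(), which guarantees the
	 * allocated segment never crosses a 4GB line.
	 */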
   1567 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1568 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1569 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1570 	    sizeof(struct wm_control_data_82542) :
   1571 	    sizeof(struct wm_control_data_82544);
   1572 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1573 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1574 		    &sc->sc_cd_rseg, 0)) != 0) {
   1575 		aprint_error_dev(sc->sc_dev,
   1576 		    "unable to allocate control data, error = %d\n",
   1577 		    error);
   1578 		goto fail_0;
   1579 	}
   1580 
   1581 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1582 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1583 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1584 		aprint_error_dev(sc->sc_dev,
   1585 		    "unable to map control data, error = %d\n", error);
   1586 		goto fail_1;
   1587 	}
   1588 
   1589 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1590 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1591 		aprint_error_dev(sc->sc_dev,
   1592 		    "unable to create control data DMA map, error = %d\n",
   1593 		    error);
   1594 		goto fail_2;
   1595 	}
   1596 
   1597 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1598 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1599 		aprint_error_dev(sc->sc_dev,
   1600 		    "unable to load control data DMA map, error = %d\n",
   1601 		    error);
   1602 		goto fail_3;
   1603 	}
   1604 
   1605 	/* Create the transmit buffer DMA maps. */
   1606 	WM_TXQUEUELEN(sc) =
   1607 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1608 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1609 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1610 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1611 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1612 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1613 			aprint_error_dev(sc->sc_dev,
   1614 			    "unable to create Tx DMA map %d, error = %d\n",
   1615 			    i, error);
   1616 			goto fail_4;
   1617 		}
   1618 	}
   1619 
   1620 	/* Create the receive buffer DMA maps. */
   1621 	for (i = 0; i < WM_NRXDESC; i++) {
   1622 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1623 			    MCLBYTES, 0, 0,
   1624 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1625 			aprint_error_dev(sc->sc_dev,
   1626 			    "unable to create Rx DMA map %d error = %d\n",
   1627 			    i, error);
   1628 			goto fail_5;
   1629 		}
   1630 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1631 	}
   1632 
   1633 	/* clear interesting stat counters */
   1634 	CSR_READ(sc, WMREG_COLC);
   1635 	CSR_READ(sc, WMREG_RXERRC);
   1636 
   1637 	/* get PHY control from SMBus to PCIe */
   1638 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1639 	    || (sc->sc_type == WM_T_PCH_LPT))
   1640 		wm_smbustopci(sc);
   1641 
   1642 	/* Reset the chip to a known state. */
   1643 	wm_reset(sc);
   1644 
   1645 	/* Get some information about the EEPROM. */
   1646 	switch (sc->sc_type) {
   1647 	case WM_T_82542_2_0:
   1648 	case WM_T_82542_2_1:
   1649 	case WM_T_82543:
   1650 	case WM_T_82544:
   1651 		/* Microwire */
   1652 		sc->sc_nvm_wordsize = 64;
   1653 		sc->sc_nvm_addrbits = 6;
   1654 		break;
   1655 	case WM_T_82540:
   1656 	case WM_T_82545:
   1657 	case WM_T_82545_3:
   1658 	case WM_T_82546:
   1659 	case WM_T_82546_3:
   1660 		/* Microwire */
   1661 		reg = CSR_READ(sc, WMREG_EECD);
   1662 		if (reg & EECD_EE_SIZE) {
   1663 			sc->sc_nvm_wordsize = 256;
   1664 			sc->sc_nvm_addrbits = 8;
   1665 		} else {
   1666 			sc->sc_nvm_wordsize = 64;
   1667 			sc->sc_nvm_addrbits = 6;
   1668 		}
   1669 		sc->sc_flags |= WM_F_LOCK_EECD;
   1670 		break;
   1671 	case WM_T_82541:
   1672 	case WM_T_82541_2:
   1673 	case WM_T_82547:
   1674 	case WM_T_82547_2:
   1675 		reg = CSR_READ(sc, WMREG_EECD);
   1676 		if (reg & EECD_EE_TYPE) {
   1677 			/* SPI */
   1678 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1679 			wm_nvm_set_addrbits_size_eecd(sc);
   1680 		} else {
   1681 			/* Microwire */
   1682 			if ((reg & EECD_EE_ABITS) != 0) {
   1683 				sc->sc_nvm_wordsize = 256;
   1684 				sc->sc_nvm_addrbits = 8;
   1685 			} else {
   1686 				sc->sc_nvm_wordsize = 64;
   1687 				sc->sc_nvm_addrbits = 6;
   1688 			}
   1689 		}
   1690 		sc->sc_flags |= WM_F_LOCK_EECD;
   1691 		break;
   1692 	case WM_T_82571:
   1693 	case WM_T_82572:
   1694 		/* SPI */
   1695 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1696 		wm_nvm_set_addrbits_size_eecd(sc);
   1697 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1698 		break;
   1699 	case WM_T_82573:
   1700 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1701 		/* FALLTHROUGH */
   1702 	case WM_T_82574:
   1703 	case WM_T_82583:
   1704 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1705 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1706 			sc->sc_nvm_wordsize = 2048;
   1707 		} else {
   1708 			/* SPI */
   1709 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1710 			wm_nvm_set_addrbits_size_eecd(sc);
   1711 		}
   1712 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1713 		break;
   1714 	case WM_T_82575:
   1715 	case WM_T_82576:
   1716 	case WM_T_82580:
   1717 	case WM_T_82580ER:
   1718 	case WM_T_I350:
   1719 	case WM_T_I354:
   1720 	case WM_T_80003:
   1721 		/* SPI */
   1722 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1723 		wm_nvm_set_addrbits_size_eecd(sc);
   1724 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1725 		    | WM_F_LOCK_SWSM;
   1726 		break;
   1727 	case WM_T_ICH8:
   1728 	case WM_T_ICH9:
   1729 	case WM_T_ICH10:
   1730 	case WM_T_PCH:
   1731 	case WM_T_PCH2:
   1732 	case WM_T_PCH_LPT:
   1733 		/* FLASH */
   1734 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1735 		sc->sc_nvm_wordsize = 2048;
   1736 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1737 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1738 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1739 			aprint_error_dev(sc->sc_dev,
   1740 			    "can't map FLASH registers\n");
   1741 			goto fail_5;
   1742 		}
   1743 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1744 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1745 						ICH_FLASH_SECTOR_SIZE;
   1746 		sc->sc_ich8_flash_bank_size =
   1747 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1748 		sc->sc_ich8_flash_bank_size -=
   1749 		    (reg & ICH_GFPREG_BASE_MASK);
   1750 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1751 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
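		/*
		 * Worked example, assuming the 4KB flash sector size used
		 * here: GFPREG = 0x001f0000 means base sector 0 and top
		 * sector 0x1f, i.e. a 32-sector (128KB) NVM region; split
		 * over two banks and counted in 16-bit words, that is
		 * 32768 words per bank.
		 */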
   1752 		break;
   1753 	case WM_T_I210:
   1754 	case WM_T_I211:
   1755 		wm_nvm_set_addrbits_size_eecd(sc);
   1756 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1757 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1758 		break;
   1759 	default:
   1760 		break;
   1761 	}
   1762 
   1763 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1764 	switch (sc->sc_type) {
   1765 	case WM_T_82571:
   1766 	case WM_T_82572:
   1767 		reg = CSR_READ(sc, WMREG_SWSM2);
   1768 		if ((reg & SWSM2_LOCK) != 0) {
   1769 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1770 			force_clear_smbi = true;
   1771 		} else
   1772 			force_clear_smbi = false;
   1773 		break;
   1774 	case WM_T_82573:
   1775 	case WM_T_82574:
   1776 	case WM_T_82583:
   1777 		force_clear_smbi = true;
   1778 		break;
   1779 	default:
   1780 		force_clear_smbi = false;
   1781 		break;
   1782 	}
   1783 	if (force_clear_smbi) {
   1784 		reg = CSR_READ(sc, WMREG_SWSM);
   1785 		if ((reg & SWSM_SMBI) != 0)
   1786 			aprint_error_dev(sc->sc_dev,
   1787 			    "Please update the Bootagent\n");
   1788 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1789 	}
   1790 
   1791 	/*
    1792 	 * Defer printing the EEPROM type until after verifying the checksum.
   1793 	 * This allows the EEPROM type to be printed correctly in the case
   1794 	 * that no EEPROM is attached.
   1795 	 */
   1796 	/*
   1797 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1798 	 * this for later, so we can fail future reads from the EEPROM.
   1799 	 */
   1800 	if (wm_nvm_validate_checksum(sc)) {
   1801 		/*
    1802 		 * Validate again, because some PCI-e parts fail the first
    1803 		 * check due to the link being in a sleep state.
   1804 		 */
   1805 		if (wm_nvm_validate_checksum(sc))
   1806 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1807 	}
   1808 
   1809 	/* Set device properties (macflags) */
   1810 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1811 
   1812 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1813 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1814 	else {
   1815 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1816 		    sc->sc_nvm_wordsize);
   1817 		if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1818 			aprint_verbose("FLASH(HW)\n");
   1819 		} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1820 			aprint_verbose("FLASH\n");
   1821 		} else {
   1822 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1823 				eetype = "SPI";
   1824 			else
   1825 				eetype = "MicroWire";
   1826 			aprint_verbose("(%d address bits) %s EEPROM\n",
   1827 			    sc->sc_nvm_addrbits, eetype);
   1828 		}
   1829 	}
   1830 
   1831 	switch (sc->sc_type) {
   1832 	case WM_T_82571:
   1833 	case WM_T_82572:
   1834 	case WM_T_82573:
   1835 	case WM_T_82574:
   1836 	case WM_T_82583:
   1837 	case WM_T_80003:
   1838 	case WM_T_ICH8:
   1839 	case WM_T_ICH9:
   1840 	case WM_T_ICH10:
   1841 	case WM_T_PCH:
   1842 	case WM_T_PCH2:
   1843 	case WM_T_PCH_LPT:
   1844 		if (wm_check_mng_mode(sc) != 0)
   1845 			wm_get_hw_control(sc);
   1846 		break;
   1847 	default:
   1848 		break;
   1849 	}
   1850 	wm_get_wakeup(sc);
   1851 	/*
    1852 	 * Read the Ethernet address from the EEPROM, unless it was
    1853 	 * found first in the device properties.
   1854 	 */
   1855 	ea = prop_dictionary_get(dict, "mac-address");
   1856 	if (ea != NULL) {
   1857 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1858 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1859 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1860 	} else {
   1861 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1862 			aprint_error_dev(sc->sc_dev,
   1863 			    "unable to read Ethernet address\n");
   1864 			goto fail_5;
   1865 		}
   1866 	}
   1867 
   1868 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1869 	    ether_sprintf(enaddr));
   1870 
   1871 	/*
   1872 	 * Read the config info from the EEPROM, and set up various
   1873 	 * bits in the control registers based on their contents.
   1874 	 */
   1875 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1876 	if (pn != NULL) {
   1877 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1878 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1879 	} else {
   1880 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   1881 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1882 			goto fail_5;
   1883 		}
   1884 	}
   1885 
   1886 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1887 	if (pn != NULL) {
   1888 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1889 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1890 	} else {
   1891 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   1892 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1893 			goto fail_5;
   1894 		}
   1895 	}
   1896 
   1897 	/* check for WM_F_WOL */
   1898 	switch (sc->sc_type) {
   1899 	case WM_T_82542_2_0:
   1900 	case WM_T_82542_2_1:
   1901 	case WM_T_82543:
   1902 		/* dummy? */
   1903 		eeprom_data = 0;
   1904 		apme_mask = NVM_CFG3_APME;
   1905 		break;
   1906 	case WM_T_82544:
   1907 		apme_mask = NVM_CFG2_82544_APM_EN;
   1908 		eeprom_data = cfg2;
   1909 		break;
   1910 	case WM_T_82546:
   1911 	case WM_T_82546_3:
   1912 	case WM_T_82571:
   1913 	case WM_T_82572:
   1914 	case WM_T_82573:
   1915 	case WM_T_82574:
   1916 	case WM_T_82583:
   1917 	case WM_T_80003:
   1918 	default:
   1919 		apme_mask = NVM_CFG3_APME;
   1920 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   1921 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1922 		break;
   1923 	case WM_T_82575:
   1924 	case WM_T_82576:
   1925 	case WM_T_82580:
   1926 	case WM_T_82580ER:
   1927 	case WM_T_I350:
   1928 	case WM_T_I354: /* XXX ok? */
   1929 	case WM_T_ICH8:
   1930 	case WM_T_ICH9:
   1931 	case WM_T_ICH10:
   1932 	case WM_T_PCH:
   1933 	case WM_T_PCH2:
   1934 	case WM_T_PCH_LPT:
   1935 		/* XXX The funcid should be checked on some devices */
   1936 		apme_mask = WUC_APME;
   1937 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1938 		break;
   1939 	}
   1940 
   1941 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   1942 	if ((eeprom_data & apme_mask) != 0)
   1943 		sc->sc_flags |= WM_F_WOL;
   1944 #ifdef WM_DEBUG
   1945 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1946 		printf("WOL\n");
   1947 #endif
   1948 
   1949 	/*
    1950 	 * XXX need special handling for some multiple-port cards
    1951 	 * to disable a particular port.
   1952 	 */
   1953 
   1954 	if (sc->sc_type >= WM_T_82544) {
   1955 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1956 		if (pn != NULL) {
   1957 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1958 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1959 		} else {
   1960 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   1961 				aprint_error_dev(sc->sc_dev,
   1962 				    "unable to read SWDPIN\n");
   1963 				goto fail_5;
   1964 			}
   1965 		}
   1966 	}
   1967 
   1968 	if (cfg1 & NVM_CFG1_ILOS)
   1969 		sc->sc_ctrl |= CTRL_ILOS;
   1970 	if (sc->sc_type >= WM_T_82544) {
   1971 		sc->sc_ctrl |=
   1972 		    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1973 		    CTRL_SWDPIO_SHIFT;
   1974 		sc->sc_ctrl |=
   1975 		    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1976 		    CTRL_SWDPINS_SHIFT;
   1977 	} else {
   1978 		sc->sc_ctrl |=
   1979 		    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1980 		    CTRL_SWDPIO_SHIFT;
   1981 	}
   1982 
   1983 #if 0
   1984 	if (sc->sc_type >= WM_T_82544) {
   1985 		if (cfg1 & NVM_CFG1_IPS0)
   1986 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1987 		if (cfg1 & NVM_CFG1_IPS1)
   1988 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1989 		sc->sc_ctrl_ext |=
   1990 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1991 		    CTRL_EXT_SWDPIO_SHIFT;
   1992 		sc->sc_ctrl_ext |=
   1993 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1994 		    CTRL_EXT_SWDPINS_SHIFT;
   1995 	} else {
   1996 		sc->sc_ctrl_ext |=
   1997 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1998 		    CTRL_EXT_SWDPIO_SHIFT;
   1999 	}
   2000 #endif
   2001 
   2002 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2003 #if 0
   2004 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2005 #endif
   2006 
   2007 	/*
   2008 	 * Set up some register offsets that are different between
   2009 	 * the i82542 and the i82543 and later chips.
   2010 	 */
   2011 	if (sc->sc_type < WM_T_82543) {
   2012 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   2013 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   2014 	} else {
   2015 		sc->sc_rdt_reg = WMREG_RDT;
   2016 		sc->sc_tdt_reg = WMREG_TDT;
   2017 	}
   2018 
   2019 	if (sc->sc_type == WM_T_PCH) {
   2020 		uint16_t val;
   2021 
   2022 		/* Save the NVM K1 bit setting */
   2023 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2024 
   2025 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2026 			sc->sc_nvm_k1_enabled = 1;
   2027 		else
   2028 			sc->sc_nvm_k1_enabled = 0;
   2029 	}
   2030 
   2031 	/*
   2032 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2033 	 * media structures accordingly.
   2034 	 */
   2035 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2036 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2037 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2038 	    || sc->sc_type == WM_T_82573
   2039 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2040 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2041 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2042 	} else if (sc->sc_type < WM_T_82543 ||
   2043 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2044 		if (sc->sc_mediatype & WMP_F_COPPER) {
   2045 			aprint_error_dev(sc->sc_dev,
   2046 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2047 			sc->sc_mediatype = WMP_F_FIBER;
   2048 		}
   2049 		wm_tbi_mediainit(sc);
   2050 	} else {
   2051 		switch (sc->sc_type) {
   2052 		case WM_T_82575:
   2053 		case WM_T_82576:
   2054 		case WM_T_82580:
   2055 		case WM_T_82580ER:
   2056 		case WM_T_I350:
   2057 		case WM_T_I354:
   2058 		case WM_T_I210:
   2059 		case WM_T_I211:
   2060 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2061 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2062 			switch (link_mode) {
   2063 			case CTRL_EXT_LINK_MODE_1000KX:
   2064 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2065 				sc->sc_mediatype = WMP_F_SERDES;
   2066 				break;
   2067 			case CTRL_EXT_LINK_MODE_SGMII:
   2068 				if (wm_sgmii_uses_mdio(sc)) {
   2069 					aprint_verbose_dev(sc->sc_dev,
   2070 					    "SGMII(MDIO)\n");
   2071 					sc->sc_flags |= WM_F_SGMII;
   2072 					sc->sc_mediatype = WMP_F_COPPER;
   2073 					break;
   2074 				}
   2075 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2076 				/*FALLTHROUGH*/
   2077 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2078 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2079 				if (sc->sc_mediatype == WMP_F_UNKNOWN) {
   2080 					if (link_mode
   2081 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2082 						sc->sc_mediatype
   2083 						    = WMP_F_COPPER;
   2084 						sc->sc_flags |= WM_F_SGMII;
   2085 					} else {
   2086 						sc->sc_mediatype
   2087 						    = WMP_F_SERDES;
   2088 						aprint_verbose_dev(sc->sc_dev,
   2089 						    "SERDES\n");
   2090 					}
   2091 					break;
   2092 				}
   2093 				if (sc->sc_mediatype == WMP_F_SERDES)
   2094 					aprint_verbose_dev(sc->sc_dev,
   2095 					    "SERDES\n");
   2096 
   2097 				/* Change current link mode setting */
   2098 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2099 				switch (sc->sc_mediatype) {
   2100 				case WMP_F_COPPER:
   2101 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2102 					break;
   2103 				case WMP_F_SERDES:
   2104 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2105 					break;
   2106 				default:
   2107 					break;
   2108 				}
   2109 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2110 				break;
   2111 			case CTRL_EXT_LINK_MODE_GMII:
   2112 			default:
   2113 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2114 				sc->sc_mediatype = WMP_F_COPPER;
   2115 				break;
   2116 			}
   2117 
    2118 			/* Enable I2C access only while in SGMII mode */
    2119 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2120 				reg |= CTRL_EXT_I2C_ENA;
    2121 			else
    2122 				reg &= ~CTRL_EXT_I2C_ENA;
   2123 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2124 
   2125 			if (sc->sc_mediatype == WMP_F_COPPER)
   2126 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2127 			else
   2128 				wm_tbi_mediainit(sc);
   2129 			break;
   2130 		default:
   2131 			if (sc->sc_mediatype & WMP_F_FIBER)
   2132 				aprint_error_dev(sc->sc_dev,
   2133 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2134 			sc->sc_mediatype = WMP_F_COPPER;
   2135 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2136 		}
   2137 	}
   2138 
   2139 	ifp = &sc->sc_ethercom.ec_if;
   2140 	xname = device_xname(sc->sc_dev);
   2141 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2142 	ifp->if_softc = sc;
   2143 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2144 	ifp->if_ioctl = wm_ioctl;
   2145 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2146 		ifp->if_start = wm_nq_start;
   2147 	else
   2148 		ifp->if_start = wm_start;
   2149 	ifp->if_watchdog = wm_watchdog;
   2150 	ifp->if_init = wm_init;
   2151 	ifp->if_stop = wm_stop;
   2152 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2153 	IFQ_SET_READY(&ifp->if_snd);
   2154 
   2155 	/* Check for jumbo frame */
   2156 	switch (sc->sc_type) {
   2157 	case WM_T_82573:
   2158 		/* XXX limited to 9234 if ASPM is disabled */
   2159 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
   2160 		if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
   2161 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2162 		break;
   2163 	case WM_T_82571:
   2164 	case WM_T_82572:
   2165 	case WM_T_82574:
   2166 	case WM_T_82575:
   2167 	case WM_T_82576:
   2168 	case WM_T_82580:
   2169 	case WM_T_82580ER:
   2170 	case WM_T_I350:
    2171 	case WM_T_I354: /* XXX ok? */
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 	case WM_T_80003:
   2175 	case WM_T_ICH9:
   2176 	case WM_T_ICH10:
   2177 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2178 	case WM_T_PCH_LPT:
   2179 		/* XXX limited to 9234 */
   2180 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2181 		break;
   2182 	case WM_T_PCH:
   2183 		/* XXX limited to 4096 */
   2184 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2185 		break;
   2186 	case WM_T_82542_2_0:
   2187 	case WM_T_82542_2_1:
   2188 	case WM_T_82583:
   2189 	case WM_T_ICH8:
   2190 		/* No support for jumbo frame */
   2191 		break;
   2192 	default:
   2193 		/* ETHER_MAX_LEN_JUMBO */
   2194 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2195 		break;
   2196 	}
   2197 
    2198 	/* If we're an i82543 or greater, we can support VLANs. */
   2199 	if (sc->sc_type >= WM_T_82543)
   2200 		sc->sc_ethercom.ec_capabilities |=
   2201 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2202 
   2203 	/*
    2204 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2205 	 * on i82543 and later.
   2206 	 */
   2207 	if (sc->sc_type >= WM_T_82543) {
   2208 		ifp->if_capabilities |=
   2209 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2210 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2211 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2212 		    IFCAP_CSUM_TCPv6_Tx |
   2213 		    IFCAP_CSUM_UDPv6_Tx;
   2214 	}
   2215 
   2216 	/*
    2217 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2218 	 *
   2219 	 *	82541GI (8086:1076) ... no
   2220 	 *	82572EI (8086:10b9) ... yes
   2221 	 */
   2222 	if (sc->sc_type >= WM_T_82571) {
   2223 		ifp->if_capabilities |=
   2224 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2225 	}
   2226 
   2227 	/*
   2228 	 * If we're a i82544 or greater (except i82547), we can do
    2229 	 * If we're an i82544 or greater (except i82547), we can do
   2230 	 */
   2231 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2232 		ifp->if_capabilities |= IFCAP_TSOv4;
   2233 	}
   2234 
   2235 	if (sc->sc_type >= WM_T_82571) {
   2236 		ifp->if_capabilities |= IFCAP_TSOv6;
   2237 	}
   2238 
   2239 #ifdef WM_MPSAFE
   2240 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2241 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2242 #else
   2243 	sc->sc_tx_lock = NULL;
   2244 	sc->sc_rx_lock = NULL;
   2245 #endif
   2246 
   2247 	/* Attach the interface. */
   2248 	if_attach(ifp);
   2249 	ether_ifattach(ifp, enaddr);
   2250 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2251 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2252 			  RND_FLAG_DEFAULT);
   2253 
   2254 #ifdef WM_EVENT_COUNTERS
   2255 	/* Attach event counters. */
   2256 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2257 	    NULL, xname, "txsstall");
   2258 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2259 	    NULL, xname, "txdstall");
   2260 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2261 	    NULL, xname, "txfifo_stall");
   2262 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2263 	    NULL, xname, "txdw");
   2264 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2265 	    NULL, xname, "txqe");
   2266 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2267 	    NULL, xname, "rxintr");
   2268 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2269 	    NULL, xname, "linkintr");
   2270 
   2271 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2272 	    NULL, xname, "rxipsum");
   2273 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2274 	    NULL, xname, "rxtusum");
   2275 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2276 	    NULL, xname, "txipsum");
   2277 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2278 	    NULL, xname, "txtusum");
   2279 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2280 	    NULL, xname, "txtusum6");
   2281 
   2282 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2283 	    NULL, xname, "txtso");
   2284 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2285 	    NULL, xname, "txtso6");
   2286 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2287 	    NULL, xname, "txtsopain");
   2288 
   2289 	for (i = 0; i < WM_NTXSEGS; i++) {
   2290 		snprintf(wm_txseg_evcnt_names[i],
   2291 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2292 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2293 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2294 	}
   2295 
   2296 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2297 	    NULL, xname, "txdrop");
   2298 
   2299 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2300 	    NULL, xname, "tu");
   2301 
   2302 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2303 	    NULL, xname, "tx_xoff");
   2304 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2305 	    NULL, xname, "tx_xon");
   2306 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2307 	    NULL, xname, "rx_xoff");
   2308 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2309 	    NULL, xname, "rx_xon");
   2310 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2311 	    NULL, xname, "rx_macctl");
   2312 #endif /* WM_EVENT_COUNTERS */
   2313 
   2314 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2315 		pmf_class_network_register(self, ifp);
   2316 	else
   2317 		aprint_error_dev(self, "couldn't establish power handler\n");
   2318 
   2319 	sc->sc_flags |= WM_F_ATTACHED;
   2320 	return;
   2321 
   2322 	/*
   2323 	 * Free any resources we've allocated during the failed attach
   2324 	 * attempt.  Do this in reverse order and fall through.
   2325 	 */
   2326  fail_5:
   2327 	for (i = 0; i < WM_NRXDESC; i++) {
   2328 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2329 			bus_dmamap_destroy(sc->sc_dmat,
   2330 			    sc->sc_rxsoft[i].rxs_dmamap);
   2331 	}
   2332  fail_4:
   2333 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2334 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2335 			bus_dmamap_destroy(sc->sc_dmat,
   2336 			    sc->sc_txsoft[i].txs_dmamap);
   2337 	}
   2338 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2339  fail_3:
   2340 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2341  fail_2:
   2342 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2343 	    sc->sc_cd_size);
   2344  fail_1:
   2345 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2346  fail_0:
   2347 	return;
   2348 }
   2349 
   2350 /* The detach function (ca_detach) */
   2351 static int
   2352 wm_detach(device_t self, int flags __unused)
   2353 {
   2354 	struct wm_softc *sc = device_private(self);
   2355 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2356 	int i;
   2357 #ifndef WM_MPSAFE
   2358 	int s;
   2359 #endif
   2360 
   2361 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2362 		return 0;
   2363 
   2364 #ifndef WM_MPSAFE
   2365 	s = splnet();
   2366 #endif
   2367 	/* Stop the interface. Callouts are stopped in it. */
    2368 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2369 
   2370 #ifndef WM_MPSAFE
   2371 	splx(s);
   2372 #endif
   2373 
   2374 	pmf_device_deregister(self);
   2375 
   2376 	/* Tell the firmware about the release */
   2377 	WM_BOTH_LOCK(sc);
   2378 	wm_release_manageability(sc);
   2379 	wm_release_hw_control(sc);
   2380 	WM_BOTH_UNLOCK(sc);
   2381 
   2382 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2383 
   2384 	/* Delete all remaining media. */
   2385 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2386 
   2387 	ether_ifdetach(ifp);
   2388 	if_detach(ifp);
   2389 
   2390 
   2391 	/* Unload RX dmamaps and free mbufs */
   2392 	WM_RX_LOCK(sc);
   2393 	wm_rxdrain(sc);
   2394 	WM_RX_UNLOCK(sc);
   2395 	/* Must unlock here */
   2396 
    2397 	/* Free DMA maps.  This mirrors the cleanup at the end of wm_attach(). */
   2398 	for (i = 0; i < WM_NRXDESC; i++) {
   2399 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2400 			bus_dmamap_destroy(sc->sc_dmat,
   2401 			    sc->sc_rxsoft[i].rxs_dmamap);
   2402 	}
   2403 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2404 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2405 			bus_dmamap_destroy(sc->sc_dmat,
   2406 			    sc->sc_txsoft[i].txs_dmamap);
   2407 	}
   2408 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2409 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2410 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2411 	    sc->sc_cd_size);
   2412 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2413 
   2414 	/* Disestablish the interrupt handler */
   2415 	if (sc->sc_ih != NULL) {
   2416 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2417 		sc->sc_ih = NULL;
   2418 	}
   2419 
   2420 	/* Unmap the registers */
   2421 	if (sc->sc_ss) {
   2422 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2423 		sc->sc_ss = 0;
   2424 	}
   2425 
   2426 	if (sc->sc_ios) {
   2427 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2428 		sc->sc_ios = 0;
   2429 	}
   2430 
   2431 	if (sc->sc_tx_lock)
   2432 		mutex_obj_free(sc->sc_tx_lock);
   2433 	if (sc->sc_rx_lock)
   2434 		mutex_obj_free(sc->sc_rx_lock);
   2435 
   2436 	return 0;
   2437 }
   2438 
   2439 static bool
   2440 wm_suspend(device_t self, const pmf_qual_t *qual)
   2441 {
   2442 	struct wm_softc *sc = device_private(self);
   2443 
   2444 	wm_release_manageability(sc);
   2445 	wm_release_hw_control(sc);
   2446 #ifdef WM_WOL
   2447 	wm_enable_wakeup(sc);
   2448 #endif
   2449 
   2450 	return true;
   2451 }
   2452 
   2453 static bool
   2454 wm_resume(device_t self, const pmf_qual_t *qual)
   2455 {
   2456 	struct wm_softc *sc = device_private(self);
   2457 
   2458 	wm_init_manageability(sc);
   2459 
   2460 	return true;
   2461 }
   2462 
   2463 /*
   2464  * wm_watchdog:		[ifnet interface function]
   2465  *
   2466  *	Watchdog timer handler.
   2467  */
   2468 static void
   2469 wm_watchdog(struct ifnet *ifp)
   2470 {
   2471 	struct wm_softc *sc = ifp->if_softc;
   2472 
   2473 	/*
   2474 	 * Since we're using delayed interrupts, sweep up
   2475 	 * before we report an error.
   2476 	 */
   2477 	WM_TX_LOCK(sc);
   2478 	wm_txintr(sc);
   2479 	WM_TX_UNLOCK(sc);
   2480 
   2481 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2482 #ifdef WM_DEBUG
   2483 		int i, j;
   2484 		struct wm_txsoft *txs;
   2485 #endif
   2486 		log(LOG_ERR,
   2487 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2488 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2489 		    sc->sc_txnext);
   2490 		ifp->if_oerrors++;
   2491 #ifdef WM_DEBUG
    2492 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
   2493 		    i = WM_NEXTTXS(sc, i)) {
   2494 		    txs = &sc->sc_txsoft[i];
   2495 		    printf("txs %d tx %d -> %d\n",
   2496 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2497 		    for (j = txs->txs_firstdesc; ;
   2498 			j = WM_NEXTTX(sc, j)) {
   2499 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2500 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
   2501 			printf("\t %#08x%08x\n",
   2502 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
   2503 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
   2504 			if (j == txs->txs_lastdesc)
   2505 				break;
   2506 			}
   2507 		}
   2508 #endif
   2509 		/* Reset the interface. */
   2510 		(void) wm_init(ifp);
   2511 	}
   2512 
   2513 	/* Try to get more packets going. */
   2514 	ifp->if_start(ifp);
   2515 }
   2516 
   2517 /*
   2518  * wm_tick:
   2519  *
   2520  *	One second timer, used to check link status, sweep up
   2521  *	completed transmit jobs, etc.
   2522  */
   2523 static void
   2524 wm_tick(void *arg)
   2525 {
   2526 	struct wm_softc *sc = arg;
   2527 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2528 #ifndef WM_MPSAFE
   2529 	int s;
   2530 
   2531 	s = splnet();
   2532 #endif
   2533 
   2534 	WM_TX_LOCK(sc);
   2535 
   2536 	if (sc->sc_stopping)
   2537 		goto out;
   2538 
   2539 	if (sc->sc_type >= WM_T_82542_2_1) {
   2540 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2541 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2542 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2543 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2544 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2545 	}
   2546 
   2547 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2548 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2549 	    + CSR_READ(sc, WMREG_CRCERRS)
   2550 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2551 	    + CSR_READ(sc, WMREG_SYMERRC)
   2552 	    + CSR_READ(sc, WMREG_RXERRC)
   2553 	    + CSR_READ(sc, WMREG_SEC)
   2554 	    + CSR_READ(sc, WMREG_CEXTERR)
   2555 	    + CSR_READ(sc, WMREG_RLEC);
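	/*
	 * Starting the sum with 0ULL promotes the arithmetic to 64 bits,
	 * so adding several 32-bit counter reads cannot wrap before the
	 * result reaches if_ierrors.
	 */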
   2556 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2557 
   2558 	if (sc->sc_flags & WM_F_HAS_MII)
   2559 		mii_tick(&sc->sc_mii);
   2560 	else
   2561 		wm_tbi_check_link(sc);
   2562 
   2563 out:
   2564 	WM_TX_UNLOCK(sc);
   2565 #ifndef WM_MPSAFE
   2566 	splx(s);
   2567 #endif
   2568 
   2569 	if (!sc->sc_stopping)
   2570 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2571 }
   2572 
   2573 static int
   2574 wm_ifflags_cb(struct ethercom *ec)
   2575 {
   2576 	struct ifnet *ifp = &ec->ec_if;
   2577 	struct wm_softc *sc = ifp->if_softc;
   2578 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2579 	int rc = 0;
   2580 
   2581 	WM_BOTH_LOCK(sc);
   2582 
   2583 	if (change != 0)
   2584 		sc->sc_if_flags = ifp->if_flags;
   2585 
   2586 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2587 		rc = ENETRESET;
   2588 		goto out;
   2589 	}
   2590 
   2591 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2592 		wm_set_filter(sc);
   2593 
   2594 	wm_set_vlan(sc);
   2595 
   2596 out:
   2597 	WM_BOTH_UNLOCK(sc);
   2598 
   2599 	return rc;
   2600 }
   2601 
   2602 /*
   2603  * wm_ioctl:		[ifnet interface function]
   2604  *
   2605  *	Handle control requests from the operator.
   2606  */
   2607 static int
   2608 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2609 {
   2610 	struct wm_softc *sc = ifp->if_softc;
   2611 	struct ifreq *ifr = (struct ifreq *) data;
   2612 	struct ifaddr *ifa = (struct ifaddr *)data;
   2613 	struct sockaddr_dl *sdl;
   2614 	int s, error;
   2615 
   2616 #ifndef WM_MPSAFE
   2617 	s = splnet();
   2618 #endif
   2619 	WM_BOTH_LOCK(sc);
   2620 
   2621 	switch (cmd) {
   2622 	case SIOCSIFMEDIA:
   2623 	case SIOCGIFMEDIA:
   2624 		/* Flow control requires full-duplex mode. */
   2625 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2626 		    (ifr->ifr_media & IFM_FDX) == 0)
   2627 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2628 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2629 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2630 				/* We can do both TXPAUSE and RXPAUSE. */
   2631 				ifr->ifr_media |=
   2632 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2633 			}
   2634 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2635 		}
   2636 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2637 		break;
   2638 	case SIOCINITIFADDR:
   2639 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2640 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2641 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2642 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2643 			/* unicast address is first multicast entry */
   2644 			wm_set_filter(sc);
   2645 			error = 0;
   2646 			break;
   2647 		}
   2648 		/*FALLTHROUGH*/
   2649 	default:
   2650 		WM_BOTH_UNLOCK(sc);
   2651 #ifdef WM_MPSAFE
   2652 		s = splnet();
   2653 #endif
   2654 		/* It may call wm_start, so unlock here */
   2655 		error = ether_ioctl(ifp, cmd, data);
   2656 #ifdef WM_MPSAFE
   2657 		splx(s);
   2658 #endif
   2659 		WM_BOTH_LOCK(sc);
   2660 
   2661 		if (error != ENETRESET)
   2662 			break;
   2663 
   2664 		error = 0;
   2665 
   2666 		if (cmd == SIOCSIFCAP) {
   2667 			WM_BOTH_UNLOCK(sc);
   2668 			error = (*ifp->if_init)(ifp);
   2669 			WM_BOTH_LOCK(sc);
   2670 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2671 			;
   2672 		else if (ifp->if_flags & IFF_RUNNING) {
   2673 			/*
   2674 			 * Multicast list has changed; set the hardware filter
   2675 			 * accordingly.
   2676 			 */
   2677 			wm_set_filter(sc);
   2678 		}
   2679 		break;
   2680 	}
   2681 
   2682 	WM_BOTH_UNLOCK(sc);
   2683 
   2684 	/* Try to get more packets going. */
   2685 	ifp->if_start(ifp);
   2686 
   2687 #ifndef WM_MPSAFE
   2688 	splx(s);
   2689 #endif
   2690 	return error;
   2691 }
   2692 
   2693 /* MAC address related */
   2694 
   2695 static int
   2696 wm_check_alt_mac_addr(struct wm_softc *sc)
   2697 {
   2698 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2699 	uint16_t offset = NVM_OFF_MACADDR;
   2700 
   2701 	/* Try to read alternative MAC address pointer */
   2702 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2703 		return -1;
   2704 
   2705 	/* Check pointer */
   2706 	if (offset == 0xffff)
   2707 		return -1;
   2708 
   2709 	/*
    2710 	 * Check whether the alternative MAC address is valid.
    2711 	 * Some cards have a non-0xffff pointer but don't actually
    2712 	 * use an alternative MAC address.
   2713 	 *
   2714 	 * Check whether the broadcast bit is set or not.
   2715 	 */
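	/*
	 * Background: bit 0 of the first address octet is the Ethernet
	 * group (multicast/broadcast) bit, so a valid station address
	 * must have that bit clear; the test below checks exactly that.
	 */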
   2716 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2717 		if (((myea[0] & 0xff) & 0x01) == 0)
   2718 			return 0; /* found! */
   2719 
   2720 	/* not found */
   2721 	return -1;
   2722 }
   2723 
   2724 static int
   2725 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2726 {
   2727 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2728 	uint16_t offset = NVM_OFF_MACADDR;
   2729 	int do_invert = 0;
   2730 
   2731 	switch (sc->sc_type) {
   2732 	case WM_T_82580:
   2733 	case WM_T_82580ER:
   2734 	case WM_T_I350:
   2735 	case WM_T_I354:
   2736 		switch (sc->sc_funcid) {
   2737 		case 0:
   2738 			/* default value (== NVM_OFF_MACADDR) */
   2739 			break;
   2740 		case 1:
   2741 			offset = NVM_OFF_LAN1;
   2742 			break;
   2743 		case 2:
   2744 			offset = NVM_OFF_LAN2;
   2745 			break;
   2746 		case 3:
   2747 			offset = NVM_OFF_LAN3;
   2748 			break;
   2749 		default:
   2750 			goto bad;
   2751 			/* NOTREACHED */
   2752 			break;
   2753 		}
   2754 		break;
   2755 	case WM_T_82571:
   2756 	case WM_T_82575:
   2757 	case WM_T_82576:
   2758 	case WM_T_80003:
   2759 	case WM_T_I210:
   2760 	case WM_T_I211:
   2761 		if (wm_check_alt_mac_addr(sc) != 0) {
   2762 			/* reset the offset to LAN0 */
   2763 			offset = NVM_OFF_MACADDR;
   2764 			if ((sc->sc_funcid & 0x01) == 1)
   2765 				do_invert = 1;
   2766 			goto do_read;
   2767 		}
   2768 		switch (sc->sc_funcid) {
   2769 		case 0:
   2770 			/*
   2771 			 * The offset is the value in NVM_OFF_ALT_MAC_ADDR_PTR
   2772 			 * itself.
   2773 			 */
   2774 			break;
   2775 		case 1:
   2776 			offset += NVM_OFF_MACADDR_LAN1;
   2777 			break;
   2778 		case 2:
   2779 			offset += NVM_OFF_MACADDR_LAN2;
   2780 			break;
   2781 		case 3:
   2782 			offset += NVM_OFF_MACADDR_LAN3;
   2783 			break;
   2784 		default:
   2785 			goto bad;
   2786 			/* NOTREACHED */
   2787 			break;
   2788 		}
   2789 		break;
   2790 	default:
   2791 		if ((sc->sc_funcid & 0x01) == 1)
   2792 			do_invert = 1;
   2793 		break;
   2794 	}
   2795 
   2796  do_read:
   2797 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2798 		myea) != 0) {
   2799 		goto bad;
   2800 	}
   2801 
   2802 	enaddr[0] = myea[0] & 0xff;
   2803 	enaddr[1] = myea[0] >> 8;
   2804 	enaddr[2] = myea[1] & 0xff;
   2805 	enaddr[3] = myea[1] >> 8;
   2806 	enaddr[4] = myea[2] & 0xff;
   2807 	enaddr[5] = myea[2] >> 8;
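	/*
	 * Illustrative example: each NVM word holds two octets, low byte
	 * first, so myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the
	 * station address 00:11:22:33:44:55.
	 */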
   2808 
   2809 	/*
   2810 	 * Toggle the LSB of the MAC address on the second port
   2811 	 * of some dual port cards.
   2812 	 */
   2813 	if (do_invert != 0)
   2814 		enaddr[5] ^= 1;
   2815 
   2816 	return 0;
   2817 
   2818  bad:
   2819 	return -1;
   2820 }
   2821 
   2822 /*
   2823  * wm_set_ral:
   2824  *
    2825  *	Set an entry in the receive address list.
   2826  */
   2827 static void
   2828 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2829 {
   2830 	uint32_t ral_lo, ral_hi;
   2831 
   2832 	if (enaddr != NULL) {
   2833 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2834 		    (enaddr[3] << 24);
   2835 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2836 		ral_hi |= RAL_AV;
   2837 	} else {
   2838 		ral_lo = 0;
   2839 		ral_hi = 0;
   2840 	}
   2841 
   2842 	if (sc->sc_type >= WM_T_82544) {
   2843 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2844 		    ral_lo);
   2845 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2846 		    ral_hi);
   2847 	} else {
   2848 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2849 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2850 	}
   2851 }
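
/*
 * Worked example: for the station address 00:11:22:33:44:55 the code
 * above computes ral_lo = 0x33221100 and ral_hi = 0x5544 | RAL_AV,
 * i.e. the six octets packed little-endian with the Address Valid
 * bit set in the high word.
 */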
   2852 
   2853 /*
   2854  * wm_mchash:
   2855  *
    2856  *	Compute the hash of the multicast address for the multicast
    2857  *	filter (4096-bit, or 1024-bit on the ICH/PCH variants).
   2858  */
   2859 static uint32_t
   2860 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2861 {
   2862 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2863 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2864 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2865 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2866 	uint32_t hash;
   2867 
   2868 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2869 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2870 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2871 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   2872 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   2873 		return (hash & 0x3ff);
   2874 	}
   2875 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2876 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2877 
   2878 	return (hash & 0xfff);
   2879 }
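
/*
 * Worked example, assuming sc_mchash_type == 0 on a non-ICH/PCH chip:
 * for the multicast address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() then uses hash >> 5 (0) as the MTA register index
 * and hash & 0x1f (0x10) as the bit within that register.
 */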
   2880 
   2881 /*
   2882  * wm_set_filter:
   2883  *
   2884  *	Set up the receive filter.
   2885  */
   2886 static void
   2887 wm_set_filter(struct wm_softc *sc)
   2888 {
   2889 	struct ethercom *ec = &sc->sc_ethercom;
   2890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2891 	struct ether_multi *enm;
   2892 	struct ether_multistep step;
   2893 	bus_addr_t mta_reg;
   2894 	uint32_t hash, reg, bit;
   2895 	int i, size;
   2896 
   2897 	if (sc->sc_type >= WM_T_82544)
   2898 		mta_reg = WMREG_CORDOVA_MTA;
   2899 	else
   2900 		mta_reg = WMREG_MTA;
   2901 
   2902 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2903 
   2904 	if (ifp->if_flags & IFF_BROADCAST)
   2905 		sc->sc_rctl |= RCTL_BAM;
   2906 	if (ifp->if_flags & IFF_PROMISC) {
   2907 		sc->sc_rctl |= RCTL_UPE;
   2908 		goto allmulti;
   2909 	}
   2910 
   2911 	/*
   2912 	 * Set the station address in the first RAL slot, and
   2913 	 * clear the remaining slots.
   2914 	 */
   2915 	if (sc->sc_type == WM_T_ICH8)
    2916 		size = WM_RAL_TABSIZE_ICH8 - 1;
   2917 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   2918 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   2919 	    || (sc->sc_type == WM_T_PCH_LPT))
   2920 		size = WM_RAL_TABSIZE_ICH8;
   2921 	else if (sc->sc_type == WM_T_82575)
   2922 		size = WM_RAL_TABSIZE_82575;
   2923 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   2924 		size = WM_RAL_TABSIZE_82576;
   2925 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2926 		size = WM_RAL_TABSIZE_I350;
   2927 	else
   2928 		size = WM_RAL_TABSIZE;
   2929 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   2930 	for (i = 1; i < size; i++)
   2931 		wm_set_ral(sc, NULL, i);
   2932 
   2933 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2934 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2935 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   2936 		size = WM_ICH8_MC_TABSIZE;
   2937 	else
   2938 		size = WM_MC_TABSIZE;
   2939 	/* Clear out the multicast table. */
   2940 	for (i = 0; i < size; i++)
   2941 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2942 
   2943 	ETHER_FIRST_MULTI(step, ec, enm);
   2944 	while (enm != NULL) {
   2945 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2946 			/*
   2947 			 * We must listen to a range of multicast addresses.
   2948 			 * For now, just accept all multicasts, rather than
   2949 			 * trying to set only those filter bits needed to match
   2950 			 * the range.  (At this time, the only use of address
   2951 			 * ranges is for IP multicast routing, for which the
   2952 			 * range is big enough to require all bits set.)
   2953 			 */
   2954 			goto allmulti;
   2955 		}
   2956 
   2957 		hash = wm_mchash(sc, enm->enm_addrlo);
   2958 
   2959 		reg = (hash >> 5);
   2960 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2961 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2962 		    || (sc->sc_type == WM_T_PCH2)
   2963 		    || (sc->sc_type == WM_T_PCH_LPT))
   2964 			reg &= 0x1f;
   2965 		else
   2966 			reg &= 0x7f;
   2967 		bit = hash & 0x1f;
   2968 
   2969 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2970 		hash |= 1U << bit;
   2971 
    2972 		/* XXX Hardware bug? Odd MTA writes clobber the even reg below. */
    2973 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
   2974 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2975 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2976 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2977 		} else
   2978 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2979 
   2980 		ETHER_NEXT_MULTI(step, enm);
   2981 	}
   2982 
   2983 	ifp->if_flags &= ~IFF_ALLMULTI;
   2984 	goto setit;
   2985 
   2986  allmulti:
   2987 	ifp->if_flags |= IFF_ALLMULTI;
   2988 	sc->sc_rctl |= RCTL_MPE;
   2989 
   2990  setit:
   2991 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   2992 }
   2993 
   2994 /* Reset and init related */
   2995 
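/*
 * wm_set_vlan:
 *
 *	Enable or disable VLAN tag handling (CTRL_VME) depending on
 *	whether any VLANs are attached to the interface.
 */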
   2996 static void
   2997 wm_set_vlan(struct wm_softc *sc)
   2998 {
   2999 	/* Deal with VLAN enables. */
   3000 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3001 		sc->sc_ctrl |= CTRL_VME;
   3002 	else
   3003 		sc->sc_ctrl &= ~CTRL_VME;
   3004 
   3005 	/* Write the control registers. */
   3006 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3007 }
   3008 
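/*
 * wm_set_pcie_completion_timeout:
 *
 *	If no PCIe completion timeout is configured yet, set one:
 *	16ms via DCSR2 on capability version 2 devices, 10ms via GCR
 *	otherwise.  Completion timeout resend is disabled either way.
 */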
   3009 static void
   3010 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3011 {
   3012 	uint32_t gcr;
   3013 	pcireg_t ctrl2;
   3014 
   3015 	gcr = CSR_READ(sc, WMREG_GCR);
   3016 
   3017 	/* Only take action if timeout value is defaulted to 0 */
   3018 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3019 		goto out;
   3020 
   3021 	if ((gcr & GCR_CAP_VER2) == 0) {
   3022 		gcr |= GCR_CMPL_TMOUT_10MS;
   3023 		goto out;
   3024 	}
   3025 
   3026 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3027 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3028 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3029 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3030 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3031 
   3032 out:
   3033 	/* Disable completion timeout resend */
   3034 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3035 
   3036 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3037 }
   3038 
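/*
 * wm_get_auto_rd_done:
 *
 *	Wait (up to 10ms) for the EEPROM auto read to complete,
 *	as signalled by EECD_EE_AUTORD.
 */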
   3039 void
   3040 wm_get_auto_rd_done(struct wm_softc *sc)
   3041 {
   3042 	int i;
   3043 
   3044 	/* wait for eeprom to reload */
   3045 	switch (sc->sc_type) {
   3046 	case WM_T_82571:
   3047 	case WM_T_82572:
   3048 	case WM_T_82573:
   3049 	case WM_T_82574:
   3050 	case WM_T_82583:
   3051 	case WM_T_82575:
   3052 	case WM_T_82576:
   3053 	case WM_T_82580:
   3054 	case WM_T_82580ER:
   3055 	case WM_T_I350:
   3056 	case WM_T_I354:
   3057 	case WM_T_I210:
   3058 	case WM_T_I211:
   3059 	case WM_T_80003:
   3060 	case WM_T_ICH8:
   3061 	case WM_T_ICH9:
   3062 		for (i = 0; i < 10; i++) {
   3063 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3064 				break;
   3065 			delay(1000);
   3066 		}
   3067 		if (i == 10) {
   3068 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3069 			    "complete\n", device_xname(sc->sc_dev));
   3070 		}
   3071 		break;
   3072 	default:
   3073 		break;
   3074 	}
   3075 }
   3076 
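/*
 * wm_lan_init_done:
 *
 *	Wait for the STATUS_LAN_INIT_DONE bit to be set after reset,
 *	then clear it (ICH10 and PCH variants only).
 */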
   3077 void
   3078 wm_lan_init_done(struct wm_softc *sc)
   3079 {
   3080 	uint32_t reg = 0;
   3081 	int i;
   3082 
    3083 	/* wait for the LAN init to complete */
   3084 	switch (sc->sc_type) {
   3085 	case WM_T_ICH10:
   3086 	case WM_T_PCH:
   3087 	case WM_T_PCH2:
   3088 	case WM_T_PCH_LPT:
   3089 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3090 			reg = CSR_READ(sc, WMREG_STATUS);
   3091 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3092 				break;
   3093 			delay(100);
   3094 		}
   3095 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3096 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3097 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3098 		}
   3099 		break;
   3100 	default:
   3101 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3102 		    __func__);
   3103 		break;
   3104 	}
   3105 
   3106 	reg &= ~STATUS_LAN_INIT_DONE;
   3107 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3108 }
   3109 
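/*
 * wm_get_cfg_done:
 *
 *	Wait for the hardware to finish loading its configuration
 *	after reset; the mechanism (fixed delay, EEMNGCTL polling or
 *	LAN_INIT_DONE) depends on the chip type.
 */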
   3110 void
   3111 wm_get_cfg_done(struct wm_softc *sc)
   3112 {
   3113 	int mask;
   3114 	uint32_t reg;
   3115 	int i;
   3116 
    3117 	/* wait for the configuration load to complete */
   3118 	switch (sc->sc_type) {
   3119 	case WM_T_82542_2_0:
   3120 	case WM_T_82542_2_1:
   3121 		/* null */
   3122 		break;
   3123 	case WM_T_82543:
   3124 	case WM_T_82544:
   3125 	case WM_T_82540:
   3126 	case WM_T_82545:
   3127 	case WM_T_82545_3:
   3128 	case WM_T_82546:
   3129 	case WM_T_82546_3:
   3130 	case WM_T_82541:
   3131 	case WM_T_82541_2:
   3132 	case WM_T_82547:
   3133 	case WM_T_82547_2:
   3134 	case WM_T_82573:
   3135 	case WM_T_82574:
   3136 	case WM_T_82583:
   3137 		/* generic */
   3138 		delay(10*1000);
   3139 		break;
   3140 	case WM_T_80003:
   3141 	case WM_T_82571:
   3142 	case WM_T_82572:
   3143 	case WM_T_82575:
   3144 	case WM_T_82576:
   3145 	case WM_T_82580:
   3146 	case WM_T_82580ER:
   3147 	case WM_T_I350:
   3148 	case WM_T_I354:
   3149 	case WM_T_I210:
   3150 	case WM_T_I211:
   3151 		if (sc->sc_type == WM_T_82571) {
    3152 			/* On the 82571, both functions share function 0's bit */
   3153 			mask = EEMNGCTL_CFGDONE_0;
   3154 		} else
   3155 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3156 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3157 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3158 				break;
   3159 			delay(1000);
   3160 		}
   3161 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3162 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3163 				device_xname(sc->sc_dev), __func__));
   3164 		}
   3165 		break;
   3166 	case WM_T_ICH8:
   3167 	case WM_T_ICH9:
   3168 	case WM_T_ICH10:
   3169 	case WM_T_PCH:
   3170 	case WM_T_PCH2:
   3171 	case WM_T_PCH_LPT:
   3172 		delay(10*1000);
   3173 		if (sc->sc_type >= WM_T_ICH10)
   3174 			wm_lan_init_done(sc);
   3175 		else
   3176 			wm_get_auto_rd_done(sc);
   3177 
   3178 		reg = CSR_READ(sc, WMREG_STATUS);
   3179 		if ((reg & STATUS_PHYRA) != 0)
   3180 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3181 		break;
   3182 	default:
   3183 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3184 		    __func__);
   3185 		break;
   3186 	}
   3187 }
   3188 
   3189 /*
   3190  * wm_reset:
   3191  *
   3192  *	Reset the i82542 chip.
   3193  */
   3194 static void
   3195 wm_reset(struct wm_softc *sc)
   3196 {
   3197 	int phy_reset = 0;
   3198 	int error = 0;
   3199 	uint32_t reg, mask;
   3200 
   3201 	/*
   3202 	 * Allocate on-chip memory according to the MTU size.
   3203 	 * The Packet Buffer Allocation register must be written
   3204 	 * before the chip is reset.
   3205 	 */
   3206 	switch (sc->sc_type) {
   3207 	case WM_T_82547:
   3208 	case WM_T_82547_2:
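		/*
		 * On 82547 the 40KB packet buffer is split between the
		 * Rx packet buffer (PBA) and the Tx FIFO: PBA_30K leaves
		 * 10KB of Tx FIFO, PBA_22K (jumbo MTU) leaves 18KB.
		 */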
   3209 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3210 		    PBA_22K : PBA_30K;
   3211 		sc->sc_txfifo_head = 0;
   3212 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3213 		sc->sc_txfifo_size =
   3214 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3215 		sc->sc_txfifo_stall = 0;
   3216 		break;
   3217 	case WM_T_82571:
   3218 	case WM_T_82572:
    3219 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3220 	case WM_T_I350:
   3221 	case WM_T_I354:
   3222 	case WM_T_80003:
   3223 		sc->sc_pba = PBA_32K;
   3224 		break;
   3225 	case WM_T_82580:
   3226 	case WM_T_82580ER:
   3227 		sc->sc_pba = PBA_35K;
   3228 		break;
   3229 	case WM_T_I210:
   3230 	case WM_T_I211:
   3231 		sc->sc_pba = PBA_34K;
   3232 		break;
   3233 	case WM_T_82576:
   3234 		sc->sc_pba = PBA_64K;
   3235 		break;
   3236 	case WM_T_82573:
   3237 		sc->sc_pba = PBA_12K;
   3238 		break;
   3239 	case WM_T_82574:
   3240 	case WM_T_82583:
   3241 		sc->sc_pba = PBA_20K;
   3242 		break;
   3243 	case WM_T_ICH8:
   3244 		sc->sc_pba = PBA_8K;
   3245 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3246 		break;
   3247 	case WM_T_ICH9:
   3248 	case WM_T_ICH10:
   3249 		sc->sc_pba = PBA_10K;
   3250 		break;
   3251 	case WM_T_PCH:
   3252 	case WM_T_PCH2:
   3253 	case WM_T_PCH_LPT:
   3254 		sc->sc_pba = PBA_26K;
   3255 		break;
   3256 	default:
   3257 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3258 		    PBA_40K : PBA_48K;
   3259 		break;
   3260 	}
   3261 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3262 
   3263 	/* Prevent the PCI-E bus from sticking */
   3264 	if (sc->sc_flags & WM_F_PCIE) {
   3265 		int timeout = 800;
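		/* Up to 800 * 100us = 80ms for master requests to drain */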
   3266 
   3267 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3268 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3269 
   3270 		while (timeout--) {
   3271 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3272 			    == 0)
   3273 				break;
   3274 			delay(100);
   3275 		}
   3276 	}
   3277 
   3278 	/* Set the completion timeout for interface */
   3279 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3280 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3281 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3282 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3283 		wm_set_pcie_completion_timeout(sc);
   3284 
   3285 	/* Clear interrupt */
   3286 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3287 
   3288 	/* Stop the transmit and receive processes. */
   3289 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3290 	sc->sc_rctl &= ~RCTL_EN;
   3291 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3292 	CSR_WRITE_FLUSH(sc);
   3293 
   3294 	/* XXX set_tbi_sbp_82543() */
   3295 
   3296 	delay(10*1000);
   3297 
   3298 	/* Must acquire the MDIO ownership before MAC reset */
   3299 	switch (sc->sc_type) {
   3300 	case WM_T_82573:
   3301 	case WM_T_82574:
   3302 	case WM_T_82583:
   3303 		error = wm_get_hw_semaphore_82573(sc);
   3304 		break;
   3305 	default:
   3306 		break;
   3307 	}
   3308 
   3309 	/*
   3310 	 * 82541 Errata 29? & 82547 Errata 28?
   3311 	 * See also the description about PHY_RST bit in CTRL register
   3312 	 * in 8254x_GBe_SDM.pdf.
   3313 	 */
   3314 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3315 		CSR_WRITE(sc, WMREG_CTRL,
   3316 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3317 		CSR_WRITE_FLUSH(sc);
   3318 		delay(5000);
   3319 	}
   3320 
   3321 	switch (sc->sc_type) {
   3322 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3323 	case WM_T_82541:
   3324 	case WM_T_82541_2:
   3325 	case WM_T_82547:
   3326 	case WM_T_82547_2:
   3327 		/*
   3328 		 * On some chipsets, a reset through a memory-mapped write
   3329 		 * cycle can cause the chip to reset before completing the
    3330 		 * write cycle.  This causes a major headache that can be
   3331 		 * avoided by issuing the reset via indirect register writes
   3332 		 * through I/O space.
   3333 		 *
   3334 		 * So, if we successfully mapped the I/O BAR at attach time,
   3335 		 * use that.  Otherwise, try our luck with a memory-mapped
   3336 		 * reset.
   3337 		 */
   3338 		if (sc->sc_flags & WM_F_IOH_VALID)
   3339 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3340 		else
   3341 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3342 		break;
   3343 	case WM_T_82545_3:
   3344 	case WM_T_82546_3:
   3345 		/* Use the shadow control register on these chips. */
   3346 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3347 		break;
   3348 	case WM_T_80003:
   3349 		mask = swfwphysem[sc->sc_funcid];
   3350 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3351 		wm_get_swfw_semaphore(sc, mask);
   3352 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3353 		wm_put_swfw_semaphore(sc, mask);
   3354 		break;
   3355 	case WM_T_ICH8:
   3356 	case WM_T_ICH9:
   3357 	case WM_T_ICH10:
   3358 	case WM_T_PCH:
   3359 	case WM_T_PCH2:
   3360 	case WM_T_PCH_LPT:
   3361 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3362 		if (wm_check_reset_block(sc) == 0) {
   3363 			/*
   3364 			 * Gate automatic PHY configuration by hardware on
   3365 			 * non-managed 82579
   3366 			 */
   3367 			if ((sc->sc_type == WM_T_PCH2)
   3368 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3369 				!= 0))
   3370 				wm_gate_hw_phy_config_ich8lan(sc, 1);
    3371 
   3373 			reg |= CTRL_PHY_RESET;
   3374 			phy_reset = 1;
   3375 		}
   3376 		wm_get_swfwhw_semaphore(sc);
   3377 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3378 		/* Don't insert a completion barrier when resetting */
   3379 		delay(20*1000);
   3380 		wm_put_swfwhw_semaphore(sc);
   3381 		break;
   3382 	case WM_T_82542_2_0:
   3383 	case WM_T_82542_2_1:
   3384 	case WM_T_82543:
   3385 	case WM_T_82540:
   3386 	case WM_T_82545:
   3387 	case WM_T_82546:
   3388 	case WM_T_82571:
   3389 	case WM_T_82572:
   3390 	case WM_T_82573:
   3391 	case WM_T_82574:
   3392 	case WM_T_82575:
   3393 	case WM_T_82576:
   3394 	case WM_T_82580:
   3395 	case WM_T_82580ER:
   3396 	case WM_T_82583:
   3397 	case WM_T_I350:
   3398 	case WM_T_I354:
   3399 	case WM_T_I210:
   3400 	case WM_T_I211:
   3401 	default:
   3402 		/* Everything else can safely use the documented method. */
   3403 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3404 		break;
   3405 	}
   3406 
   3407 	/* Must release the MDIO ownership after MAC reset */
   3408 	switch (sc->sc_type) {
   3409 	case WM_T_82573:
   3410 	case WM_T_82574:
   3411 	case WM_T_82583:
   3412 		if (error == 0)
   3413 			wm_put_hw_semaphore_82573(sc);
   3414 		break;
   3415 	default:
   3416 		break;
   3417 	}
   3418 
   3419 	if (phy_reset != 0)
   3420 		wm_get_cfg_done(sc);
   3421 
   3422 	/* reload EEPROM */
   3423 	switch (sc->sc_type) {
   3424 	case WM_T_82542_2_0:
   3425 	case WM_T_82542_2_1:
   3426 	case WM_T_82543:
   3427 	case WM_T_82544:
   3428 		delay(10);
   3429 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3430 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3431 		CSR_WRITE_FLUSH(sc);
   3432 		delay(2000);
   3433 		break;
   3434 	case WM_T_82540:
   3435 	case WM_T_82545:
   3436 	case WM_T_82545_3:
   3437 	case WM_T_82546:
   3438 	case WM_T_82546_3:
   3439 		delay(5*1000);
   3440 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3441 		break;
   3442 	case WM_T_82541:
   3443 	case WM_T_82541_2:
   3444 	case WM_T_82547:
   3445 	case WM_T_82547_2:
   3446 		delay(20000);
   3447 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3448 		break;
   3449 	case WM_T_82571:
   3450 	case WM_T_82572:
   3451 	case WM_T_82573:
   3452 	case WM_T_82574:
   3453 	case WM_T_82583:
   3454 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3455 			delay(10);
   3456 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3457 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3458 			CSR_WRITE_FLUSH(sc);
   3459 		}
   3460 		/* check EECD_EE_AUTORD */
   3461 		wm_get_auto_rd_done(sc);
   3462 		/*
   3463 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   3464 		 * is set.
   3465 		 */
   3466 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3467 		    || (sc->sc_type == WM_T_82583))
   3468 			delay(25*1000);
   3469 		break;
   3470 	case WM_T_82575:
   3471 	case WM_T_82576:
   3472 	case WM_T_82580:
   3473 	case WM_T_82580ER:
   3474 	case WM_T_I350:
   3475 	case WM_T_I354:
   3476 	case WM_T_I210:
   3477 	case WM_T_I211:
   3478 	case WM_T_80003:
   3479 		/* check EECD_EE_AUTORD */
   3480 		wm_get_auto_rd_done(sc);
   3481 		break;
   3482 	case WM_T_ICH8:
   3483 	case WM_T_ICH9:
   3484 	case WM_T_ICH10:
   3485 	case WM_T_PCH:
   3486 	case WM_T_PCH2:
   3487 	case WM_T_PCH_LPT:
   3488 		break;
   3489 	default:
   3490 		panic("%s: unknown type\n", __func__);
   3491 	}
   3492 
   3493 	/* Check whether EEPROM is present or not */
   3494 	switch (sc->sc_type) {
   3495 	case WM_T_82575:
   3496 	case WM_T_82576:
   3497 #if 0 /* XXX */
   3498 	case WM_T_82580:
   3499 	case WM_T_82580ER:
   3500 #endif
   3501 	case WM_T_I350:
   3502 	case WM_T_I354:
   3503 	case WM_T_ICH8:
   3504 	case WM_T_ICH9:
   3505 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3506 			/* Not found */
   3507 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3508 			if ((sc->sc_type == WM_T_82575)
   3509 			    || (sc->sc_type == WM_T_82576)
   3510 			    || (sc->sc_type == WM_T_82580)
   3511 			    || (sc->sc_type == WM_T_82580ER)
   3512 			    || (sc->sc_type == WM_T_I350)
   3513 			    || (sc->sc_type == WM_T_I354))
   3514 				wm_reset_init_script_82575(sc);
   3515 		}
   3516 		break;
   3517 	default:
   3518 		break;
   3519 	}
   3520 
   3521 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3522 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3523 		/* clear global device reset status bit */
   3524 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3525 	}
   3526 
   3527 	/* Clear any pending interrupt events. */
   3528 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3529 	reg = CSR_READ(sc, WMREG_ICR);
   3530 
   3531 	/* reload sc_ctrl */
   3532 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3533 
   3534 	if (sc->sc_type == WM_T_I350)
   3535 		wm_set_eee_i350(sc);
   3536 
   3537 	/* dummy read from WUC */
   3538 	if (sc->sc_type == WM_T_PCH)
   3539 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3540 	/*
   3541 	 * For PCH, this write will make sure that any noise will be detected
   3542 	 * as a CRC error and be dropped rather than show up as a bad packet
   3543 	 * to the DMA engine
   3544 	 */
   3545 	if (sc->sc_type == WM_T_PCH)
   3546 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3547 
   3548 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3549 		CSR_WRITE(sc, WMREG_WUC, 0);
   3550 
   3551 	/* XXX need special handling for 82580 */
   3552 }
   3553 
   3554 /*
   3555  * wm_add_rxbuf:
   3556  *
    3557  *	Add a receive buffer to the indicated descriptor.
   3558  */
   3559 static int
   3560 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3561 {
   3562 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3563 	struct mbuf *m;
   3564 	int error;
   3565 
   3566 	KASSERT(WM_RX_LOCKED(sc));
   3567 
   3568 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3569 	if (m == NULL)
   3570 		return ENOBUFS;
   3571 
   3572 	MCLGET(m, M_DONTWAIT);
   3573 	if ((m->m_flags & M_EXT) == 0) {
   3574 		m_freem(m);
   3575 		return ENOBUFS;
   3576 	}
   3577 
   3578 	if (rxs->rxs_mbuf != NULL)
   3579 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3580 
   3581 	rxs->rxs_mbuf = m;
   3582 
   3583 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3584 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3585 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3586 	if (error) {
   3587 		/* XXX XXX XXX */
   3588 		aprint_error_dev(sc->sc_dev,
   3589 		    "unable to load rx DMA map %d, error = %d\n",
   3590 		    idx, error);
   3591 		panic("wm_add_rxbuf");
   3592 	}
   3593 
   3594 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3595 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3596 
   3597 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3598 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3599 			WM_INIT_RXDESC(sc, idx);
   3600 	} else
   3601 		WM_INIT_RXDESC(sc, idx);
   3602 
   3603 	return 0;
   3604 }
   3605 
   3606 /*
   3607  * wm_rxdrain:
   3608  *
   3609  *	Drain the receive queue.
   3610  */
   3611 static void
   3612 wm_rxdrain(struct wm_softc *sc)
   3613 {
   3614 	struct wm_rxsoft *rxs;
   3615 	int i;
   3616 
   3617 	KASSERT(WM_RX_LOCKED(sc));
   3618 
   3619 	for (i = 0; i < WM_NRXDESC; i++) {
   3620 		rxs = &sc->sc_rxsoft[i];
   3621 		if (rxs->rxs_mbuf != NULL) {
   3622 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3623 			m_freem(rxs->rxs_mbuf);
   3624 			rxs->rxs_mbuf = NULL;
   3625 		}
   3626 	}
   3627 }
   3628 
   3629 /*
   3630  * wm_init:		[ifnet interface function]
   3631  *
   3632  *	Initialize the interface.
   3633  */
   3634 static int
   3635 wm_init(struct ifnet *ifp)
   3636 {
   3637 	struct wm_softc *sc = ifp->if_softc;
   3638 	int ret;
   3639 
   3640 	WM_BOTH_LOCK(sc);
   3641 	ret = wm_init_locked(ifp);
   3642 	WM_BOTH_UNLOCK(sc);
   3643 
   3644 	return ret;
   3645 }
   3646 
   3647 static int
   3648 wm_init_locked(struct ifnet *ifp)
   3649 {
   3650 	struct wm_softc *sc = ifp->if_softc;
   3651 	struct wm_rxsoft *rxs;
   3652 	int i, j, trynum, error = 0;
   3653 	uint32_t reg;
   3654 
   3655 	KASSERT(WM_BOTH_LOCKED(sc));
   3656 	/*
    3657 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3658 	 * There is a small but measurable benefit to avoiding the adjustment
   3659 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3660 	 * on such platforms.  One possibility is that the DMA itself is
   3661 	 * slightly more efficient if the front of the entire packet (instead
   3662 	 * of the front of the headers) is aligned.
   3663 	 *
   3664 	 * Note we must always set align_tweak to 0 if we are using
   3665 	 * jumbo frames.
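	 *
	 * A tweak of 2 makes the 14-byte Ethernet header end on a
	 * 4-byte boundary, so the IP header that follows is aligned
	 * on strict-alignment platforms.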
   3666 	 */
   3667 #ifdef __NO_STRICT_ALIGNMENT
   3668 	sc->sc_align_tweak = 0;
   3669 #else
   3670 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   3671 		sc->sc_align_tweak = 0;
   3672 	else
   3673 		sc->sc_align_tweak = 2;
   3674 #endif /* __NO_STRICT_ALIGNMENT */
   3675 
   3676 	/* Cancel any pending I/O. */
   3677 	wm_stop_locked(ifp, 0);
   3678 
   3679 	/* update statistics before reset */
   3680 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3681 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   3682 
   3683 	/* Reset the chip to a known state. */
   3684 	wm_reset(sc);
   3685 
   3686 	switch (sc->sc_type) {
   3687 	case WM_T_82571:
   3688 	case WM_T_82572:
   3689 	case WM_T_82573:
   3690 	case WM_T_82574:
   3691 	case WM_T_82583:
   3692 	case WM_T_80003:
   3693 	case WM_T_ICH8:
   3694 	case WM_T_ICH9:
   3695 	case WM_T_ICH10:
   3696 	case WM_T_PCH:
   3697 	case WM_T_PCH2:
   3698 	case WM_T_PCH_LPT:
   3699 		if (wm_check_mng_mode(sc) != 0)
   3700 			wm_get_hw_control(sc);
   3701 		break;
   3702 	default:
   3703 		break;
   3704 	}
   3705 
   3706 	/* Reset the PHY. */
   3707 	if (sc->sc_flags & WM_F_HAS_MII)
   3708 		wm_gmii_reset(sc);
   3709 
   3710 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3711 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
   3712 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3713 	    || (sc->sc_type == WM_T_PCH_LPT))
   3714 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
   3715 
   3716 	/* Initialize the transmit descriptor ring. */
   3717 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   3718 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   3719 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3720 	sc->sc_txfree = WM_NTXDESC(sc);
   3721 	sc->sc_txnext = 0;
   3722 
   3723 	if (sc->sc_type < WM_T_82543) {
   3724 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3725 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3726 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   3727 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   3728 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   3729 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   3730 	} else {
   3731 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3732 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3733 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   3734 		CSR_WRITE(sc, WMREG_TDH, 0);
   3735 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
   3736 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
   3737 
   3738 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3739 			/*
   3740 			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet.
   3742 			 */
   3743 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
   3744 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   3745 			    | TXDCTL_WTHRESH(0));
   3746 		else {
   3747 			CSR_WRITE(sc, WMREG_TDT, 0);
   3748 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   3749 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   3750 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   3751 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   3752 		}
   3753 	}
   3754 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   3755 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   3756 
   3757 	/* Initialize the transmit job descriptors. */
   3758 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   3759 		sc->sc_txsoft[i].txs_mbuf = NULL;
   3760 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   3761 	sc->sc_txsnext = 0;
   3762 	sc->sc_txsdirty = 0;
   3763 
   3764 	/*
   3765 	 * Initialize the receive descriptor and receive job
   3766 	 * descriptor rings.
   3767 	 */
   3768 	if (sc->sc_type < WM_T_82543) {
   3769 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   3770 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   3771 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   3772 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   3773 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   3774 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   3775 
   3776 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   3777 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   3778 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   3779 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   3780 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   3781 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   3782 	} else {
   3783 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   3784 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   3785 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   3786 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3787 			CSR_WRITE(sc, WMREG_EITR(0), 450);
   3788 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    3789 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   3790 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   3791 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   3792 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   3793 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   3794 			    | RXDCTL_WTHRESH(1));
   3795 		} else {
   3796 			CSR_WRITE(sc, WMREG_RDH, 0);
   3797 			CSR_WRITE(sc, WMREG_RDT, 0);
   3798 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   3799 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   3800 		}
   3801 	}
   3802 	for (i = 0; i < WM_NRXDESC; i++) {
   3803 		rxs = &sc->sc_rxsoft[i];
   3804 		if (rxs->rxs_mbuf == NULL) {
   3805 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   3806 				log(LOG_ERR, "%s: unable to allocate or map "
   3807 				    "rx buffer %d, error = %d\n",
   3808 				    device_xname(sc->sc_dev), i, error);
   3809 				/*
   3810 				 * XXX Should attempt to run with fewer receive
   3811 				 * XXX buffers instead of just failing.
   3812 				 */
   3813 				wm_rxdrain(sc);
   3814 				goto out;
   3815 			}
   3816 		} else {
   3817 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3818 				WM_INIT_RXDESC(sc, i);
   3819 			/*
    3820 			 * For 82575 and newer devices, the RX descriptors
   3821 			 * must be initialized after the setting of RCTL.EN in
   3822 			 * wm_set_filter()
   3823 			 */
   3824 		}
   3825 	}
   3826 	sc->sc_rxptr = 0;
   3827 	sc->sc_rxdiscard = 0;
   3828 	WM_RXCHAIN_RESET(sc);
   3829 
   3830 	/*
   3831 	 * Clear out the VLAN table -- we don't use it (yet).
   3832 	 */
   3833 	CSR_WRITE(sc, WMREG_VET, 0);
   3834 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3835 		trynum = 10; /* Due to hw errata */
   3836 	else
   3837 		trynum = 1;
   3838 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   3839 		for (j = 0; j < trynum; j++)
   3840 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   3841 
   3842 	/*
   3843 	 * Set up flow-control parameters.
   3844 	 *
   3845 	 * XXX Values could probably stand some tuning.
   3846 	 */
   3847 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   3848 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   3849 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   3850 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   3851 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   3852 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   3853 	}
   3854 
   3855 	sc->sc_fcrtl = FCRTL_DFLT;
   3856 	if (sc->sc_type < WM_T_82543) {
   3857 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   3858 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   3859 	} else {
   3860 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   3861 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   3862 	}
   3863 
   3864 	if (sc->sc_type == WM_T_80003)
   3865 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   3866 	else
   3867 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   3868 
   3869 	/* Writes the control register. */
   3870 	wm_set_vlan(sc);
   3871 
   3872 	if (sc->sc_flags & WM_F_HAS_MII) {
   3873 		int val;
   3874 
   3875 		switch (sc->sc_type) {
   3876 		case WM_T_80003:
   3877 		case WM_T_ICH8:
   3878 		case WM_T_ICH9:
   3879 		case WM_T_ICH10:
   3880 		case WM_T_PCH:
   3881 		case WM_T_PCH2:
   3882 		case WM_T_PCH_LPT:
   3883 			/*
   3884 			 * Set the mac to wait the maximum time between each
   3885 			 * iteration and increase the max iterations when
   3886 			 * polling the phy; this fixes erroneous timeouts at
   3887 			 * 10Mbps.
   3888 			 */
   3889 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   3890 			    0xFFFF);
   3891 			val = wm_kmrn_readreg(sc,
   3892 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   3893 			val |= 0x3F;
   3894 			wm_kmrn_writereg(sc,
   3895 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   3896 			break;
   3897 		default:
   3898 			break;
   3899 		}
   3900 
   3901 		if (sc->sc_type == WM_T_80003) {
   3902 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   3903 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   3904 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   3905 
   3906 			/* Bypass RX and TX FIFO's */
   3907 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   3908 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   3909 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   3910 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   3911 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   3912 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   3913 		}
   3914 	}
   3915 #if 0
   3916 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   3917 #endif
   3918 
   3919 	/* Set up checksum offload parameters. */
   3920 	reg = CSR_READ(sc, WMREG_RXCSUM);
   3921 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   3922 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   3923 		reg |= RXCSUM_IPOFL;
   3924 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   3925 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   3926 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   3927 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   3928 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   3929 
   3930 	/* Set up the interrupt registers. */
   3931 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
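	/*
	 * Tx descriptor written back, link status change, Rx sequence
	 * error, Rx descriptor minimum threshold, Rx overrun, Rx timer.
	 */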
   3932 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   3933 	    ICR_RXO | ICR_RXT0;
   3934 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   3935 
   3936 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3937 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3938 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3939 		reg = CSR_READ(sc, WMREG_KABGTXD);
   3940 		reg |= KABGTXD_BGSQLBIAS;
   3941 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   3942 	}
   3943 
   3944 	/* Set up the inter-packet gap. */
   3945 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   3946 
   3947 	if (sc->sc_type >= WM_T_82543) {
   3948 		/*
   3949 		 * Set up the interrupt throttling register (units of 256ns)
   3950 		 * Note that a footnote in Intel's documentation says this
   3951 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    3952 		 * or 10Mbit mode.  Empirically, this also appears to be true
    3953 		 * for the 1024ns units of the other
   3954 		 * interrupt-related timer registers -- so, really, we ought
   3955 		 * to divide this value by 4 when the link speed is low.
   3956 		 *
   3957 		 * XXX implement this division at link speed change!
   3958 		 */
   3959 
   3960 		/*
   3961 		 * For N interrupts/sec, set this value to:
   3962 		 * 1000000000 / (N * 256).  Note that we set the
   3963 		 * absolute and packet timer values to this value
   3964 		 * divided by 4 to get "simple timer" behavior.
   3965 		 */
   3966 
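		/*
		 * 1500 * 256ns =~ 384us between interrupts, i.e.
		 * 1000000000 / (1500 * 256) =~ 2604 ints/sec; the 375
		 * written to TIDV/TADV above is 1500 / 4.
		 */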
   3967 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   3968 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   3969 	}
   3970 
   3971 	/* Set the VLAN ethernetype. */
   3972 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   3973 
   3974 	/*
   3975 	 * Set up the transmit control register; we start out with
    3976 	 * a collision distance suitable for FDX, but update it when
   3977 	 * we resolve the media type.
   3978 	 */
   3979 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   3980 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   3981 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3982 	if (sc->sc_type >= WM_T_82571)
   3983 		sc->sc_tctl |= TCTL_MULR;
   3984 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3985 
   3986 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    3987 		/* Write TDT after TCTL.EN is set.  See the datasheet. */
   3988 		CSR_WRITE(sc, WMREG_TDT, 0);
   3989 	}
   3990 
   3991 	if (sc->sc_type == WM_T_80003) {
   3992 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   3993 		reg &= ~TCTL_EXT_GCEX_MASK;
   3994 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   3995 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   3996 	}
   3997 
   3998 	/* Set the media. */
   3999 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4000 		goto out;
   4001 
   4002 	/* Configure for OS presence */
   4003 	wm_init_manageability(sc);
   4004 
   4005 	/*
   4006 	 * Set up the receive control register; we actually program
   4007 	 * the register when we set the receive filter.  Use multicast
   4008 	 * address offset type 0.
   4009 	 *
   4010 	 * Only the i82544 has the ability to strip the incoming
   4011 	 * CRC, so we don't enable that feature.
   4012 	 */
   4013 	sc->sc_mchash_type = 0;
   4014 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4015 	    | RCTL_MO(sc->sc_mchash_type);
   4016 
   4017 	/*
   4018 	 * The I350 has a bug where it always strips the CRC whether
   4019 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   4020 	 */
   4021 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4022 	    || (sc->sc_type == WM_T_I210))
   4023 		sc->sc_rctl |= RCTL_SECRC;
   4024 
   4025 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4026 	    && (ifp->if_mtu > ETHERMTU)) {
   4027 		sc->sc_rctl |= RCTL_LPE;
   4028 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4029 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4030 	}
   4031 
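	/*
	 * Program the hardware receive buffer size to match the mbuf
	 * cluster size; sizes above 2k use the buffer size extension
	 * (BSEX) encoding.
	 */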
   4032 	if (MCLBYTES == 2048) {
   4033 		sc->sc_rctl |= RCTL_2k;
   4034 	} else {
   4035 		if (sc->sc_type >= WM_T_82543) {
   4036 			switch (MCLBYTES) {
   4037 			case 4096:
   4038 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4039 				break;
   4040 			case 8192:
   4041 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4042 				break;
   4043 			case 16384:
   4044 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4045 				break;
   4046 			default:
   4047 				panic("wm_init: MCLBYTES %d unsupported",
   4048 				    MCLBYTES);
   4049 				break;
   4050 			}
   4051 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4052 	}
   4053 
   4054 	/* Set the receive filter. */
   4055 	wm_set_filter(sc);
   4056 
   4057 	/* Enable ECC */
   4058 	switch (sc->sc_type) {
   4059 	case WM_T_82571:
   4060 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4061 		reg |= PBA_ECC_CORR_EN;
   4062 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4063 		break;
   4064 	case WM_T_PCH_LPT:
   4065 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4066 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4067 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4068 
   4069 		reg = CSR_READ(sc, WMREG_CTRL);
   4070 		reg |= CTRL_MEHE;
   4071 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4072 		break;
   4073 	default:
   4074 		break;
   4075 	}
   4076 
   4077 	/* On 575 and later set RDT only if RX enabled */
   4078 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4079 		for (i = 0; i < WM_NRXDESC; i++)
   4080 			WM_INIT_RXDESC(sc, i);
   4081 
   4082 	sc->sc_stopping = false;
   4083 
   4084 	/* Start the one second link check clock. */
   4085 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4086 
   4087 	/* ...all done! */
   4088 	ifp->if_flags |= IFF_RUNNING;
   4089 	ifp->if_flags &= ~IFF_OACTIVE;
   4090 
   4091  out:
   4092 	sc->sc_if_flags = ifp->if_flags;
   4093 	if (error)
   4094 		log(LOG_ERR, "%s: interface not running\n",
   4095 		    device_xname(sc->sc_dev));
   4096 	return error;
   4097 }
   4098 
   4099 /*
   4100  * wm_stop:		[ifnet interface function]
   4101  *
   4102  *	Stop transmission on the interface.
   4103  */
   4104 static void
   4105 wm_stop(struct ifnet *ifp, int disable)
   4106 {
   4107 	struct wm_softc *sc = ifp->if_softc;
   4108 
   4109 	WM_BOTH_LOCK(sc);
   4110 	wm_stop_locked(ifp, disable);
   4111 	WM_BOTH_UNLOCK(sc);
   4112 }
   4113 
   4114 static void
   4115 wm_stop_locked(struct ifnet *ifp, int disable)
   4116 {
   4117 	struct wm_softc *sc = ifp->if_softc;
   4118 	struct wm_txsoft *txs;
   4119 	int i;
   4120 
   4121 	KASSERT(WM_BOTH_LOCKED(sc));
   4122 
   4123 	sc->sc_stopping = true;
   4124 
   4125 	/* Stop the one second clock. */
   4126 	callout_stop(&sc->sc_tick_ch);
   4127 
   4128 	/* Stop the 82547 Tx FIFO stall check timer. */
   4129 	if (sc->sc_type == WM_T_82547)
   4130 		callout_stop(&sc->sc_txfifo_ch);
   4131 
   4132 	if (sc->sc_flags & WM_F_HAS_MII) {
   4133 		/* Down the MII. */
   4134 		mii_down(&sc->sc_mii);
   4135 	} else {
   4136 #if 0
   4137 		/* Should we clear PHY's status properly? */
   4138 		wm_reset(sc);
   4139 #endif
   4140 	}
   4141 
   4142 	/* Stop the transmit and receive processes. */
   4143 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4144 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4145 	sc->sc_rctl &= ~RCTL_EN;
   4146 
   4147 	/*
   4148 	 * Clear the interrupt mask to ensure the device cannot assert its
   4149 	 * interrupt line.
   4150 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4151 	 * any currently pending or shared interrupt.
   4152 	 */
   4153 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4154 	sc->sc_icr = 0;
   4155 
   4156 	/* Release any queued transmit buffers. */
   4157 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4158 		txs = &sc->sc_txsoft[i];
   4159 		if (txs->txs_mbuf != NULL) {
   4160 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4161 			m_freem(txs->txs_mbuf);
   4162 			txs->txs_mbuf = NULL;
   4163 		}
   4164 	}
   4165 
   4166 	/* Mark the interface as down and cancel the watchdog timer. */
   4167 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4168 	ifp->if_timer = 0;
   4169 
   4170 	if (disable)
   4171 		wm_rxdrain(sc);
   4172 
   4173 #if 0 /* notyet */
   4174 	if (sc->sc_type >= WM_T_82544)
   4175 		CSR_WRITE(sc, WMREG_WUC, 0);
   4176 #endif
   4177 }
   4178 
   4179 /*
   4180  * wm_tx_offload:
   4181  *
   4182  *	Set up TCP/IP checksumming parameters for the
   4183  *	specified packet.
   4184  */
   4185 static int
   4186 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4187     uint8_t *fieldsp)
   4188 {
   4189 	struct mbuf *m0 = txs->txs_mbuf;
   4190 	struct livengood_tcpip_ctxdesc *t;
   4191 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4192 	uint32_t ipcse;
   4193 	struct ether_header *eh;
   4194 	int offset, iphl;
   4195 	uint8_t fields;
   4196 
   4197 	/*
   4198 	 * XXX It would be nice if the mbuf pkthdr had offset
   4199 	 * fields for the protocol headers.
   4200 	 */
   4201 
   4202 	eh = mtod(m0, struct ether_header *);
    4203 	switch (ntohs(eh->ether_type)) {
   4204 	case ETHERTYPE_IP:
   4205 	case ETHERTYPE_IPV6:
   4206 		offset = ETHER_HDR_LEN;
   4207 		break;
   4208 
   4209 	case ETHERTYPE_VLAN:
   4210 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4211 		break;
   4212 
   4213 	default:
   4214 		/*
   4215 		 * Don't support this protocol or encapsulation.
   4216 		 */
   4217 		*fieldsp = 0;
   4218 		*cmdp = 0;
   4219 		return 0;
   4220 	}
   4221 
   4222 	if ((m0->m_pkthdr.csum_flags &
   4223 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4224 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4225 	} else {
   4226 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4227 	}
   4228 	ipcse = offset + iphl - 1;
   4229 
   4230 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4231 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4232 	seg = 0;
   4233 	fields = 0;
   4234 
   4235 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4236 		int hlen = offset + iphl;
   4237 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4238 
   4239 		if (__predict_false(m0->m_len <
   4240 				    (hlen + sizeof(struct tcphdr)))) {
   4241 			/*
   4242 			 * TCP/IP headers are not in the first mbuf; we need
   4243 			 * to do this the slow and painful way.  Let's just
   4244 			 * hope this doesn't happen very often.
   4245 			 */
   4246 			struct tcphdr th;
   4247 
   4248 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4249 
   4250 			m_copydata(m0, hlen, sizeof(th), &th);
   4251 			if (v4) {
   4252 				struct ip ip;
   4253 
   4254 				m_copydata(m0, offset, sizeof(ip), &ip);
   4255 				ip.ip_len = 0;
   4256 				m_copyback(m0,
   4257 				    offset + offsetof(struct ip, ip_len),
   4258 				    sizeof(ip.ip_len), &ip.ip_len);
   4259 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4260 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4261 			} else {
   4262 				struct ip6_hdr ip6;
   4263 
   4264 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4265 				ip6.ip6_plen = 0;
   4266 				m_copyback(m0,
   4267 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4268 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4269 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4270 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4271 			}
   4272 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4273 			    sizeof(th.th_sum), &th.th_sum);
   4274 
   4275 			hlen += th.th_off << 2;
   4276 		} else {
   4277 			/*
   4278 			 * TCP/IP headers are in the first mbuf; we can do
   4279 			 * this the easy way.
   4280 			 */
   4281 			struct tcphdr *th;
   4282 
   4283 			if (v4) {
   4284 				struct ip *ip =
   4285 				    (void *)(mtod(m0, char *) + offset);
   4286 				th = (void *)(mtod(m0, char *) + hlen);
   4287 
   4288 				ip->ip_len = 0;
   4289 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4290 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4291 			} else {
   4292 				struct ip6_hdr *ip6 =
   4293 				    (void *)(mtod(m0, char *) + offset);
   4294 				th = (void *)(mtod(m0, char *) + hlen);
   4295 
   4296 				ip6->ip6_plen = 0;
   4297 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4298 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4299 			}
   4300 			hlen += th->th_off << 2;
   4301 		}
   4302 
   4303 		if (v4) {
   4304 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4305 			cmdlen |= WTX_TCPIP_CMD_IP;
   4306 		} else {
   4307 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4308 			ipcse = 0;
   4309 		}
   4310 		cmd |= WTX_TCPIP_CMD_TSE;
   4311 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4312 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4313 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4314 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
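
		/*
		 * The zeroed ip_len/ip6_plen and the pseudo-header
		 * checksums computed above deliberately exclude the
		 * payload length: with TSO the hardware fills in the
		 * per-segment length and completes the checksum.
		 */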
   4315 	}
   4316 
   4317 	/*
   4318 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4319 	 * offload feature, if we load the context descriptor, we
   4320 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4321 	 */
   4322 
   4323 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4324 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4325 	    WTX_TCPIP_IPCSE(ipcse);
   4326 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4327 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4328 		fields |= WTX_IXSM;
   4329 	}
   4330 
   4331 	offset += iphl;
   4332 
   4333 	if (m0->m_pkthdr.csum_flags &
   4334 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4335 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4336 		fields |= WTX_TXSM;
   4337 		tucs = WTX_TCPIP_TUCSS(offset) |
   4338 		    WTX_TCPIP_TUCSO(offset +
   4339 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4340 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4341 	} else if ((m0->m_pkthdr.csum_flags &
   4342 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4343 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4344 		fields |= WTX_TXSM;
   4345 		tucs = WTX_TCPIP_TUCSS(offset) |
   4346 		    WTX_TCPIP_TUCSO(offset +
   4347 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4348 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4349 	} else {
   4350 		/* Just initialize it to a valid TCP context. */
   4351 		tucs = WTX_TCPIP_TUCSS(offset) |
   4352 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4353 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4354 	}
   4355 
   4356 	/* Fill in the context descriptor. */
   4357 	t = (struct livengood_tcpip_ctxdesc *)
   4358 	    &sc->sc_txdescs[sc->sc_txnext];
   4359 	t->tcpip_ipcs = htole32(ipcs);
   4360 	t->tcpip_tucs = htole32(tucs);
   4361 	t->tcpip_cmdlen = htole32(cmdlen);
   4362 	t->tcpip_seg = htole32(seg);
   4363 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4364 
   4365 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4366 	txs->txs_ndesc++;
   4367 
   4368 	*cmdp = cmd;
   4369 	*fieldsp = fields;
   4370 
   4371 	return 0;
   4372 }
   4373 
   4374 static void
   4375 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4376 {
   4377 	struct mbuf *m;
   4378 	int i;
   4379 
   4380 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4381 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4382 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4383 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4384 		    m->m_data, m->m_len, m->m_flags);
   4385 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4386 	    i, i == 1 ? "" : "s");
   4387 }
   4388 
   4389 /*
   4390  * wm_82547_txfifo_stall:
   4391  *
   4392  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4393  *	reset the FIFO pointers, and restart packet transmission.
   4394  */
   4395 static void
   4396 wm_82547_txfifo_stall(void *arg)
   4397 {
   4398 	struct wm_softc *sc = arg;
   4399 #ifndef WM_MPSAFE
   4400 	int s;
   4401 
   4402 	s = splnet();
   4403 #endif
   4404 	WM_TX_LOCK(sc);
   4405 
   4406 	if (sc->sc_stopping)
   4407 		goto out;
   4408 
   4409 	if (sc->sc_txfifo_stall) {
   4410 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4411 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4412 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4413 			/*
   4414 			 * Packets have drained.  Stop transmitter, reset
   4415 			 * FIFO pointers, restart transmitter, and kick
   4416 			 * the packet queue.
   4417 			 */
   4418 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4419 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4420 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4421 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4422 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4423 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4424 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4425 			CSR_WRITE_FLUSH(sc);
   4426 
   4427 			sc->sc_txfifo_head = 0;
   4428 			sc->sc_txfifo_stall = 0;
   4429 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4430 		} else {
   4431 			/*
   4432 			 * Still waiting for packets to drain; try again in
   4433 			 * another tick.
   4434 			 */
   4435 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4436 		}
   4437 	}
   4438 
   4439 out:
   4440 	WM_TX_UNLOCK(sc);
   4441 #ifndef WM_MPSAFE
   4442 	splx(s);
   4443 #endif
   4444 }
   4445 
   4446 /*
   4447  * wm_82547_txfifo_bugchk:
   4448  *
   4449  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4450  *	prevent enqueueing a packet that would wrap around the end
    4451  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   4452  *
   4453  *	We do this by checking the amount of space before the end
   4454  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4455  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4456  *	the internal FIFO pointers to the beginning, and restart
   4457  *	transmission on the interface.
   4458  */
   4459 #define	WM_FIFO_HDR		0x10
   4460 #define	WM_82547_PAD_LEN	0x3e0
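/*
 * Both constants are byte counts; the FIFO is accounted in 16-byte
 * (WM_FIFO_HDR) blocks, so packet lengths are rounded up below.
 */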
   4461 static int
   4462 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4463 {
   4464 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4465 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4466 
   4467 	/* Just return if already stalled. */
   4468 	if (sc->sc_txfifo_stall)
   4469 		return 1;
   4470 
   4471 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4472 		/* Stall only occurs in half-duplex mode. */
   4473 		goto send_packet;
   4474 	}
   4475 
   4476 	if (len >= WM_82547_PAD_LEN + space) {
   4477 		sc->sc_txfifo_stall = 1;
   4478 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4479 		return 1;
   4480 	}
   4481 
   4482  send_packet:
   4483 	sc->sc_txfifo_head += len;
   4484 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4485 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4486 
   4487 	return 0;
   4488 }
   4489 
   4490 /*
   4491  * wm_start:		[ifnet interface function]
   4492  *
   4493  *	Start packet transmission on the interface.
   4494  */
   4495 static void
   4496 wm_start(struct ifnet *ifp)
   4497 {
   4498 	struct wm_softc *sc = ifp->if_softc;
   4499 
   4500 	WM_TX_LOCK(sc);
   4501 	if (!sc->sc_stopping)
   4502 		wm_start_locked(ifp);
   4503 	WM_TX_UNLOCK(sc);
   4504 }
   4505 
   4506 static void
   4507 wm_start_locked(struct ifnet *ifp)
   4508 {
   4509 	struct wm_softc *sc = ifp->if_softc;
   4510 	struct mbuf *m0;
   4511 	struct m_tag *mtag;
   4512 	struct wm_txsoft *txs;
   4513 	bus_dmamap_t dmamap;
   4514 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4515 	bus_addr_t curaddr;
   4516 	bus_size_t seglen, curlen;
   4517 	uint32_t cksumcmd;
   4518 	uint8_t cksumfields;
   4519 
   4520 	KASSERT(WM_TX_LOCKED(sc));
   4521 
   4522 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4523 		return;
   4524 
   4525 	/* Remember the previous number of free descriptors. */
   4526 	ofree = sc->sc_txfree;
   4527 
   4528 	/*
   4529 	 * Loop through the send queue, setting up transmit descriptors
   4530 	 * until we drain the queue, or use up all available transmit
   4531 	 * descriptors.
   4532 	 */
   4533 	for (;;) {
   4534 		m0 = NULL;
   4535 
   4536 		/* Get a work queue entry. */
   4537 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4538 			wm_txintr(sc);
   4539 			if (sc->sc_txsfree == 0) {
   4540 				DPRINTF(WM_DEBUG_TX,
   4541 				    ("%s: TX: no free job descriptors\n",
   4542 					device_xname(sc->sc_dev)));
   4543 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4544 				break;
   4545 			}
   4546 		}
   4547 
   4548 		/* Grab a packet off the queue. */
   4549 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4550 		if (m0 == NULL)
   4551 			break;
   4552 
   4553 		DPRINTF(WM_DEBUG_TX,
   4554 		    ("%s: TX: have packet to transmit: %p\n",
   4555 		    device_xname(sc->sc_dev), m0));
   4556 
   4557 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4558 		dmamap = txs->txs_dmamap;
   4559 
   4560 		use_tso = (m0->m_pkthdr.csum_flags &
   4561 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4562 
   4563 		/*
   4564 		 * So says the Linux driver:
   4565 		 * The controller does a simple calculation to make sure
   4566 		 * there is enough room in the FIFO before initiating the
   4567 		 * DMA for each buffer.  The calc is:
   4568 		 *	4 = ceil(buffer len / MSS)
   4569 		 * To make sure we don't overrun the FIFO, adjust the max
   4570 		 * buffer len if the MSS drops.
   4571 		 */
   4572 		dmamap->dm_maxsegsz =
   4573 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4574 		    ? m0->m_pkthdr.segsz << 2
   4575 		    : WTX_MAX_LEN;
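		/*
		 * E.g. a typical MSS of 1448 caps each DMA segment at
		 * 4 * 1448 = 5792 bytes.
		 */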
   4576 
   4577 		/*
   4578 		 * Load the DMA map.  If this fails, the packet either
   4579 		 * didn't fit in the allotted number of segments, or we
   4580 		 * were short on resources.  For the too-many-segments
   4581 		 * case, we simply report an error and drop the packet,
   4582 		 * since we can't sanely copy a jumbo packet to a single
   4583 		 * buffer.
   4584 		 */
   4585 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4586 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4587 		if (error) {
   4588 			if (error == EFBIG) {
   4589 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4590 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4591 				    "DMA segments, dropping...\n",
   4592 				    device_xname(sc->sc_dev));
   4593 				wm_dump_mbuf_chain(sc, m0);
   4594 				m_freem(m0);
   4595 				continue;
   4596 			}
   4597 			/*  Short on resources, just stop for now. */
   4598 			DPRINTF(WM_DEBUG_TX,
   4599 			    ("%s: TX: dmamap load failed: %d\n",
   4600 			    device_xname(sc->sc_dev), error));
   4601 			break;
   4602 		}
   4603 
   4604 		segs_needed = dmamap->dm_nsegs;
   4605 		if (use_tso) {
   4606 			/* For sentinel descriptor; see below. */
   4607 			segs_needed++;
   4608 		}
   4609 
   4610 		/*
   4611 		 * Ensure we have enough descriptors free to describe
   4612 		 * the packet.  Note, we always reserve one descriptor
   4613 		 * at the end of the ring due to the semantics of the
   4614 		 * TDT register, plus one more in the event we need
   4615 		 * to load offload context.
   4616 		 */
   4617 		if (segs_needed > sc->sc_txfree - 2) {
   4618 			/*
   4619 			 * Not enough free descriptors to transmit this
   4620 			 * packet.  We haven't committed anything yet,
   4621 			 * so just unload the DMA map, put the packet
    4622 			 * back on the queue, and punt.  Notify the upper
   4623 			 * layer that there are no more slots left.
   4624 			 */
   4625 			DPRINTF(WM_DEBUG_TX,
   4626 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4627 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4628 			    segs_needed, sc->sc_txfree - 1));
   4629 			ifp->if_flags |= IFF_OACTIVE;
   4630 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4631 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4632 			break;
   4633 		}
   4634 
   4635 		/*
   4636 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4637 		 * once we know we can transmit the packet, since we
   4638 		 * do some internal FIFO space accounting here.
   4639 		 */
   4640 		if (sc->sc_type == WM_T_82547 &&
   4641 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4642 			DPRINTF(WM_DEBUG_TX,
   4643 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4644 			    device_xname(sc->sc_dev)));
   4645 			ifp->if_flags |= IFF_OACTIVE;
   4646 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4647 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   4648 			break;
   4649 		}
   4650 
   4651 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   4652 
   4653 		DPRINTF(WM_DEBUG_TX,
   4654 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   4655 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   4656 
   4657 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   4658 
   4659 		/*
   4660 		 * Store a pointer to the packet so that we can free it
   4661 		 * later.
   4662 		 *
   4663 		 * Initially, we consider the number of descriptors the
   4664 		 * packet uses the number of DMA segments.  This may be
   4665 		 * incremented by 1 if we do checksum offload (a descriptor
   4666 		 * is used to set the checksum context).
   4667 		 */
   4668 		txs->txs_mbuf = m0;
   4669 		txs->txs_firstdesc = sc->sc_txnext;
   4670 		txs->txs_ndesc = segs_needed;
   4671 
   4672 		/* Set up offload parameters for this packet. */
   4673 		if (m0->m_pkthdr.csum_flags &
   4674 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   4675 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   4676 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   4677 			if (wm_tx_offload(sc, txs, &cksumcmd,
   4678 					  &cksumfields) != 0) {
   4679 				/* Error message already displayed. */
   4680 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   4681 				continue;
   4682 			}
   4683 		} else {
   4684 			cksumcmd = 0;
   4685 			cksumfields = 0;
   4686 		}
   4687 
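         		/*
         		 * IDE enables the transmit interrupt delay for this
         		 * packet's descriptors; IFCS has the chip insert the
         		 * frame checksum (FCS) on transmit.
         		 */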
   4688 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   4689 
   4690 		/* Sync the DMA map. */
   4691 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   4692 		    BUS_DMASYNC_PREWRITE);
   4693 
   4694 		/* Initialize the transmit descriptor. */
   4695 		for (nexttx = sc->sc_txnext, seg = 0;
   4696 		     seg < dmamap->dm_nsegs; seg++) {
   4697 			for (seglen = dmamap->dm_segs[seg].ds_len,
   4698 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   4699 			     seglen != 0;
   4700 			     curaddr += curlen, seglen -= curlen,
   4701 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   4702 				curlen = seglen;
   4703 
   4704 				/*
   4705 				 * So says the Linux driver:
   4706 				 * Work around for premature descriptor
   4707 				 * write-backs in TSO mode.  Append a
   4708 				 * 4-byte sentinel descriptor.
   4709 				 */
   4710 				if (use_tso &&
   4711 				    seg == dmamap->dm_nsegs - 1 &&
   4712 				    curlen > 8)
   4713 					curlen -= 4;
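         				/*
         				 * Trimming 4 bytes here leaves part
         				 * of the segment unconsumed, so the
         				 * loop emits one extra descriptor for
         				 * it: the sentinel counted in
         				 * segs_needed above.
         				 */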
   4714 
   4715 				wm_set_dma_addr(
   4716 				    &sc->sc_txdescs[nexttx].wtx_addr,
   4717 				    curaddr);
   4718 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   4719 				    htole32(cksumcmd | curlen);
   4720 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   4721 				    0;
   4722 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   4723 				    cksumfields;
   4724 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   4725 				lasttx = nexttx;
   4726 
   4727 				DPRINTF(WM_DEBUG_TX,
   4728 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   4729 				     "len %#04zx\n",
   4730 				    device_xname(sc->sc_dev), nexttx,
   4731 				    (uint64_t)curaddr, curlen));
   4732 			}
   4733 		}
   4734 
   4735 		KASSERT(lasttx != -1);
   4736 
   4737 		/*
   4738 		 * Set up the command byte on the last descriptor of
   4739 		 * the packet.  If we're in the interrupt delay window,
   4740 		 * delay the interrupt.
   4741 		 */
   4742 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4743 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
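         		/*
         		 * (EOP marks the final descriptor of the frame; RS
         		 * requests a status write-back, which sets WTX_ST_DD
         		 * so wm_txintr() can see the job complete.)
         		 */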
   4744 
   4745 		/*
   4746 		 * If VLANs are enabled and the packet has a VLAN tag, set
   4747 		 * up the descriptor to encapsulate the packet for us.
   4748 		 *
   4749 		 * This is only valid on the last descriptor of the packet.
   4750 		 */
   4751 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4752 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4753 			    htole32(WTX_CMD_VLE);
   4754 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   4755 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   4756 		}
   4757 
   4758 		txs->txs_lastdesc = lasttx;
   4759 
   4760 		DPRINTF(WM_DEBUG_TX,
   4761 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   4762 		    device_xname(sc->sc_dev),
   4763 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   4764 
   4765 		/* Sync the descriptors we're using. */
   4766 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   4767 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4768 
   4769 		/* Give the packet to the chip. */
   4770 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   4771 
   4772 		DPRINTF(WM_DEBUG_TX,
   4773 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   4774 
   4775 		DPRINTF(WM_DEBUG_TX,
   4776 		    ("%s: TX: finished transmitting packet, job %d\n",
   4777 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   4778 
   4779 		/* Advance the tx pointer. */
   4780 		sc->sc_txfree -= txs->txs_ndesc;
   4781 		sc->sc_txnext = nexttx;
   4782 
   4783 		sc->sc_txsfree--;
   4784 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   4785 
   4786 		/* Pass the packet to any BPF listeners. */
   4787 		bpf_mtap(ifp, m0);
   4788 	}
   4789 
   4790 	if (m0 != NULL) {
   4791 		ifp->if_flags |= IFF_OACTIVE;
   4792 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4793 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   4794 		m_freem(m0);
   4795 	}
   4796 
   4797 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   4798 		/* No more slots; notify upper layer. */
   4799 		ifp->if_flags |= IFF_OACTIVE;
   4800 	}
   4801 
   4802 	if (sc->sc_txfree != ofree) {
   4803 		/* Set a watchdog timer in case the chip flakes out. */
   4804 		ifp->if_timer = 5;
   4805 	}
   4806 }
   4807 
   4808 /*
   4809  * wm_nq_tx_offload:
   4810  *
   4811  *	Set up TCP/IP checksumming parameters for the
   4812  *	specified packet, for NEWQUEUE devices
   4813  */
   4814 static int
   4815 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   4816     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   4817 {
   4818 	struct mbuf *m0 = txs->txs_mbuf;
   4819 	struct m_tag *mtag;
   4820 	uint32_t vl_len, mssidx, cmdc;
   4821 	struct ether_header *eh;
   4822 	int offset, iphl;
   4823 
   4824 	/*
   4825 	 * XXX It would be nice if the mbuf pkthdr had offset
   4826 	 * fields for the protocol headers.
   4827 	 */
   4828 	*cmdlenp = 0;
   4829 	*fieldsp = 0;
   4830 
   4831 	eh = mtod(m0, struct ether_header *);
   4832 	switch (htons(eh->ether_type)) {
   4833 	case ETHERTYPE_IP:
   4834 	case ETHERTYPE_IPV6:
   4835 		offset = ETHER_HDR_LEN;
   4836 		break;
   4837 
   4838 	case ETHERTYPE_VLAN:
   4839 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4840 		break;
   4841 
   4842 	default:
   4843 		/* Don't support this protocol or encapsulation. */
   4844 		*do_csum = false;
   4845 		return 0;
   4846 	}
   4847 	*do_csum = true;
   4848 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   4849 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   4850 
   4851 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   4852 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   4853 
   4854 	if ((m0->m_pkthdr.csum_flags &
   4855 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   4856 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4857 	} else {
   4858 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4859 	}
   4860 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   4861 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   4862 
   4863 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4864 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   4865 		     << NQTXC_VLLEN_VLAN_SHIFT);
   4866 		*cmdlenp |= NQTX_CMD_VLE;
   4867 	}
   4868 
   4869 	mssidx = 0;
   4870 
   4871 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4872 		int hlen = offset + iphl;
   4873 		int tcp_hlen;
   4874 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4875 
   4876 		if (__predict_false(m0->m_len <
   4877 				    (hlen + sizeof(struct tcphdr)))) {
   4878 			/*
   4879 			 * TCP/IP headers are not in the first mbuf; we need
   4880 			 * to do this the slow and painful way.  Let's just
   4881 			 * hope this doesn't happen very often.
   4882 			 */
   4883 			struct tcphdr th;
   4884 
   4885 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4886 
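         			/*
         			 * As on other e1000-class TSO engines, the
         			 * chip expects th_sum preseeded with a
         			 * pseudo-header checksum computed with a zero
         			 * length, and the IP length field cleared;
         			 * the chip fills in per-segment values itself.
         			 */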
   4887 			m_copydata(m0, hlen, sizeof(th), &th);
   4888 			if (v4) {
   4889 				struct ip ip;
   4890 
   4891 				m_copydata(m0, offset, sizeof(ip), &ip);
   4892 				ip.ip_len = 0;
   4893 				m_copyback(m0,
   4894 				    offset + offsetof(struct ip, ip_len),
   4895 				    sizeof(ip.ip_len), &ip.ip_len);
   4896 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4897 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4898 			} else {
   4899 				struct ip6_hdr ip6;
   4900 
   4901 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4902 				ip6.ip6_plen = 0;
   4903 				m_copyback(m0,
   4904 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4905 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4906 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4907 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4908 			}
   4909 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4910 			    sizeof(th.th_sum), &th.th_sum);
   4911 
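         			/* th_off is in 32-bit words; convert to bytes. */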
   4912 			tcp_hlen = th.th_off << 2;
   4913 		} else {
   4914 			/*
   4915 			 * TCP/IP headers are in the first mbuf; we can do
   4916 			 * this the easy way.
   4917 			 */
   4918 			struct tcphdr *th;
   4919 
   4920 			if (v4) {
   4921 				struct ip *ip =
   4922 				    (void *)(mtod(m0, char *) + offset);
   4923 				th = (void *)(mtod(m0, char *) + hlen);
   4924 
   4925 				ip->ip_len = 0;
   4926 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4927 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4928 			} else {
   4929 				struct ip6_hdr *ip6 =
   4930 				    (void *)(mtod(m0, char *) + offset);
   4931 				th = (void *)(mtod(m0, char *) + hlen);
   4932 
   4933 				ip6->ip6_plen = 0;
   4934 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4935 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4936 			}
   4937 			tcp_hlen = th->th_off << 2;
   4938 		}
   4939 		hlen += tcp_hlen;
   4940 		*cmdlenp |= NQTX_CMD_TSE;
   4941 
   4942 		if (v4) {
   4943 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4944 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   4945 		} else {
   4946 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4947 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   4948 		}
   4949 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   4950 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4951 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   4952 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   4953 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   4954 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   4955 	} else {
   4956 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   4957 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4958 	}
   4959 
   4960 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   4961 		*fieldsp |= NQTXD_FIELDS_IXSM;
   4962 		cmdc |= NQTXC_CMD_IP4;
   4963 	}
   4964 
   4965 	if (m0->m_pkthdr.csum_flags &
   4966 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4967 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4968 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4969 			cmdc |= NQTXC_CMD_TCP;
   4970 		} else {
   4971 			cmdc |= NQTXC_CMD_UDP;
   4972 		}
   4973 		cmdc |= NQTXC_CMD_IP4;
   4974 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4975 	}
   4976 	if (m0->m_pkthdr.csum_flags &
   4977 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4978 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4979 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4980 			cmdc |= NQTXC_CMD_TCP;
   4981 		} else {
   4982 			cmdc |= NQTXC_CMD_UDP;
   4983 		}
   4984 		cmdc |= NQTXC_CMD_IP6;
   4985 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4986 	}
   4987 
   4988 	/* Fill in the context descriptor. */
   4989 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   4990 	    htole32(vl_len);
   4991 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   4992 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   4993 	    htole32(cmdc);
   4994 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   4995 	    htole32(mssidx);
   4996 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4997 	DPRINTF(WM_DEBUG_TX,
   4998 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   4999 	    sc->sc_txnext, 0, vl_len));
   5000 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   5001 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   5002 	txs->txs_ndesc++;
   5003 	return 0;
   5004 }
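         
         /*
          * Rough layout of the advanced context descriptor filled in above,
          * as implied by the NQTXC_* shift/mask macros (see the i8257x
          * family datasheets for the authoritative definitions):
          *
          *	vl_len:	VLAN tag | MACLEN (L2 hdr len) | IPLEN (L3 hdr len)
          *	mssidx:	MSS (TSO segment size) | L4LEN (TCP hdr len)
          */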
   5005 
   5006 /*
   5007  * wm_nq_start:		[ifnet interface function]
   5008  *
   5009  *	Start packet transmission on the interface for NEWQUEUE devices
   5010  */
   5011 static void
   5012 wm_nq_start(struct ifnet *ifp)
   5013 {
   5014 	struct wm_softc *sc = ifp->if_softc;
   5015 
   5016 	WM_TX_LOCK(sc);
   5017 	if (!sc->sc_stopping)
   5018 		wm_nq_start_locked(ifp);
   5019 	WM_TX_UNLOCK(sc);
   5020 }
   5021 
   5022 static void
   5023 wm_nq_start_locked(struct ifnet *ifp)
   5024 {
   5025 	struct wm_softc *sc = ifp->if_softc;
   5026 	struct mbuf *m0;
   5027 	struct m_tag *mtag;
   5028 	struct wm_txsoft *txs;
   5029 	bus_dmamap_t dmamap;
   5030 	int error, nexttx, lasttx = -1, seg, segs_needed;
   5031 	bool do_csum, sent;
   5032 
   5033 	KASSERT(WM_TX_LOCKED(sc));
   5034 
   5035 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5036 		return;
   5037 
   5038 	sent = false;
   5039 
   5040 	/*
   5041 	 * Loop through the send queue, setting up transmit descriptors
   5042 	 * until we drain the queue, or use up all available transmit
   5043 	 * descriptors.
   5044 	 */
   5045 	for (;;) {
   5046 		m0 = NULL;
   5047 
   5048 		/* Get a work queue entry. */
   5049 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   5050 			wm_txintr(sc);
   5051 			if (sc->sc_txsfree == 0) {
   5052 				DPRINTF(WM_DEBUG_TX,
   5053 				    ("%s: TX: no free job descriptors\n",
   5054 					device_xname(sc->sc_dev)));
   5055 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   5056 				break;
   5057 			}
   5058 		}
   5059 
   5060 		/* Grab a packet off the queue. */
   5061 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   5062 		if (m0 == NULL)
   5063 			break;
   5064 
   5065 		DPRINTF(WM_DEBUG_TX,
   5066 		    ("%s: TX: have packet to transmit: %p\n",
   5067 		    device_xname(sc->sc_dev), m0));
   5068 
   5069 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   5070 		dmamap = txs->txs_dmamap;
   5071 
   5072 		/*
   5073 		 * Load the DMA map.  If this fails, the packet either
   5074 		 * didn't fit in the allotted number of segments, or we
   5075 		 * were short on resources.  For the too-many-segments
   5076 		 * case, we simply report an error and drop the packet,
   5077 		 * since we can't sanely copy a jumbo packet to a single
   5078 		 * buffer.
   5079 		 */
   5080 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5081 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5082 		if (error) {
   5083 			if (error == EFBIG) {
   5084 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5085 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5086 				    "DMA segments, dropping...\n",
   5087 				    device_xname(sc->sc_dev));
   5088 				wm_dump_mbuf_chain(sc, m0);
   5089 				m_freem(m0);
   5090 				continue;
   5091 			}
   5092 			/* Short on resources, just stop for now. */
   5093 			DPRINTF(WM_DEBUG_TX,
   5094 			    ("%s: TX: dmamap load failed: %d\n",
   5095 			    device_xname(sc->sc_dev), error));
   5096 			break;
   5097 		}
   5098 
   5099 		segs_needed = dmamap->dm_nsegs;
   5100 
   5101 		/*
   5102 		 * Ensure we have enough descriptors free to describe
   5103 		 * the packet.  Note, we always reserve one descriptor
   5104 		 * at the end of the ring due to the semantics of the
   5105 		 * TDT register, plus one more in the event we need
   5106 		 * to load offload context.
   5107 		 */
   5108 		if (segs_needed > sc->sc_txfree - 2) {
   5109 			/*
   5110 			 * Not enough free descriptors to transmit this
   5111 			 * packet.  We haven't committed anything yet,
   5112 			 * so just unload the DMA map, put the packet
    5113 			 * back on the queue, and punt.  Notify the upper
   5114 			 * layer that there are no more slots left.
   5115 			 */
   5116 			DPRINTF(WM_DEBUG_TX,
   5117 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5118 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   5119 			    segs_needed, sc->sc_txfree - 1));
   5120 			ifp->if_flags |= IFF_OACTIVE;
   5121 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5122 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5123 			break;
   5124 		}
   5125 
   5126 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5127 
   5128 		DPRINTF(WM_DEBUG_TX,
   5129 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5130 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5131 
   5132 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5133 
   5134 		/*
   5135 		 * Store a pointer to the packet so that we can free it
   5136 		 * later.
   5137 		 *
   5138 		 * Initially, we consider the number of descriptors the
    5139 		 * packet uses to be the number of DMA segments.  This may
    5140 		 * be incremented by 1 if we do checksum offload (a
    5141 		 * descriptor is used to set the checksum context).
   5142 		 */
   5143 		txs->txs_mbuf = m0;
   5144 		txs->txs_firstdesc = sc->sc_txnext;
   5145 		txs->txs_ndesc = segs_needed;
   5146 
   5147 		/* Set up offload parameters for this packet. */
   5148 		uint32_t cmdlen, fields, dcmdlen;
   5149 		if (m0->m_pkthdr.csum_flags &
   5150 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5151 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5152 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5153 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5154 			    &do_csum) != 0) {
   5155 				/* Error message already displayed. */
   5156 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5157 				continue;
   5158 			}
   5159 		} else {
   5160 			do_csum = false;
   5161 			cmdlen = 0;
   5162 			fields = 0;
   5163 		}
   5164 
   5165 		/* Sync the DMA map. */
   5166 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5167 		    BUS_DMASYNC_PREWRITE);
   5168 
   5169 		/* Initialize the first transmit descriptor. */
   5170 		nexttx = sc->sc_txnext;
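         		/*
         		 * NEWQUEUE chips accept both descriptor formats: with
         		 * no offload context we can use the simpler legacy
         		 * layout; otherwise use advanced (DEXT) data
         		 * descriptors.
         		 */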
   5171 		if (!do_csum) {
   5172 			/* setup a legacy descriptor */
   5173 			wm_set_dma_addr(
   5174 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5175 			    dmamap->dm_segs[0].ds_addr);
   5176 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5177 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5178 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5179 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5180 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5181 			    NULL) {
   5182 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5183 				    htole32(WTX_CMD_VLE);
   5184 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5185 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5186 			} else {
   5187 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5188 			}
   5189 			dcmdlen = 0;
   5190 		} else {
   5191 			/* setup an advanced data descriptor */
   5192 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5193 			    htole64(dmamap->dm_segs[0].ds_addr);
   5194 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5195 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
    5196 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5197 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5198 			    htole32(fields);
   5199 			DPRINTF(WM_DEBUG_TX,
   5200 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5201 			    device_xname(sc->sc_dev), nexttx,
   5202 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5203 			DPRINTF(WM_DEBUG_TX,
   5204 			    ("\t 0x%08x%08x\n", fields,
   5205 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5206 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5207 		}
   5208 
   5209 		lasttx = nexttx;
   5210 		nexttx = WM_NEXTTX(sc, nexttx);
   5211 		/*
    5212 		 * Fill in the next descriptors.  The legacy and advanced
    5213 		 * formats are the same here.
   5214 		 */
   5215 		for (seg = 1; seg < dmamap->dm_nsegs;
   5216 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5217 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5218 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5219 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5220 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5221 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5222 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5223 			lasttx = nexttx;
   5224 
   5225 			DPRINTF(WM_DEBUG_TX,
   5226 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5227 			     "len %#04zx\n",
   5228 			    device_xname(sc->sc_dev), nexttx,
   5229 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5230 			    dmamap->dm_segs[seg].ds_len));
   5231 		}
   5232 
   5233 		KASSERT(lasttx != -1);
   5234 
   5235 		/*
   5236 		 * Set up the command byte on the last descriptor of
   5237 		 * the packet.  If we're in the interrupt delay window,
   5238 		 * delay the interrupt.
   5239 		 */
   5240 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5241 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
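         		/*
         		 * (The KASSERT above checks that the legacy and
         		 * NEWQUEUE command encodings agree, so this shared
         		 * wtx_cmdlen update is valid for either format.)
         		 */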
   5242 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5243 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5244 
   5245 		txs->txs_lastdesc = lasttx;
   5246 
   5247 		DPRINTF(WM_DEBUG_TX,
   5248 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5249 		    device_xname(sc->sc_dev),
   5250 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5251 
   5252 		/* Sync the descriptors we're using. */
   5253 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5254 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5255 
   5256 		/* Give the packet to the chip. */
   5257 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5258 		sent = true;
   5259 
   5260 		DPRINTF(WM_DEBUG_TX,
   5261 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5262 
   5263 		DPRINTF(WM_DEBUG_TX,
   5264 		    ("%s: TX: finished transmitting packet, job %d\n",
   5265 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5266 
   5267 		/* Advance the tx pointer. */
   5268 		sc->sc_txfree -= txs->txs_ndesc;
   5269 		sc->sc_txnext = nexttx;
   5270 
   5271 		sc->sc_txsfree--;
   5272 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5273 
   5274 		/* Pass the packet to any BPF listeners. */
   5275 		bpf_mtap(ifp, m0);
   5276 	}
   5277 
   5278 	if (m0 != NULL) {
   5279 		ifp->if_flags |= IFF_OACTIVE;
   5280 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5281 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5282 		m_freem(m0);
   5283 	}
   5284 
   5285 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5286 		/* No more slots; notify upper layer. */
   5287 		ifp->if_flags |= IFF_OACTIVE;
   5288 	}
   5289 
   5290 	if (sent) {
   5291 		/* Set a watchdog timer in case the chip flakes out. */
   5292 		ifp->if_timer = 5;
   5293 	}
   5294 }
   5295 
   5296 /* Interrupt */
   5297 
   5298 /*
   5299  * wm_txintr:
   5300  *
   5301  *	Helper; handle transmit interrupts.
   5302  */
   5303 static void
   5304 wm_txintr(struct wm_softc *sc)
   5305 {
   5306 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5307 	struct wm_txsoft *txs;
   5308 	uint8_t status;
   5309 	int i;
   5310 
   5311 	if (sc->sc_stopping)
   5312 		return;
   5313 
   5314 	ifp->if_flags &= ~IFF_OACTIVE;
   5315 
   5316 	/*
   5317 	 * Go through the Tx list and free mbufs for those
   5318 	 * frames which have been transmitted.
   5319 	 */
   5320 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5321 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5322 		txs = &sc->sc_txsoft[i];
   5323 
   5324 		DPRINTF(WM_DEBUG_TX,
   5325 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5326 
   5327 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5328 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5329 
   5330 		status =
   5331 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
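         		/*
         		 * DD is written back by the chip once the descriptor
         		 * is processed (we set RS on each job's last
         		 * descriptor); if it's still clear, this job and all
         		 * later ones are still in flight.
         		 */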
   5332 		if ((status & WTX_ST_DD) == 0) {
   5333 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5334 			    BUS_DMASYNC_PREREAD);
   5335 			break;
   5336 		}
   5337 
   5338 		DPRINTF(WM_DEBUG_TX,
   5339 		    ("%s: TX: job %d done: descs %d..%d\n",
   5340 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5341 		    txs->txs_lastdesc));
   5342 
   5343 		/*
   5344 		 * XXX We should probably be using the statistics
   5345 		 * XXX registers, but I don't know if they exist
   5346 		 * XXX on chips before the i82544.
   5347 		 */
   5348 
   5349 #ifdef WM_EVENT_COUNTERS
   5350 		if (status & WTX_ST_TU)
   5351 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5352 #endif /* WM_EVENT_COUNTERS */
   5353 
   5354 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5355 			ifp->if_oerrors++;
   5356 			if (status & WTX_ST_LC)
   5357 				log(LOG_WARNING, "%s: late collision\n",
   5358 				    device_xname(sc->sc_dev));
   5359 			else if (status & WTX_ST_EC) {
   5360 				ifp->if_collisions += 16;
   5361 				log(LOG_WARNING, "%s: excessive collisions\n",
   5362 				    device_xname(sc->sc_dev));
   5363 			}
   5364 		} else
   5365 			ifp->if_opackets++;
   5366 
   5367 		sc->sc_txfree += txs->txs_ndesc;
   5368 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5369 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5370 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5371 		m_freem(txs->txs_mbuf);
   5372 		txs->txs_mbuf = NULL;
   5373 	}
   5374 
   5375 	/* Update the dirty transmit buffer pointer. */
   5376 	sc->sc_txsdirty = i;
   5377 	DPRINTF(WM_DEBUG_TX,
   5378 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5379 
   5380 	/*
   5381 	 * If there are no more pending transmissions, cancel the watchdog
   5382 	 * timer.
   5383 	 */
   5384 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5385 		ifp->if_timer = 0;
   5386 }
   5387 
   5388 /*
   5389  * wm_rxintr:
   5390  *
   5391  *	Helper; handle receive interrupts.
   5392  */
   5393 static void
   5394 wm_rxintr(struct wm_softc *sc)
   5395 {
   5396 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5397 	struct wm_rxsoft *rxs;
   5398 	struct mbuf *m;
   5399 	int i, len;
   5400 	uint8_t status, errors;
   5401 	uint16_t vlantag;
   5402 
   5403 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5404 		rxs = &sc->sc_rxsoft[i];
   5405 
   5406 		DPRINTF(WM_DEBUG_RX,
   5407 		    ("%s: RX: checking descriptor %d\n",
   5408 		    device_xname(sc->sc_dev), i));
   5409 
   5410 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5411 
   5412 		status = sc->sc_rxdescs[i].wrx_status;
   5413 		errors = sc->sc_rxdescs[i].wrx_errors;
   5414 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5415 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5416 
   5417 		if ((status & WRX_ST_DD) == 0) {
   5418 			/* We have processed all of the receive descriptors. */
   5419 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5420 			break;
   5421 		}
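         		/*
         		 * sc_rxdiscard is set when a buffer shortage forces
         		 * us to drop a frame mid-assembly; keep recycling
         		 * descriptors until the frame's EOP clears it.
         		 */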
   5422 
   5423 		if (__predict_false(sc->sc_rxdiscard)) {
   5424 			DPRINTF(WM_DEBUG_RX,
   5425 			    ("%s: RX: discarding contents of descriptor %d\n",
   5426 			    device_xname(sc->sc_dev), i));
   5427 			WM_INIT_RXDESC(sc, i);
   5428 			if (status & WRX_ST_EOP) {
   5429 				/* Reset our state. */
   5430 				DPRINTF(WM_DEBUG_RX,
   5431 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5432 				    device_xname(sc->sc_dev)));
   5433 				sc->sc_rxdiscard = 0;
   5434 			}
   5435 			continue;
   5436 		}
   5437 
   5438 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5439 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5440 
   5441 		m = rxs->rxs_mbuf;
   5442 
   5443 		/*
   5444 		 * Add a new receive buffer to the ring, unless of
   5445 		 * course the length is zero. Treat the latter as a
   5446 		 * failed mapping.
   5447 		 */
   5448 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5449 			/*
   5450 			 * Failed, throw away what we've done so
   5451 			 * far, and discard the rest of the packet.
   5452 			 */
   5453 			ifp->if_ierrors++;
   5454 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5455 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5456 			WM_INIT_RXDESC(sc, i);
   5457 			if ((status & WRX_ST_EOP) == 0)
   5458 				sc->sc_rxdiscard = 1;
   5459 			if (sc->sc_rxhead != NULL)
   5460 				m_freem(sc->sc_rxhead);
   5461 			WM_RXCHAIN_RESET(sc);
   5462 			DPRINTF(WM_DEBUG_RX,
   5463 			    ("%s: RX: Rx buffer allocation failed, "
   5464 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5465 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5466 			continue;
   5467 		}
   5468 
   5469 		m->m_len = len;
   5470 		sc->sc_rxlen += len;
   5471 		DPRINTF(WM_DEBUG_RX,
   5472 		    ("%s: RX: buffer at %p len %d\n",
   5473 		    device_xname(sc->sc_dev), m->m_data, len));
   5474 
   5475 		/* If this is not the end of the packet, keep looking. */
   5476 		if ((status & WRX_ST_EOP) == 0) {
   5477 			WM_RXCHAIN_LINK(sc, m);
   5478 			DPRINTF(WM_DEBUG_RX,
   5479 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5480 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5481 			continue;
   5482 		}
   5483 
   5484 		/*
    5485 		 * Okay, we have the entire packet now.  The chip is
    5486 		 * configured to include the FCS except on I350/I354 and
    5487 		 * I21[01] (not all chips can be configured to strip it),
    5488 		 * so we need to trim it.  We may also need to adjust the
    5489 		 * length of the previous mbuf in the chain if the current
    5490 		 * mbuf is too short.
    5491 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    5492 		 * register is always set on I350, so we don't trim there.
   5493 		 */
   5494 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5495 		    && (sc->sc_type != WM_T_I210)
   5496 		    && (sc->sc_type != WM_T_I211)) {
   5497 			if (m->m_len < ETHER_CRC_LEN) {
   5498 				sc->sc_rxtail->m_len
   5499 				    -= (ETHER_CRC_LEN - m->m_len);
   5500 				m->m_len = 0;
   5501 			} else
   5502 				m->m_len -= ETHER_CRC_LEN;
   5503 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5504 		} else
   5505 			len = sc->sc_rxlen;
   5506 
   5507 		WM_RXCHAIN_LINK(sc, m);
   5508 
   5509 		*sc->sc_rxtailp = NULL;
   5510 		m = sc->sc_rxhead;
   5511 
   5512 		WM_RXCHAIN_RESET(sc);
   5513 
   5514 		DPRINTF(WM_DEBUG_RX,
   5515 		    ("%s: RX: have entire packet, len -> %d\n",
   5516 		    device_xname(sc->sc_dev), len));
   5517 
   5518 		/* If an error occurred, update stats and drop the packet. */
   5519 		if (errors &
   5520 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5521 			if (errors & WRX_ER_SE)
   5522 				log(LOG_WARNING, "%s: symbol error\n",
   5523 				    device_xname(sc->sc_dev));
   5524 			else if (errors & WRX_ER_SEQ)
   5525 				log(LOG_WARNING, "%s: receive sequence error\n",
   5526 				    device_xname(sc->sc_dev));
   5527 			else if (errors & WRX_ER_CE)
   5528 				log(LOG_WARNING, "%s: CRC error\n",
   5529 				    device_xname(sc->sc_dev));
   5530 			m_freem(m);
   5531 			continue;
   5532 		}
   5533 
   5534 		/* No errors.  Receive the packet. */
   5535 		m->m_pkthdr.rcvif = ifp;
   5536 		m->m_pkthdr.len = len;
   5537 
   5538 		/*
   5539 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5540 		 * for us.  Associate the tag with the packet.
   5541 		 */
    5542 		/* XXX should check for i350 and i354 */
   5543 		if ((status & WRX_ST_VP) != 0) {
   5544 			VLAN_INPUT_TAG(ifp, m,
   5545 			    le16toh(vlantag),
   5546 			    continue);
   5547 		}
   5548 
   5549 		/* Set up checksum info for this packet. */
   5550 		if ((status & WRX_ST_IXSM) == 0) {
   5551 			if (status & WRX_ST_IPCS) {
   5552 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5553 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5554 				if (errors & WRX_ER_IPE)
   5555 					m->m_pkthdr.csum_flags |=
   5556 					    M_CSUM_IPv4_BAD;
   5557 			}
   5558 			if (status & WRX_ST_TCPCS) {
   5559 				/*
   5560 				 * Note: we don't know if this was TCP or UDP,
   5561 				 * so we just set both bits, and expect the
   5562 				 * upper layers to deal.
   5563 				 */
   5564 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5565 				m->m_pkthdr.csum_flags |=
   5566 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5567 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5568 				if (errors & WRX_ER_TCPE)
   5569 					m->m_pkthdr.csum_flags |=
   5570 					    M_CSUM_TCP_UDP_BAD;
   5571 			}
   5572 		}
   5573 
   5574 		ifp->if_ipackets++;
   5575 
   5576 		WM_RX_UNLOCK(sc);
   5577 
   5578 		/* Pass this up to any BPF listeners. */
   5579 		bpf_mtap(ifp, m);
   5580 
   5581 		/* Pass it on. */
   5582 		(*ifp->if_input)(ifp, m);
   5583 
   5584 		WM_RX_LOCK(sc);
   5585 
   5586 		if (sc->sc_stopping)
   5587 			break;
   5588 	}
   5589 
   5590 	/* Update the receive pointer. */
   5591 	sc->sc_rxptr = i;
   5592 
   5593 	DPRINTF(WM_DEBUG_RX,
   5594 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5595 }
   5596 
   5597 /*
   5598  * wm_linkintr_gmii:
   5599  *
   5600  *	Helper; handle link interrupts for GMII.
   5601  */
   5602 static void
   5603 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5604 {
   5605 
   5606 	KASSERT(WM_TX_LOCKED(sc));
   5607 
   5608 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5609 		__func__));
   5610 
   5611 	if (icr & ICR_LSC) {
   5612 		DPRINTF(WM_DEBUG_LINK,
   5613 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5614 			device_xname(sc->sc_dev)));
   5615 		mii_pollstat(&sc->sc_mii);
   5616 		if (sc->sc_type == WM_T_82543) {
   5617 			int miistatus, active;
   5618 
   5619 			/*
   5620 			 * With 82543, we need to force speed and
   5621 			 * duplex on the MAC equal to what the PHY
   5622 			 * speed and duplex configuration is.
   5623 			 */
   5624 			miistatus = sc->sc_mii.mii_media_status;
   5625 
   5626 			if (miistatus & IFM_ACTIVE) {
   5627 				active = sc->sc_mii.mii_media_active;
   5628 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5629 				switch (IFM_SUBTYPE(active)) {
   5630 				case IFM_10_T:
   5631 					sc->sc_ctrl |= CTRL_SPEED_10;
   5632 					break;
   5633 				case IFM_100_TX:
   5634 					sc->sc_ctrl |= CTRL_SPEED_100;
   5635 					break;
   5636 				case IFM_1000_T:
   5637 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5638 					break;
   5639 				default:
   5640 					/*
   5641 					 * fiber?
   5642 					 * Shoud not enter here.
    5643 					 * Should not enter here.
   5644 					printf("unknown media (%x)\n",
   5645 					    active);
   5646 					break;
   5647 				}
   5648 				if (active & IFM_FDX)
   5649 					sc->sc_ctrl |= CTRL_FD;
   5650 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5651 			}
   5652 		} else if ((sc->sc_type == WM_T_ICH8)
   5653 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   5654 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   5655 		} else if (sc->sc_type == WM_T_PCH) {
   5656 			wm_k1_gig_workaround_hv(sc,
   5657 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   5658 		}
   5659 
   5660 		if ((sc->sc_phytype == WMPHY_82578)
   5661 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   5662 			== IFM_1000_T)) {
   5663 
   5664 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   5665 				delay(200*1000); /* XXX too big */
   5666 
   5667 				/* Link stall fix for link up */
   5668 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5669 				    HV_MUX_DATA_CTRL,
   5670 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   5671 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   5672 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5673 				    HV_MUX_DATA_CTRL,
   5674 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   5675 			}
   5676 		}
   5677 	} else if (icr & ICR_RXSEQ) {
   5678 		DPRINTF(WM_DEBUG_LINK,
   5679 		    ("%s: LINK Receive sequence error\n",
   5680 			device_xname(sc->sc_dev)));
   5681 	}
   5682 }
   5683 
   5684 /*
   5685  * wm_linkintr_tbi:
   5686  *
   5687  *	Helper; handle link interrupts for TBI mode.
   5688  */
   5689 static void
   5690 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   5691 {
   5692 	uint32_t status;
   5693 
   5694 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5695 		__func__));
   5696 
   5697 	status = CSR_READ(sc, WMREG_STATUS);
   5698 	if (icr & ICR_LSC) {
   5699 		if (status & STATUS_LU) {
   5700 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   5701 			    device_xname(sc->sc_dev),
   5702 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   5703 			/*
   5704 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   5705 			 * so we should update sc->sc_ctrl
   5706 			 */
   5707 
   5708 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5709 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   5710 			sc->sc_fcrtl &= ~FCRTL_XONE;
   5711 			if (status & STATUS_FD)
   5712 				sc->sc_tctl |=
   5713 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5714 			else
   5715 				sc->sc_tctl |=
   5716 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   5717 			if (sc->sc_ctrl & CTRL_TFCE)
   5718 				sc->sc_fcrtl |= FCRTL_XONE;
   5719 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5720 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   5721 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   5722 				      sc->sc_fcrtl);
   5723 			sc->sc_tbi_linkup = 1;
   5724 		} else {
   5725 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   5726 			    device_xname(sc->sc_dev)));
   5727 			sc->sc_tbi_linkup = 0;
   5728 		}
   5729 		wm_tbi_set_linkled(sc);
   5730 	} else if (icr & ICR_RXSEQ) {
   5731 		DPRINTF(WM_DEBUG_LINK,
   5732 		    ("%s: LINK: Receive sequence error\n",
   5733 		    device_xname(sc->sc_dev)));
   5734 	}
   5735 }
   5736 
   5737 /*
   5738  * wm_linkintr:
   5739  *
   5740  *	Helper; handle link interrupts.
   5741  */
   5742 static void
   5743 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   5744 {
   5745 
   5746 	if (sc->sc_flags & WM_F_HAS_MII)
   5747 		wm_linkintr_gmii(sc, icr);
   5748 	else
   5749 		wm_linkintr_tbi(sc, icr);
   5750 }
   5751 
   5752 /*
   5753  * wm_intr:
   5754  *
   5755  *	Interrupt service routine.
   5756  */
   5757 static int
   5758 wm_intr(void *arg)
   5759 {
   5760 	struct wm_softc *sc = arg;
   5761 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5762 	uint32_t icr;
   5763 	int handled = 0;
   5764 
   5765 	while (1 /* CONSTCOND */) {
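         		/*
         		 * Reading ICR acknowledges and clears the asserted
         		 * causes, so keep looping until none of the causes we
         		 * enabled remain set.
         		 */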
   5766 		icr = CSR_READ(sc, WMREG_ICR);
   5767 		if ((icr & sc->sc_icr) == 0)
   5768 			break;
   5769 		rnd_add_uint32(&sc->rnd_source, icr);
   5770 
   5771 		WM_RX_LOCK(sc);
   5772 
   5773 		if (sc->sc_stopping) {
   5774 			WM_RX_UNLOCK(sc);
   5775 			break;
   5776 		}
   5777 
   5778 		handled = 1;
   5779 
   5780 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5781 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   5782 			DPRINTF(WM_DEBUG_RX,
   5783 			    ("%s: RX: got Rx intr 0x%08x\n",
   5784 			    device_xname(sc->sc_dev),
   5785 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   5786 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   5787 		}
   5788 #endif
   5789 		wm_rxintr(sc);
   5790 
   5791 		WM_RX_UNLOCK(sc);
   5792 		WM_TX_LOCK(sc);
   5793 
   5794 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5795 		if (icr & ICR_TXDW) {
   5796 			DPRINTF(WM_DEBUG_TX,
   5797 			    ("%s: TX: got TXDW interrupt\n",
   5798 			    device_xname(sc->sc_dev)));
   5799 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   5800 		}
   5801 #endif
   5802 		wm_txintr(sc);
   5803 
   5804 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   5805 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   5806 			wm_linkintr(sc, icr);
   5807 		}
   5808 
   5809 		WM_TX_UNLOCK(sc);
   5810 
   5811 		if (icr & ICR_RXO) {
   5812 #if defined(WM_DEBUG)
   5813 			log(LOG_WARNING, "%s: Receive overrun\n",
   5814 			    device_xname(sc->sc_dev));
   5815 #endif /* defined(WM_DEBUG) */
   5816 		}
   5817 	}
   5818 
   5819 	if (handled) {
   5820 		/* Try to get more packets going. */
   5821 		ifp->if_start(ifp);
   5822 	}
   5823 
   5824 	return handled;
   5825 }
   5826 
   5827 /*
   5828  * Media related.
   5829  * GMII, SGMII, TBI (and SERDES)
   5830  */
   5831 
   5832 /* GMII related */
   5833 
   5834 /*
   5835  * wm_gmii_reset:
   5836  *
   5837  *	Reset the PHY.
   5838  */
   5839 static void
   5840 wm_gmii_reset(struct wm_softc *sc)
   5841 {
   5842 	uint32_t reg;
   5843 	int rv;
   5844 
   5845 	/* get phy semaphore */
   5846 	switch (sc->sc_type) {
   5847 	case WM_T_82571:
   5848 	case WM_T_82572:
   5849 	case WM_T_82573:
   5850 	case WM_T_82574:
   5851 	case WM_T_82583:
   5852 		 /* XXX should get sw semaphore, too */
   5853 		rv = wm_get_swsm_semaphore(sc);
   5854 		break;
   5855 	case WM_T_82575:
   5856 	case WM_T_82576:
   5857 	case WM_T_82580:
   5858 	case WM_T_82580ER:
   5859 	case WM_T_I350:
   5860 	case WM_T_I354:
   5861 	case WM_T_I210:
   5862 	case WM_T_I211:
   5863 	case WM_T_80003:
   5864 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5865 		break;
   5866 	case WM_T_ICH8:
   5867 	case WM_T_ICH9:
   5868 	case WM_T_ICH10:
   5869 	case WM_T_PCH:
   5870 	case WM_T_PCH2:
   5871 	case WM_T_PCH_LPT:
   5872 		rv = wm_get_swfwhw_semaphore(sc);
   5873 		break;
   5874 	default:
    5875 		/* nothing to do */
   5876 		rv = 0;
   5877 		break;
   5878 	}
   5879 	if (rv != 0) {
   5880 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   5881 		    __func__);
   5882 		return;
   5883 	}
   5884 
   5885 	switch (sc->sc_type) {
   5886 	case WM_T_82542_2_0:
   5887 	case WM_T_82542_2_1:
   5888 		/* null */
   5889 		break;
   5890 	case WM_T_82543:
   5891 		/*
   5892 		 * With 82543, we need to force speed and duplex on the MAC
   5893 		 * equal to what the PHY speed and duplex configuration is.
   5894 		 * In addition, we need to perform a hardware reset on the PHY
   5895 		 * to take it out of reset.
   5896 		 */
   5897 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   5898 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5899 
   5900 		/* The PHY reset pin is active-low. */
   5901 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5902 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   5903 		    CTRL_EXT_SWDPIN(4));
   5904 		reg |= CTRL_EXT_SWDPIO(4);
   5905 
   5906 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5907 		CSR_WRITE_FLUSH(sc);
   5908 		delay(10*1000);
   5909 
   5910 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   5911 		CSR_WRITE_FLUSH(sc);
   5912 		delay(150);
   5913 #if 0
   5914 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   5915 #endif
   5916 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   5917 		break;
   5918 	case WM_T_82544:	/* reset 10000us */
   5919 	case WM_T_82540:
   5920 	case WM_T_82545:
   5921 	case WM_T_82545_3:
   5922 	case WM_T_82546:
   5923 	case WM_T_82546_3:
   5924 	case WM_T_82541:
   5925 	case WM_T_82541_2:
   5926 	case WM_T_82547:
   5927 	case WM_T_82547_2:
   5928 	case WM_T_82571:	/* reset 100us */
   5929 	case WM_T_82572:
   5930 	case WM_T_82573:
   5931 	case WM_T_82574:
   5932 	case WM_T_82575:
   5933 	case WM_T_82576:
   5934 	case WM_T_82580:
   5935 	case WM_T_82580ER:
   5936 	case WM_T_I350:
   5937 	case WM_T_I354:
   5938 	case WM_T_I210:
   5939 	case WM_T_I211:
   5940 	case WM_T_82583:
   5941 	case WM_T_80003:
   5942 		/* generic reset */
   5943 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5944 		CSR_WRITE_FLUSH(sc);
   5945 		delay(20000);
   5946 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5947 		CSR_WRITE_FLUSH(sc);
   5948 		delay(20000);
   5949 
   5950 		if ((sc->sc_type == WM_T_82541)
   5951 		    || (sc->sc_type == WM_T_82541_2)
   5952 		    || (sc->sc_type == WM_T_82547)
   5953 		    || (sc->sc_type == WM_T_82547_2)) {
    5954 			/* workarounds for igp are done in igp_reset() */
   5955 			/* XXX add code to set LED after phy reset */
   5956 		}
   5957 		break;
   5958 	case WM_T_ICH8:
   5959 	case WM_T_ICH9:
   5960 	case WM_T_ICH10:
   5961 	case WM_T_PCH:
   5962 	case WM_T_PCH2:
   5963 	case WM_T_PCH_LPT:
   5964 		/* generic reset */
   5965 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5966 		CSR_WRITE_FLUSH(sc);
   5967 		delay(100);
   5968 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5969 		CSR_WRITE_FLUSH(sc);
   5970 		delay(150);
   5971 		break;
   5972 	default:
   5973 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   5974 		    __func__);
   5975 		break;
   5976 	}
   5977 
   5978 	/* release PHY semaphore */
   5979 	switch (sc->sc_type) {
   5980 	case WM_T_82571:
   5981 	case WM_T_82572:
   5982 	case WM_T_82573:
   5983 	case WM_T_82574:
   5984 	case WM_T_82583:
   5985 		 /* XXX should put sw semaphore, too */
   5986 		wm_put_swsm_semaphore(sc);
   5987 		break;
   5988 	case WM_T_82575:
   5989 	case WM_T_82576:
   5990 	case WM_T_82580:
   5991 	case WM_T_82580ER:
   5992 	case WM_T_I350:
   5993 	case WM_T_I354:
   5994 	case WM_T_I210:
   5995 	case WM_T_I211:
   5996 	case WM_T_80003:
   5997 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5998 		break;
   5999 	case WM_T_ICH8:
   6000 	case WM_T_ICH9:
   6001 	case WM_T_ICH10:
   6002 	case WM_T_PCH:
   6003 	case WM_T_PCH2:
   6004 	case WM_T_PCH_LPT:
   6005 		wm_put_swfwhw_semaphore(sc);
   6006 		break;
   6007 	default:
    6008 		/* nothing to do */
   6010 		break;
   6011 	}
   6012 
   6013 	/* get_cfg_done */
   6014 	wm_get_cfg_done(sc);
   6015 
   6016 	/* extra setup */
   6017 	switch (sc->sc_type) {
   6018 	case WM_T_82542_2_0:
   6019 	case WM_T_82542_2_1:
   6020 	case WM_T_82543:
   6021 	case WM_T_82544:
   6022 	case WM_T_82540:
   6023 	case WM_T_82545:
   6024 	case WM_T_82545_3:
   6025 	case WM_T_82546:
   6026 	case WM_T_82546_3:
   6027 	case WM_T_82541_2:
   6028 	case WM_T_82547_2:
   6029 	case WM_T_82571:
   6030 	case WM_T_82572:
   6031 	case WM_T_82573:
   6032 	case WM_T_82574:
   6033 	case WM_T_82575:
   6034 	case WM_T_82576:
   6035 	case WM_T_82580:
   6036 	case WM_T_82580ER:
   6037 	case WM_T_I350:
   6038 	case WM_T_I354:
   6039 	case WM_T_I210:
   6040 	case WM_T_I211:
   6041 	case WM_T_82583:
   6042 	case WM_T_80003:
   6043 		/* null */
   6044 		break;
   6045 	case WM_T_82541:
   6046 	case WM_T_82547:
    6047 		/* XXX Actively configure the LED after PHY reset */
   6048 		break;
   6049 	case WM_T_ICH8:
   6050 	case WM_T_ICH9:
   6051 	case WM_T_ICH10:
   6052 	case WM_T_PCH:
   6053 	case WM_T_PCH2:
   6054 	case WM_T_PCH_LPT:
    6055 		/* Allow time for h/w to get to a quiescent state after reset */
   6056 		delay(10*1000);
   6057 
   6058 		if (sc->sc_type == WM_T_PCH)
   6059 			wm_hv_phy_workaround_ich8lan(sc);
   6060 
   6061 		if (sc->sc_type == WM_T_PCH2)
   6062 			wm_lv_phy_workaround_ich8lan(sc);
   6063 
   6064 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   6065 			/*
   6066 			 * dummy read to clear the phy wakeup bit after lcd
   6067 			 * reset
   6068 			 */
   6069 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   6070 		}
   6071 
   6072 		/*
    6073 		 * XXX Configure the LCD with the extended configuration
    6074 		 * region in NVM
   6075 		 */
   6076 
   6077 		/* Configure the LCD with the OEM bits in NVM */
   6078 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6079 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6080 			/*
   6081 			 * Disable LPLU.
   6082 			 * XXX It seems that 82567 has LPLU, too.
   6083 			 */
   6084 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    6085 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   6086 			reg |= HV_OEM_BITS_ANEGNOW;
   6087 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6088 		}
   6089 		break;
   6090 	default:
   6091 		panic("%s: unknown type\n", __func__);
   6092 		break;
   6093 	}
   6094 }
   6095 
   6096 /*
   6097  * wm_get_phy_id_82575:
   6098  *
    6099  *	Return the PHY ID, or -1 on failure.
   6100  */
   6101 static int
   6102 wm_get_phy_id_82575(struct wm_softc *sc)
   6103 {
   6104 	uint32_t reg;
   6105 	int phyid = -1;
   6106 
   6107 	/* XXX */
   6108 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6109 		return -1;
   6110 
   6111 	if (wm_sgmii_uses_mdio(sc)) {
   6112 		switch (sc->sc_type) {
   6113 		case WM_T_82575:
   6114 		case WM_T_82576:
   6115 			reg = CSR_READ(sc, WMREG_MDIC);
   6116 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6117 			break;
   6118 		case WM_T_82580:
   6119 		case WM_T_I350:
   6120 		case WM_T_I354:
   6121 		case WM_T_I210:
   6122 		case WM_T_I211:
   6123 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6124 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6125 			break;
   6126 		default:
   6127 			return -1;
   6128 		}
   6129 	}
   6130 
   6131 	return phyid;
   6132 }
   6133 
   6134 
   6135 /*
   6136  * wm_gmii_mediainit:
   6137  *
   6138  *	Initialize media for use on 1000BASE-T devices.
   6139  */
   6140 static void
   6141 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6142 {
   6143 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6144 	struct mii_data *mii = &sc->sc_mii;
   6145 	uint32_t reg;
   6146 
   6147 	/* We have GMII. */
   6148 	sc->sc_flags |= WM_F_HAS_MII;
   6149 
   6150 	if (sc->sc_type == WM_T_80003)
   6151 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6152 	else
   6153 		sc->sc_tipg = TIPG_1000T_DFLT;
   6154 
   6155 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6156 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6157 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6158 	    || (sc->sc_type == WM_T_I211)) {
   6159 		reg = CSR_READ(sc, WMREG_PHPM);
   6160 		reg &= ~PHPM_GO_LINK_D;
   6161 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6162 	}
   6163 
   6164 	/*
   6165 	 * Let the chip set speed/duplex on its own based on
   6166 	 * signals from the PHY.
   6167 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6168 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6169 	 */
   6170 	sc->sc_ctrl |= CTRL_SLU;
   6171 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6172 
   6173 	/* Initialize our media structures and probe the GMII. */
   6174 	mii->mii_ifp = ifp;
   6175 
   6176 	/*
   6177 	 * Determine the PHY access method.
   6178 	 *
   6179 	 *  For SGMII, use SGMII specific method.
   6180 	 *
   6181 	 *  For some devices, we can determine the PHY access method
   6182 	 * from sc_type.
   6183 	 *
    6184 	 *  For ICH8 variants, it's difficult to determine the PHY access
    6185 	 * method from sc_type, so use the PCI product ID for some devices.
    6186 	 * For other ICH8 variants, try igp's method first; if the PHY
    6187 	 * can't be detected, fall back to bm's method.
   6188 	 */
   6189 	switch (prodid) {
   6190 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6191 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6192 		/* 82577 */
   6193 		sc->sc_phytype = WMPHY_82577;
   6194 		mii->mii_readreg = wm_gmii_hv_readreg;
   6195 		mii->mii_writereg = wm_gmii_hv_writereg;
   6196 		break;
   6197 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6198 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6199 		/* 82578 */
   6200 		sc->sc_phytype = WMPHY_82578;
   6201 		mii->mii_readreg = wm_gmii_hv_readreg;
   6202 		mii->mii_writereg = wm_gmii_hv_writereg;
   6203 		break;
   6204 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6205 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6206 		/* 82579 */
   6207 		sc->sc_phytype = WMPHY_82579;
   6208 		mii->mii_readreg = wm_gmii_hv_readreg;
   6209 		mii->mii_writereg = wm_gmii_hv_writereg;
   6210 		break;
   6211 	case PCI_PRODUCT_INTEL_I217_LM:
   6212 	case PCI_PRODUCT_INTEL_I217_V:
   6213 	case PCI_PRODUCT_INTEL_I218_LM:
   6214 	case PCI_PRODUCT_INTEL_I218_V:
   6215 		/* I21[78] */
   6216 		mii->mii_readreg = wm_gmii_hv_readreg;
   6217 		mii->mii_writereg = wm_gmii_hv_writereg;
   6218 		break;
   6219 	case PCI_PRODUCT_INTEL_82801I_BM:
   6220 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6221 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6222 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6223 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6224 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6225 		/* 82567 */
   6226 		sc->sc_phytype = WMPHY_BM;
   6227 		mii->mii_readreg = wm_gmii_bm_readreg;
   6228 		mii->mii_writereg = wm_gmii_bm_writereg;
   6229 		break;
   6230 	default:
   6231 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    6232 		    && !wm_sgmii_uses_mdio(sc)) {
   6233 			mii->mii_readreg = wm_sgmii_readreg;
   6234 			mii->mii_writereg = wm_sgmii_writereg;
   6235 		} else if (sc->sc_type >= WM_T_80003) {
   6236 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6237 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6238 		} else if (sc->sc_type >= WM_T_I210) {
   6239 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6240 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6241 		} else if (sc->sc_type >= WM_T_82580) {
   6242 			sc->sc_phytype = WMPHY_82580;
   6243 			mii->mii_readreg = wm_gmii_82580_readreg;
   6244 			mii->mii_writereg = wm_gmii_82580_writereg;
   6245 		} else if (sc->sc_type >= WM_T_82544) {
   6246 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6247 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6248 		} else {
   6249 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6250 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6251 		}
   6252 		break;
   6253 	}
   6254 	mii->mii_statchg = wm_gmii_statchg;
   6255 
   6256 	wm_gmii_reset(sc);
   6257 
   6258 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6259 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6260 	    wm_gmii_mediastatus);
   6261 
   6262 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6263 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6264 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6265 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6266 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6267 			/* Attach only one port */
   6268 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6269 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6270 		} else {
   6271 			int i, id;
   6272 			uint32_t ctrl_ext;
   6273 
   6274 			id = wm_get_phy_id_82575(sc);
   6275 			if (id != -1) {
   6276 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6277 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6278 			}
   6279 			if ((id == -1)
   6280 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6281 				/* Power on sgmii phy if it is disabled */
   6282 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6283 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   6284 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   6285 				CSR_WRITE_FLUSH(sc);
   6286 				delay(300*1000); /* XXX too long */
   6287 
    6288 				/* try PHY addresses 1 through 7 */
   6289 				for (i = 1; i < 8; i++)
   6290 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6291 					    0xffffffff, i, MII_OFFSET_ANY,
   6292 					    MIIF_DOPAUSE);
   6293 
   6294 				/* restore previous sfp cage power state */
   6295 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6296 			}
   6297 		}
   6298 	} else {
   6299 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6300 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6301 	}
   6302 
   6303 	/*
   6304 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6305 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6306 	 */
   6307 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6308 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6309 		wm_set_mdio_slow_mode_hv(sc);
   6310 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6311 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6312 	}
   6313 
   6314 	/*
   6315 	 * (For ICH8 variants)
   6316 	 * If PHY detection failed, use BM's r/w function and retry.
   6317 	 */
   6318 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6319 		/* if failed, retry with *_bm_* */
   6320 		mii->mii_readreg = wm_gmii_bm_readreg;
   6321 		mii->mii_writereg = wm_gmii_bm_writereg;
   6322 
   6323 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6324 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6325 	}
   6326 
   6327 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6328 		/* No PHY was found */
   6329 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6330 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6331 		sc->sc_phytype = WMPHY_NONE;
   6332 	} else {
   6333 		/*
   6334 		 * PHY Found!
   6335 		 * Check PHY type.
   6336 		 */
   6337 		uint32_t model;
   6338 		struct mii_softc *child;
   6339 
   6340 		child = LIST_FIRST(&mii->mii_phys);
   6341 		if (device_is_a(child->mii_dev, "igphy")) {
   6342 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6343 
   6344 			model = isc->sc_mii.mii_mpd_model;
   6345 			if (model == MII_MODEL_yyINTEL_I82566)
   6346 				sc->sc_phytype = WMPHY_IGP_3;
   6347 		}
   6348 
   6349 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6350 	}
   6351 }
   6352 
   6353 /*
   6354  * wm_gmii_mediastatus:	[ifmedia interface function]
   6355  *
   6356  *	Get the current interface media status on a 1000BASE-T device.
   6357  */
   6358 static void
   6359 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6360 {
   6361 	struct wm_softc *sc = ifp->if_softc;
   6362 
   6363 	ether_mediastatus(ifp, ifmr);
   6364 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6365 	    | sc->sc_flowflags;
   6366 }
   6367 
   6368 /*
   6369  * wm_gmii_mediachange:	[ifmedia interface function]
   6370  *
   6371  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6372  */
   6373 static int
   6374 wm_gmii_mediachange(struct ifnet *ifp)
   6375 {
   6376 	struct wm_softc *sc = ifp->if_softc;
   6377 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6378 	int rc;
   6379 
   6380 	if ((ifp->if_flags & IFF_UP) == 0)
   6381 		return 0;
   6382 
   6383 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6384 	sc->sc_ctrl |= CTRL_SLU;
   6385 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6386 	    || (sc->sc_type > WM_T_82543)) {
   6387 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6388 	} else {
   6389 		sc->sc_ctrl &= ~CTRL_ASDE;
   6390 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6391 		if (ife->ifm_media & IFM_FDX)
   6392 			sc->sc_ctrl |= CTRL_FD;
   6393 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6394 		case IFM_10_T:
   6395 			sc->sc_ctrl |= CTRL_SPEED_10;
   6396 			break;
   6397 		case IFM_100_TX:
   6398 			sc->sc_ctrl |= CTRL_SPEED_100;
   6399 			break;
   6400 		case IFM_1000_T:
   6401 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6402 			break;
   6403 		default:
   6404 			panic("wm_gmii_mediachange: bad media 0x%x",
   6405 			    ife->ifm_media);
   6406 		}
   6407 	}
   6408 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6409 	if (sc->sc_type <= WM_T_82543)
   6410 		wm_gmii_reset(sc);
   6411 
   6412 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6413 		return 0;
   6414 	return rc;
   6415 }
   6416 
   6417 #define	MDI_IO		CTRL_SWDPIN(2)
   6418 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6419 #define	MDI_CLK		CTRL_SWDPIN(3)
   6420 
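         /*
          * The i82543 has no MDIC register; MII management frames are
          * bit-banged through two software-controlled pins: MDI_IO
          * (SWDPIN 2) carries the data and MDI_CLK (SWDPIN 3) the clock.
          * The two helpers below shift bits out and in, pulsing the clock
          * with ~10us settling delays around each edge.
          */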
   6421 static void
   6422 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6423 {
   6424 	uint32_t i, v;
   6425 
   6426 	v = CSR_READ(sc, WMREG_CTRL);
   6427 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6428 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6429 
   6430 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6431 		if (data & i)
   6432 			v |= MDI_IO;
   6433 		else
   6434 			v &= ~MDI_IO;
   6435 		CSR_WRITE(sc, WMREG_CTRL, v);
   6436 		CSR_WRITE_FLUSH(sc);
   6437 		delay(10);
   6438 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6439 		CSR_WRITE_FLUSH(sc);
   6440 		delay(10);
   6441 		CSR_WRITE(sc, WMREG_CTRL, v);
   6442 		CSR_WRITE_FLUSH(sc);
   6443 		delay(10);
   6444 	}
   6445 }
   6446 
   6447 static uint32_t
   6448 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6449 {
   6450 	uint32_t v, i, data = 0;
   6451 
   6452 	v = CSR_READ(sc, WMREG_CTRL);
   6453 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6454 	v |= CTRL_SWDPIO(3);
   6455 
   6456 	CSR_WRITE(sc, WMREG_CTRL, v);
   6457 	CSR_WRITE_FLUSH(sc);
   6458 	delay(10);
   6459 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6460 	CSR_WRITE_FLUSH(sc);
   6461 	delay(10);
   6462 	CSR_WRITE(sc, WMREG_CTRL, v);
   6463 	CSR_WRITE_FLUSH(sc);
   6464 	delay(10);
   6465 
   6466 	for (i = 0; i < 16; i++) {
   6467 		data <<= 1;
   6468 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6469 		CSR_WRITE_FLUSH(sc);
   6470 		delay(10);
   6471 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6472 			data |= 1;
   6473 		CSR_WRITE(sc, WMREG_CTRL, v);
   6474 		CSR_WRITE_FLUSH(sc);
   6475 		delay(10);
   6476 	}
   6477 
   6478 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6479 	CSR_WRITE_FLUSH(sc);
   6480 	delay(10);
   6481 	CSR_WRITE(sc, WMREG_CTRL, v);
   6482 	CSR_WRITE_FLUSH(sc);
   6483 	delay(10);
   6484 
   6485 	return data;
   6486 }
   6487 
   6488 #undef MDI_IO
   6489 #undef MDI_DIR
   6490 #undef MDI_CLK
   6491 
   6492 /*
   6493  * wm_gmii_i82543_readreg:	[mii interface function]
   6494  *
   6495  *	Read a PHY register on the GMII (i82543 version).
   6496  */
   6497 static int
   6498 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6499 {
   6500 	struct wm_softc *sc = device_private(self);
   6501 	int rv;
   6502 
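         	/*
         	 * A clause 22 read frame, sent MSB first: a 32-bit preamble
         	 * of ones, then ST (01), OP (10 = read), the 5-bit PHY address
         	 * and the 5-bit register address; the PHY then supplies the
         	 * turnaround and the 16 data bits that
         	 * wm_i82543_mii_recvbits() samples.
         	 */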
   6503 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6504 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6505 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6506 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6507 
   6508 	DPRINTF(WM_DEBUG_GMII,
   6509 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6510 	    device_xname(sc->sc_dev), phy, reg, rv));
   6511 
   6512 	return rv;
   6513 }
   6514 
   6515 /*
   6516  * wm_gmii_i82543_writereg:	[mii interface function]
   6517  *
   6518  *	Write a PHY register on the GMII (i82543 version).
   6519  */
   6520 static void
   6521 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6522 {
   6523 	struct wm_softc *sc = device_private(self);
   6524 
   6525 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6526 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6527 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6528 	    (MII_COMMAND_START << 30), 32);
   6529 }
   6530 
   6531 /*
   6532  * wm_gmii_i82544_readreg:	[mii interface function]
   6533  *
   6534  *	Read a PHY register on the GMII.
   6535  */
   6536 static int
   6537 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6538 {
   6539 	struct wm_softc *sc = device_private(self);
   6540 	uint32_t mdic = 0;
   6541 	int i, rv;
   6542 
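         	/*
         	 * Chips with an MDIC register frame the MDIO transaction in
         	 * hardware: program MDIC with the opcode and the PHY and
         	 * register addresses, then poll MDIC_READY in 50us steps until
         	 * the transaction completes or the poll loop times out.
         	 */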
   6543 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6544 	    MDIC_REGADD(reg));
   6545 
   6546 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6547 		mdic = CSR_READ(sc, WMREG_MDIC);
   6548 		if (mdic & MDIC_READY)
   6549 			break;
   6550 		delay(50);
   6551 	}
   6552 
   6553 	if ((mdic & MDIC_READY) == 0) {
   6554 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6555 		    device_xname(sc->sc_dev), phy, reg);
   6556 		rv = 0;
   6557 	} else if (mdic & MDIC_E) {
   6558 #if 0 /* This is normal if no PHY is present. */
   6559 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6560 		    device_xname(sc->sc_dev), phy, reg);
   6561 #endif
   6562 		rv = 0;
   6563 	} else {
   6564 		rv = MDIC_DATA(mdic);
   6565 		if (rv == 0xffff)
   6566 			rv = 0;
   6567 	}
   6568 
   6569 	return rv;
   6570 }
   6571 
   6572 /*
   6573  * wm_gmii_i82544_writereg:	[mii interface function]
   6574  *
   6575  *	Write a PHY register on the GMII.
   6576  */
   6577 static void
   6578 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   6579 {
   6580 	struct wm_softc *sc = device_private(self);
   6581 	uint32_t mdic = 0;
   6582 	int i;
   6583 
   6584 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   6585 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   6586 
   6587 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6588 		mdic = CSR_READ(sc, WMREG_MDIC);
   6589 		if (mdic & MDIC_READY)
   6590 			break;
   6591 		delay(50);
   6592 	}
   6593 
   6594 	if ((mdic & MDIC_READY) == 0)
   6595 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   6596 		    device_xname(sc->sc_dev), phy, reg);
   6597 	else if (mdic & MDIC_E)
   6598 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   6599 		    device_xname(sc->sc_dev), phy, reg);
   6600 }
   6601 
   6602 /*
   6603  * wm_gmii_i80003_readreg:	[mii interface function]
   6604  *
    6605  *	Read a PHY register on the Kumeran interface (80003).
    6606  * This could be handled by the PHY layer if we didn't have to lock the
    6607  * resource ...
   6608  */
   6609 static int
   6610 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   6611 {
   6612 	struct wm_softc *sc = device_private(self);
   6613 	int sem;
   6614 	int rv;
   6615 
   6616 	if (phy != 1) /* only one PHY on kumeran bus */
   6617 		return 0;
   6618 
   6619 	sem = swfwphysem[sc->sc_funcid];
   6620 	if (wm_get_swfw_semaphore(sc, sem)) {
   6621 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6622 		    __func__);
   6623 		return 0;
   6624 	}
   6625 
   6626 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6627 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6628 		    reg >> GG82563_PAGE_SHIFT);
   6629 	} else {
   6630 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6631 		    reg >> GG82563_PAGE_SHIFT);
   6632 	}
    6633 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   6634 	delay(200);
   6635 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6636 	delay(200);
   6637 
   6638 	wm_put_swfw_semaphore(sc, sem);
   6639 	return rv;
   6640 }
   6641 
   6642 /*
   6643  * wm_gmii_i80003_writereg:	[mii interface function]
   6644  *
    6645  *	Write a PHY register on the Kumeran interface (80003).
    6646  * This could be handled by the PHY layer if we didn't have to lock the
    6647  * resource ...
   6648  */
   6649 static void
   6650 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   6651 {
   6652 	struct wm_softc *sc = device_private(self);
   6653 	int sem;
   6654 
   6655 	if (phy != 1) /* only one PHY on kumeran bus */
   6656 		return;
   6657 
   6658 	sem = swfwphysem[sc->sc_funcid];
   6659 	if (wm_get_swfw_semaphore(sc, sem)) {
   6660 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6661 		    __func__);
   6662 		return;
   6663 	}
   6664 
   6665 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6666 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6667 		    reg >> GG82563_PAGE_SHIFT);
   6668 	} else {
   6669 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6670 		    reg >> GG82563_PAGE_SHIFT);
   6671 	}
    6672 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   6673 	delay(200);
   6674 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6675 	delay(200);
   6676 
   6677 	wm_put_swfw_semaphore(sc, sem);
   6678 }
   6679 
   6680 /*
   6681  * wm_gmii_bm_readreg:	[mii interface function]
   6682  *
    6683  *	Read a PHY register on the BM PHY.
    6684  * This could be handled by the PHY layer if we didn't have to lock the
    6685  * resource ...
   6686  */
   6687 static int
   6688 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   6689 {
   6690 	struct wm_softc *sc = device_private(self);
   6691 	int sem;
   6692 	int rv;
   6693 
   6694 	sem = swfwphysem[sc->sc_funcid];
   6695 	if (wm_get_swfw_semaphore(sc, sem)) {
   6696 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6697 		    __func__);
   6698 		return 0;
   6699 	}
   6700 
   6701 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6702 		if (phy == 1)
   6703 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6704 			    reg);
   6705 		else
   6706 			wm_gmii_i82544_writereg(self, phy,
   6707 			    GG82563_PHY_PAGE_SELECT,
   6708 			    reg >> GG82563_PAGE_SHIFT);
   6709 	}
   6710 
   6711 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6712 	wm_put_swfw_semaphore(sc, sem);
   6713 	return rv;
   6714 }
   6715 
   6716 /*
   6717  * wm_gmii_bm_writereg:	[mii interface function]
   6718  *
    6719  *	Write a PHY register on the BM PHY.
    6720  * This could be handled by the PHY layer if we didn't have to lock the
    6721  * resource ...
   6722  */
   6723 static void
   6724 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   6725 {
   6726 	struct wm_softc *sc = device_private(self);
   6727 	int sem;
   6728 
   6729 	sem = swfwphysem[sc->sc_funcid];
   6730 	if (wm_get_swfw_semaphore(sc, sem)) {
   6731 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6732 		    __func__);
   6733 		return;
   6734 	}
   6735 
   6736 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6737 		if (phy == 1)
   6738 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6739 			    reg);
   6740 		else
   6741 			wm_gmii_i82544_writereg(self, phy,
   6742 			    GG82563_PHY_PAGE_SELECT,
   6743 			    reg >> GG82563_PAGE_SHIFT);
   6744 	}
   6745 
   6746 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6747 	wm_put_swfw_semaphore(sc, sem);
   6748 }
   6749 
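         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read or write a BM PHY wakeup register (page 800).  The
          *	sequence is: select page 769, set the WUC enable bit with
          *	host wakeup cleared, select page 800, write the register
          *	number to the address opcode register, transfer the data,
          *	then restore the page 769 enable register.
          */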
   6750 static void
   6751 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   6752 {
   6753 	struct wm_softc *sc = device_private(self);
   6754 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   6755 	uint16_t wuce;
   6756 
   6757 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   6758 	if (sc->sc_type == WM_T_PCH) {
    6759 		/* XXX the e1000 driver does nothing here... why? */
   6760 	}
   6761 
   6762 	/* Set page 769 */
   6763 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6764 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6765 
   6766 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   6767 
   6768 	wuce &= ~BM_WUC_HOST_WU_BIT;
   6769 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   6770 	    wuce | BM_WUC_ENABLE_BIT);
   6771 
   6772 	/* Select page 800 */
   6773 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6774 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   6775 
   6776 	/* Write page 800 */
   6777 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   6778 
   6779 	if (rd)
   6780 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   6781 	else
   6782 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   6783 
   6784 	/* Set page 769 */
   6785 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6786 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6787 
   6788 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   6789 }
   6790 
   6791 /*
   6792  * wm_gmii_hv_readreg:	[mii interface function]
   6793  *
    6794  *	Read a PHY register on the HV (PCH) PHY.
    6795  * This could be handled by the PHY layer if we didn't have to lock the
    6796  * resource ...
   6797  */
   6798 static int
   6799 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   6800 {
   6801 	struct wm_softc *sc = device_private(self);
   6802 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6803 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6804 	uint16_t val;
   6805 	int rv;
   6806 
   6807 	if (wm_get_swfwhw_semaphore(sc)) {
   6808 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6809 		    __func__);
   6810 		return 0;
   6811 	}
   6812 
   6813 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6814 	if (sc->sc_phytype == WMPHY_82577) {
   6815 		/* XXX must write */
   6816 	}
   6817 
   6818 	/* Page 800 works differently than the rest so it has its own func */
   6819 	if (page == BM_WUC_PAGE) {
   6820 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    6821 		return val;
   6822 	}
   6823 
   6824 	/*
    6825 	 * Pages lower than 768 work differently from the rest and would
    6826 	 * need their own function, which is not implemented yet
   6827 	 */
   6828 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    6829 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    6830 		return 0;
   6831 	}
   6832 
   6833 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6834 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6835 		    page << BME1000_PAGE_SHIFT);
   6836 	}
   6837 
   6838 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   6839 	wm_put_swfwhw_semaphore(sc);
   6840 	return rv;
   6841 }
   6842 
   6843 /*
   6844  * wm_gmii_hv_writereg:	[mii interface function]
    6845  *	Write a PHY register on the HV (PCH) PHY.
    6846  * This could be handled by the PHY layer if we didn't have to lock the
    6847  * resource ...
   6848  * ressource ...
   6849  */
   6850 static void
   6851 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   6852 {
   6853 	struct wm_softc *sc = device_private(self);
   6854 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6855 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6856 
   6857 	if (wm_get_swfwhw_semaphore(sc)) {
   6858 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6859 		    __func__);
   6860 		return;
   6861 	}
   6862 
   6863 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6864 
   6865 	/* Page 800 works differently than the rest so it has its own func */
   6866 	if (page == BM_WUC_PAGE) {
   6867 		uint16_t tmp;
   6868 
   6869 		tmp = val;
   6870 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    6871 		return;
   6872 	}
   6873 
   6874 	/*
    6875 	 * Pages lower than 768 work differently from the rest and would
    6876 	 * need their own function, which is not implemented yet
   6877 	 */
   6878 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6879 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    6880 		return;
   6881 	}
   6882 
   6883 	/*
   6884 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   6885 	 * Power Down (whenever bit 11 of the PHY control register is set)
   6886 	 */
   6887 
   6888 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6889 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6890 		    page << BME1000_PAGE_SHIFT);
   6891 	}
   6892 
   6893 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   6894 	wm_put_swfwhw_semaphore(sc);
   6895 }
   6896 
   6897 /*
   6898  * wm_gmii_82580_readreg:	[mii interface function]
   6899  *
   6900  *	Read a PHY register on the 82580 and I350.
   6901  * This could be handled by the PHY layer if we didn't have to lock the
    6902  * resource ...
   6903  */
   6904 static int
   6905 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   6906 {
   6907 	struct wm_softc *sc = device_private(self);
   6908 	int sem;
   6909 	int rv;
   6910 
   6911 	sem = swfwphysem[sc->sc_funcid];
   6912 	if (wm_get_swfw_semaphore(sc, sem)) {
   6913 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6914 		    __func__);
   6915 		return 0;
   6916 	}
   6917 
   6918 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   6919 
   6920 	wm_put_swfw_semaphore(sc, sem);
   6921 	return rv;
   6922 }
   6923 
   6924 /*
   6925  * wm_gmii_82580_writereg:	[mii interface function]
   6926  *
   6927  *	Write a PHY register on the 82580 and I350.
   6928  * This could be handled by the PHY layer if we didn't have to lock the
    6929  * resource ...
   6930  */
   6931 static void
   6932 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   6933 {
   6934 	struct wm_softc *sc = device_private(self);
   6935 	int sem;
   6936 
   6937 	sem = swfwphysem[sc->sc_funcid];
   6938 	if (wm_get_swfw_semaphore(sc, sem)) {
   6939 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6940 		    __func__);
   6941 		return;
   6942 	}
   6943 
   6944 	wm_gmii_i82544_writereg(self, phy, reg, val);
   6945 
   6946 	wm_put_swfw_semaphore(sc, sem);
   6947 }
   6948 
   6949 /*
   6950  * wm_gmii_statchg:	[mii interface function]
   6951  *
   6952  *	Callback from MII layer when media changes.
   6953  */
   6954 static void
   6955 wm_gmii_statchg(struct ifnet *ifp)
   6956 {
   6957 	struct wm_softc *sc = ifp->if_softc;
   6958 	struct mii_data *mii = &sc->sc_mii;
   6959 
   6960 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   6961 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6962 	sc->sc_fcrtl &= ~FCRTL_XONE;
   6963 
   6964 	/*
   6965 	 * Get flow control negotiation result.
   6966 	 */
   6967 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   6968 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   6969 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   6970 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   6971 	}
   6972 
   6973 	if (sc->sc_flowflags & IFM_FLOW) {
   6974 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   6975 			sc->sc_ctrl |= CTRL_TFCE;
   6976 			sc->sc_fcrtl |= FCRTL_XONE;
   6977 		}
   6978 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   6979 			sc->sc_ctrl |= CTRL_RFCE;
   6980 	}
   6981 
   6982 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6983 		DPRINTF(WM_DEBUG_LINK,
   6984 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   6985 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6986 	} else {
   6987 		DPRINTF(WM_DEBUG_LINK,
   6988 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   6989 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6990 	}
   6991 
   6992 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6993 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6994 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   6995 						 : WMREG_FCRTL, sc->sc_fcrtl);
   6996 	if (sc->sc_type == WM_T_80003) {
   6997 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   6998 		case IFM_1000_T:
   6999 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7000 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   7001 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7002 			break;
   7003 		default:
   7004 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   7005 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   7006 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   7007 			break;
   7008 		}
   7009 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7010 	}
   7011 }
   7012 
   7013 /*
   7014  * wm_kmrn_readreg:
   7015  *
    7016  *	Read a Kumeran register.
   7017  */
   7018 static int
   7019 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   7020 {
   7021 	int rv;
   7022 
    7023 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   7024 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7025 			aprint_error_dev(sc->sc_dev,
   7026 			    "%s: failed to get semaphore\n", __func__);
   7027 			return 0;
   7028 		}
    7029 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   7030 		if (wm_get_swfwhw_semaphore(sc)) {
   7031 			aprint_error_dev(sc->sc_dev,
   7032 			    "%s: failed to get semaphore\n", __func__);
   7033 			return 0;
   7034 		}
   7035 	}
   7036 
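         	/*
         	 * A Kumeran read goes through the KUMCTRLSTA register: write
         	 * the register offset with the read-enable (REN) bit set, wait
         	 * briefly, then read the data back from the same register.
         	 */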
   7037 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7038 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7039 	    KUMCTRLSTA_REN);
   7040 	CSR_WRITE_FLUSH(sc);
   7041 	delay(2);
   7042 
   7043 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   7044 
    7045 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
    7046 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    7047 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   7048 		wm_put_swfwhw_semaphore(sc);
   7049 
   7050 	return rv;
   7051 }
   7052 
   7053 /*
   7054  * wm_kmrn_writereg:
   7055  *
    7056  *	Write a Kumeran register.
   7057  */
   7058 static void
   7059 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   7060 {
   7061 
    7062 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
   7063 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7064 			aprint_error_dev(sc->sc_dev,
   7065 			    "%s: failed to get semaphore\n", __func__);
   7066 			return;
   7067 		}
    7068 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
   7069 		if (wm_get_swfwhw_semaphore(sc)) {
   7070 			aprint_error_dev(sc->sc_dev,
   7071 			    "%s: failed to get semaphore\n", __func__);
   7072 			return;
   7073 		}
   7074 	}
   7075 
   7076 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7077 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7078 	    (val & KUMCTRLSTA_MASK));
   7079 
    7080 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
    7081 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    7082 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
   7083 		wm_put_swfwhw_semaphore(sc);
   7084 }
   7085 
   7086 /* SGMII related */
   7087 
   7088 /*
   7089  * wm_sgmii_uses_mdio
   7090  *
   7091  * Check whether the transaction is to the internal PHY or the external
   7092  * MDIO interface. Return true if it's MDIO.
   7093  */
   7094 static bool
   7095 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7096 {
   7097 	uint32_t reg;
   7098 	bool ismdio = false;
   7099 
   7100 	switch (sc->sc_type) {
   7101 	case WM_T_82575:
   7102 	case WM_T_82576:
   7103 		reg = CSR_READ(sc, WMREG_MDIC);
   7104 		ismdio = ((reg & MDIC_DEST) != 0);
   7105 		break;
   7106 	case WM_T_82580:
   7107 	case WM_T_82580ER:
   7108 	case WM_T_I350:
   7109 	case WM_T_I354:
   7110 	case WM_T_I210:
   7111 	case WM_T_I211:
   7112 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7113 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7114 		break;
   7115 	default:
   7116 		break;
   7117 	}
   7118 
   7119 	return ismdio;
   7120 }
   7121 
   7122 /*
   7123  * wm_sgmii_readreg:	[mii interface function]
   7124  *
    7125  *	Read a PHY register on the SGMII.
    7126  * This could be handled by the PHY layer if we didn't have to lock the
    7127  * resource ...
   7128  */
   7129 static int
   7130 wm_sgmii_readreg(device_t self, int phy, int reg)
   7131 {
   7132 	struct wm_softc *sc = device_private(self);
   7133 	uint32_t i2ccmd;
   7134 	int i, rv;
   7135 
   7136 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7137 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7138 		    __func__);
   7139 		return 0;
   7140 	}
   7141 
   7142 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7143 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7144 	    | I2CCMD_OPCODE_READ;
   7145 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7146 
   7147 	/* Poll the ready bit */
   7148 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7149 		delay(50);
   7150 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7151 		if (i2ccmd & I2CCMD_READY)
   7152 			break;
   7153 	}
   7154 	if ((i2ccmd & I2CCMD_READY) == 0)
   7155 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7156 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7157 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7158 
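         	/* I2CCMD returns the two data bytes swapped; put them back in order. */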
   7159 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7160 
   7161 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7162 	return rv;
   7163 }
   7164 
   7165 /*
   7166  * wm_sgmii_writereg:	[mii interface function]
   7167  *
   7168  *	Write a PHY register on the SGMII.
   7169  * This could be handled by the PHY layer if we didn't have to lock the
    7170  * resource ...
   7171  */
   7172 static void
   7173 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7174 {
   7175 	struct wm_softc *sc = device_private(self);
   7176 	uint32_t i2ccmd;
   7177 	int i;
   7178 
   7179 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7180 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7181 		    __func__);
   7182 		return;
   7183 	}
   7184 
   7185 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7186 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7187 	    | I2CCMD_OPCODE_WRITE;
   7188 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7189 
   7190 	/* Poll the ready bit */
   7191 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7192 		delay(50);
   7193 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7194 		if (i2ccmd & I2CCMD_READY)
   7195 			break;
   7196 	}
   7197 	if ((i2ccmd & I2CCMD_READY) == 0)
   7198 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7199 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7200 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7201 
    7202 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7203 }
   7204 
   7205 /* TBI related */
   7206 
   7207 /* XXX Currently TBI only */
   7208 static int
   7209 wm_check_for_link(struct wm_softc *sc)
   7210 {
   7211 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7212 	uint32_t rxcw;
   7213 	uint32_t ctrl;
   7214 	uint32_t status;
   7215 	uint32_t sig;
   7216 
   7217 	if (sc->sc_mediatype & WMP_F_SERDES) {
   7218 		sc->sc_tbi_linkup = 1;
   7219 		return 0;
   7220 	}
   7221 
   7222 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7223 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7224 	status = CSR_READ(sc, WMREG_STATUS);
   7225 
   7226 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7227 
   7228 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7229 		device_xname(sc->sc_dev), __func__,
   7230 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7231 		((status & STATUS_LU) != 0),
   7232 		((rxcw & RXCW_C) != 0)
   7233 		    ));
   7234 
   7235 	/*
   7236 	 * SWDPIN   LU RXCW
   7237 	 *      0    0    0
   7238 	 *      0    0    1	(should not happen)
   7239 	 *      0    1    0	(should not happen)
   7240 	 *      0    1    1	(should not happen)
   7241 	 *      1    0    0	Disable autonego and force linkup
   7242 	 *      1    0    1	got /C/ but not linkup yet
   7243 	 *      1    1    0	(linkup)
   7244 	 *      1    1    1	If IFM_AUTO, back to autonego
   7245 	 *
   7246 	 */
   7247 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7248 	    && ((status & STATUS_LU) == 0)
   7249 	    && ((rxcw & RXCW_C) == 0)) {
   7250 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7251 			__func__));
   7252 		sc->sc_tbi_linkup = 0;
   7253 		/* Disable auto-negotiation in the TXCW register */
   7254 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7255 
   7256 		/*
   7257 		 * Force link-up and also force full-duplex.
   7258 		 *
    7259 		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL,
    7260 		 * so base the new sc->sc_ctrl on the value just read.
   7261 		 */
   7262 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7263 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7264 	} else if (((status & STATUS_LU) != 0)
   7265 	    && ((rxcw & RXCW_C) != 0)
   7266 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   7267 		sc->sc_tbi_linkup = 1;
   7268 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   7269 			__func__));
   7270 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7271 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   7272 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7273 	    && ((rxcw & RXCW_C) != 0)) {
   7274 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   7275 	} else {
   7276 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   7277 			status));
   7278 	}
   7279 
   7280 	return 0;
   7281 }
   7282 
   7283 /*
   7284  * wm_tbi_mediainit:
   7285  *
   7286  *	Initialize media for use on 1000BASE-X devices.
   7287  */
   7288 static void
   7289 wm_tbi_mediainit(struct wm_softc *sc)
   7290 {
   7291 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7292 	const char *sep = "";
   7293 
   7294 	if (sc->sc_type < WM_T_82543)
   7295 		sc->sc_tipg = TIPG_WM_DFLT;
   7296 	else
   7297 		sc->sc_tipg = TIPG_LG_DFLT;
   7298 
   7299 	sc->sc_tbi_anegticks = 5;
   7300 
   7301 	/* Initialize our media structures */
   7302 	sc->sc_mii.mii_ifp = ifp;
   7303 
   7304 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7305 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   7306 	    wm_tbi_mediastatus);
   7307 
   7308 	/*
   7309 	 * SWD Pins:
   7310 	 *
   7311 	 *	0 = Link LED (output)
   7312 	 *	1 = Loss Of Signal (input)
   7313 	 */
   7314 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7315 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7316 	if (sc->sc_mediatype & WMP_F_SERDES)
   7317 		sc->sc_ctrl &= ~CTRL_LRST;
   7318 
   7319 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7320 
   7321 #define	ADD(ss, mm, dd)							\
   7322 do {									\
   7323 	aprint_normal("%s%s", sep, ss);					\
   7324 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7325 	sep = ", ";							\
   7326 } while (/*CONSTCOND*/0)
   7327 
   7328 	aprint_normal_dev(sc->sc_dev, "");
   7329 
   7330 	/* Only 82545 is LX */
   7331 	if (sc->sc_type == WM_T_82545) {
   7332 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   7333 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   7334 	} else {
   7335 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7336 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7337 	}
   7338 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7339 	aprint_normal("\n");
   7340 
   7341 #undef ADD
   7342 
   7343 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7344 }
   7345 
   7346 /*
   7347  * wm_tbi_mediastatus:	[ifmedia interface function]
   7348  *
   7349  *	Get the current interface media status on a 1000BASE-X device.
   7350  */
   7351 static void
   7352 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7353 {
   7354 	struct wm_softc *sc = ifp->if_softc;
   7355 	uint32_t ctrl, status;
   7356 
   7357 	ifmr->ifm_status = IFM_AVALID;
   7358 	ifmr->ifm_active = IFM_ETHER;
   7359 
   7360 	status = CSR_READ(sc, WMREG_STATUS);
   7361 	if ((status & STATUS_LU) == 0) {
   7362 		ifmr->ifm_active |= IFM_NONE;
   7363 		return;
   7364 	}
   7365 
   7366 	ifmr->ifm_status |= IFM_ACTIVE;
   7367 	/* Only 82545 is LX */
   7368 	if (sc->sc_type == WM_T_82545)
   7369 		ifmr->ifm_active |= IFM_1000_LX;
   7370 	else
   7371 		ifmr->ifm_active |= IFM_1000_SX;
   7372 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   7373 		ifmr->ifm_active |= IFM_FDX;
   7374 	else
   7375 		ifmr->ifm_active |= IFM_HDX;
   7376 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7377 	if (ctrl & CTRL_RFCE)
   7378 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7379 	if (ctrl & CTRL_TFCE)
   7380 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7381 }
   7382 
   7383 /*
   7384  * wm_tbi_mediachange:	[ifmedia interface function]
   7385  *
   7386  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7387  */
   7388 static int
   7389 wm_tbi_mediachange(struct ifnet *ifp)
   7390 {
   7391 	struct wm_softc *sc = ifp->if_softc;
   7392 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7393 	uint32_t status;
   7394 	int i;
   7395 
   7396 	if (sc->sc_mediatype & WMP_F_SERDES)
   7397 		return 0;
   7398 
   7399 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   7400 	    || (sc->sc_type >= WM_T_82575))
   7401 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   7402 
   7403 	/* XXX power_up_serdes_link_82575() */
   7404 
   7405 	sc->sc_ctrl &= ~CTRL_LRST;
   7406 	sc->sc_txcw = TXCW_ANE;
   7407 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   7408 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   7409 	else if (ife->ifm_media & IFM_FDX)
   7410 		sc->sc_txcw |= TXCW_FD;
   7411 	else
   7412 		sc->sc_txcw |= TXCW_HD;
   7413 
   7414 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7415 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7416 
   7417 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7418 		    device_xname(sc->sc_dev), sc->sc_txcw));
   7419 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7420 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7421 	CSR_WRITE_FLUSH(sc);
   7422 	delay(1000);
   7423 
   7424 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7425 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7426 
   7427 	/*
    7428 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    7429 	 * optics detect a signal; on older chips a clear bit indicates one.
   7430 	 */
   7431 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7432 		/* Have signal; wait for the link to come up. */
   7433 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7434 			delay(10000);
   7435 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7436 				break;
   7437 		}
   7438 
   7439 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7440 			    device_xname(sc->sc_dev),i));
   7441 
   7442 		status = CSR_READ(sc, WMREG_STATUS);
   7443 		DPRINTF(WM_DEBUG_LINK,
   7444 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7445 			device_xname(sc->sc_dev),status, STATUS_LU));
   7446 		if (status & STATUS_LU) {
   7447 			/* Link is up. */
   7448 			DPRINTF(WM_DEBUG_LINK,
   7449 			    ("%s: LINK: set media -> link up %s\n",
   7450 			    device_xname(sc->sc_dev),
   7451 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7452 
   7453 			/*
    7454 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    7455 			 * automatically, so refresh sc->sc_ctrl from the register.
   7456 			 */
   7457 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7458 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7459 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7460 			if (status & STATUS_FD)
   7461 				sc->sc_tctl |=
   7462 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7463 			else
   7464 				sc->sc_tctl |=
   7465 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7466 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7467 				sc->sc_fcrtl |= FCRTL_XONE;
   7468 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7469 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7470 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7471 				      sc->sc_fcrtl);
   7472 			sc->sc_tbi_linkup = 1;
   7473 		} else {
   7474 			if (i == WM_LINKUP_TIMEOUT)
   7475 				wm_check_for_link(sc);
   7476 			/* Link is down. */
   7477 			DPRINTF(WM_DEBUG_LINK,
   7478 			    ("%s: LINK: set media -> link down\n",
   7479 			    device_xname(sc->sc_dev)));
   7480 			sc->sc_tbi_linkup = 0;
   7481 		}
   7482 	} else {
   7483 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7484 		    device_xname(sc->sc_dev)));
   7485 		sc->sc_tbi_linkup = 0;
   7486 	}
   7487 
   7488 	wm_tbi_set_linkled(sc);
   7489 
   7490 	return 0;
   7491 }
   7492 
   7493 /*
   7494  * wm_tbi_set_linkled:
   7495  *
   7496  *	Update the link LED on 1000BASE-X devices.
   7497  */
   7498 static void
   7499 wm_tbi_set_linkled(struct wm_softc *sc)
   7500 {
   7501 
   7502 	if (sc->sc_tbi_linkup)
   7503 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7504 	else
   7505 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7506 
   7507 	/* 82540 or newer devices are active low */
   7508 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7509 
   7510 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7511 }
   7512 
   7513 /*
   7514  * wm_tbi_check_link:
   7515  *
   7516  *	Check the link on 1000BASE-X devices.
   7517  */
   7518 static void
   7519 wm_tbi_check_link(struct wm_softc *sc)
   7520 {
   7521 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7522 	uint32_t status;
   7523 
   7524 	KASSERT(WM_TX_LOCKED(sc));
   7525 
   7526 	if (sc->sc_mediatype & WMP_F_SERDES) {
   7527 		sc->sc_tbi_linkup = 1;
   7528 		return;
   7529 	}
   7530 
   7531 	status = CSR_READ(sc, WMREG_STATUS);
   7532 
   7533 	/* XXX is this needed? */
   7534 	(void)CSR_READ(sc, WMREG_RXCW);
   7535 	(void)CSR_READ(sc, WMREG_CTRL);
   7536 
   7537 	/* set link status */
   7538 	if ((status & STATUS_LU) == 0) {
   7539 		DPRINTF(WM_DEBUG_LINK,
   7540 		    ("%s: LINK: checklink -> down\n",
   7541 			device_xname(sc->sc_dev)));
   7542 		sc->sc_tbi_linkup = 0;
   7543 	} else if (sc->sc_tbi_linkup == 0) {
   7544 		DPRINTF(WM_DEBUG_LINK,
   7545 		    ("%s: LINK: checklink -> up %s\n",
   7546 			device_xname(sc->sc_dev),
   7547 			(status & STATUS_FD) ? "FDX" : "HDX"));
   7548 		sc->sc_tbi_linkup = 1;
   7549 	}
   7550 
   7551 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
   7552 	    && ((status & STATUS_LU) == 0)) {
   7553 		sc->sc_tbi_linkup = 0;
   7554 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7555 			/* If the timer expired, retry autonegotiation */
   7556 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
   7557 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   7558 				sc->sc_tbi_ticks = 0;
   7559 				/*
   7560 				 * Reset the link, and let autonegotiation do
   7561 				 * its thing
   7562 				 */
   7563 				sc->sc_ctrl |= CTRL_LRST;
   7564 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7565 				CSR_WRITE_FLUSH(sc);
   7566 				delay(1000);
   7567 				sc->sc_ctrl &= ~CTRL_LRST;
   7568 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7569 				CSR_WRITE_FLUSH(sc);
   7570 				delay(1000);
   7571 				CSR_WRITE(sc, WMREG_TXCW,
   7572 				    sc->sc_txcw & ~TXCW_ANE);
   7573 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7574 			}
   7575 		}
   7576 	}
   7577 
   7578 	wm_tbi_set_linkled(sc);
   7579 }
   7580 
   7581 /* SFP related */
   7582 
   7583 static int
   7584 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   7585 {
   7586 	uint32_t i2ccmd;
   7587 	int i;
   7588 
   7589 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   7590 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7591 
   7592 	/* Poll the ready bit */
   7593 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7594 		delay(50);
   7595 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7596 		if (i2ccmd & I2CCMD_READY)
   7597 			break;
   7598 	}
   7599 	if ((i2ccmd & I2CCMD_READY) == 0)
   7600 		return -1;
   7601 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7602 		return -1;
   7603 
   7604 	*data = i2ccmd & 0x00ff;
   7605 
   7606 	return 0;
   7607 }
   7608 
   7609 static uint32_t
   7610 wm_sfp_get_media_type(struct wm_softc *sc)
   7611 {
   7612 	uint32_t ctrl_ext;
   7613 	uint8_t val = 0;
   7614 	int timeout = 3;
   7615 	uint32_t mediatype = WMP_F_UNKNOWN;
   7616 	int rv = -1;
   7617 
   7618 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   7619 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   7620 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   7621 	CSR_WRITE_FLUSH(sc);
   7622 
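         	/*
         	 * Identify the module: SFF_SFP_ID_OFF is the module EEPROM's
         	 * identifier byte and SFF_SFP_ETH_FLAGS_OFF its Ethernet
         	 * compliance codes (per the SFF module EEPROM layout).
         	 */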
   7623 	/* Read SFP module data */
   7624 	while (timeout) {
   7625 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   7626 		if (rv == 0)
   7627 			break;
   7628 		delay(100*1000); /* XXX too big */
   7629 		timeout--;
   7630 	}
   7631 	if (rv != 0)
   7632 		goto out;
   7633 	switch (val) {
   7634 	case SFF_SFP_ID_SFF:
   7635 		aprint_normal_dev(sc->sc_dev,
   7636 		    "Module/Connector soldered to board\n");
   7637 		break;
   7638 	case SFF_SFP_ID_SFP:
   7639 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   7640 		break;
   7641 	case SFF_SFP_ID_UNKNOWN:
   7642 		goto out;
   7643 	default:
   7644 		break;
   7645 	}
   7646 
   7647 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   7648 	if (rv != 0) {
   7649 		goto out;
   7650 	}
   7651 
   7652 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   7653 		mediatype = WMP_F_SERDES;
    7654 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   7655 		sc->sc_flags |= WM_F_SGMII;
   7656 		mediatype = WMP_F_COPPER;
    7657 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   7658 		sc->sc_flags |= WM_F_SGMII;
   7659 		mediatype = WMP_F_SERDES;
   7660 	}
   7661 
   7662 out:
   7663 	/* Restore I2C interface setting */
   7664 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   7665 
   7666 	return mediatype;
   7667 }
   7668 /*
   7669  * NVM related.
    7670  * Microwire, SPI (with/without EERD) and Flash.
   7671  */
   7672 
   7673 /* Both spi and uwire */
   7674 
   7675 /*
   7676  * wm_eeprom_sendbits:
   7677  *
   7678  *	Send a series of bits to the EEPROM.
   7679  */
   7680 static void
   7681 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   7682 {
   7683 	uint32_t reg;
   7684 	int x;
   7685 
   7686 	reg = CSR_READ(sc, WMREG_EECD);
   7687 
   7688 	for (x = nbits; x > 0; x--) {
   7689 		if (bits & (1U << (x - 1)))
   7690 			reg |= EECD_DI;
   7691 		else
   7692 			reg &= ~EECD_DI;
   7693 		CSR_WRITE(sc, WMREG_EECD, reg);
   7694 		CSR_WRITE_FLUSH(sc);
   7695 		delay(2);
   7696 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7697 		CSR_WRITE_FLUSH(sc);
   7698 		delay(2);
   7699 		CSR_WRITE(sc, WMREG_EECD, reg);
   7700 		CSR_WRITE_FLUSH(sc);
   7701 		delay(2);
   7702 	}
   7703 }
   7704 
   7705 /*
   7706  * wm_eeprom_recvbits:
   7707  *
   7708  *	Receive a series of bits from the EEPROM.
   7709  */
   7710 static void
   7711 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   7712 {
   7713 	uint32_t reg, val;
   7714 	int x;
   7715 
   7716 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   7717 
   7718 	val = 0;
   7719 	for (x = nbits; x > 0; x--) {
   7720 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7721 		CSR_WRITE_FLUSH(sc);
   7722 		delay(2);
   7723 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   7724 			val |= (1U << (x - 1));
   7725 		CSR_WRITE(sc, WMREG_EECD, reg);
   7726 		CSR_WRITE_FLUSH(sc);
   7727 		delay(2);
   7728 	}
   7729 	*valp = val;
   7730 }
   7731 
   7732 /* Microwire */
   7733 
   7734 /*
   7735  * wm_nvm_read_uwire:
   7736  *
   7737  *	Read a word from the EEPROM using the MicroWire protocol.
   7738  */
   7739 static int
   7740 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7741 {
   7742 	uint32_t reg, val;
   7743 	int i;
   7744 
   7745 	for (i = 0; i < wordcnt; i++) {
   7746 		/* Clear SK and DI. */
   7747 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   7748 		CSR_WRITE(sc, WMREG_EECD, reg);
   7749 
   7750 		/*
   7751 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   7752 		 * and Xen.
   7753 		 *
   7754 		 * We use this workaround only for 82540 because qemu's
    7755 		 * e1000 acts as an 82540.
   7756 		 */
   7757 		if (sc->sc_type == WM_T_82540) {
   7758 			reg |= EECD_SK;
   7759 			CSR_WRITE(sc, WMREG_EECD, reg);
   7760 			reg &= ~EECD_SK;
   7761 			CSR_WRITE(sc, WMREG_EECD, reg);
   7762 			CSR_WRITE_FLUSH(sc);
   7763 			delay(2);
   7764 		}
   7765 		/* XXX: end of workaround */
   7766 
   7767 		/* Set CHIP SELECT. */
   7768 		reg |= EECD_CS;
   7769 		CSR_WRITE(sc, WMREG_EECD, reg);
   7770 		CSR_WRITE_FLUSH(sc);
   7771 		delay(2);
   7772 
   7773 		/* Shift in the READ command. */
   7774 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   7775 
   7776 		/* Shift in address. */
   7777 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   7778 
   7779 		/* Shift out the data. */
   7780 		wm_eeprom_recvbits(sc, &val, 16);
   7781 		data[i] = val & 0xffff;
   7782 
   7783 		/* Clear CHIP SELECT. */
   7784 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   7785 		CSR_WRITE(sc, WMREG_EECD, reg);
   7786 		CSR_WRITE_FLUSH(sc);
   7787 		delay(2);
   7788 	}
   7789 
   7790 	return 0;
   7791 }
   7792 
   7793 /* SPI */
   7794 
   7795 /*
   7796  * Set SPI and FLASH related information from the EECD register.
   7797  * For 82541 and 82547, the word size is taken from EEPROM.
   7798  */
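         /*
          * For example, with NVM_WORD_SIZE_BASE_SHIFT being 6, an EECD
          * size field of 2 on an 82571 yields 1 << (2 + 6) = 256 words.
          */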
   7799 static int
   7800 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   7801 {
   7802 	int size;
   7803 	uint32_t reg;
   7804 	uint16_t data;
   7805 
   7806 	reg = CSR_READ(sc, WMREG_EECD);
   7807 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   7808 
   7809 	/* Read the size of NVM from EECD by default */
   7810 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   7811 	switch (sc->sc_type) {
   7812 	case WM_T_82541:
   7813 	case WM_T_82541_2:
   7814 	case WM_T_82547:
   7815 	case WM_T_82547_2:
    7816 		/* Set a dummy word size so the real size can be read from NVM */
   7817 		sc->sc_nvm_wordsize = 64;
   7818 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   7819 		reg = data;
   7820 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   7821 		if (size == 0)
   7822 			size = 6; /* 64 word size */
   7823 		else
   7824 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   7825 		break;
   7826 	case WM_T_80003:
   7827 	case WM_T_82571:
   7828 	case WM_T_82572:
   7829 	case WM_T_82573: /* SPI case */
   7830 	case WM_T_82574: /* SPI case */
   7831 	case WM_T_82583: /* SPI case */
   7832 		size += NVM_WORD_SIZE_BASE_SHIFT;
   7833 		if (size > 14)
   7834 			size = 14;
   7835 		break;
   7836 	case WM_T_82575:
   7837 	case WM_T_82576:
   7838 	case WM_T_82580:
   7839 	case WM_T_82580ER:
   7840 	case WM_T_I350:
   7841 	case WM_T_I354:
   7842 	case WM_T_I210:
   7843 	case WM_T_I211:
   7844 		size += NVM_WORD_SIZE_BASE_SHIFT;
   7845 		if (size > 15)
   7846 			size = 15;
   7847 		break;
   7848 	default:
   7849 		aprint_error_dev(sc->sc_dev,
   7850 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    7851 		return -1;
   7853 	}
   7854 
   7855 	sc->sc_nvm_wordsize = 1 << size;
   7856 
   7857 	return 0;
   7858 }
   7859 
   7860 /*
   7861  * wm_nvm_ready_spi:
   7862  *
   7863  *	Wait for a SPI EEPROM to be ready for commands.
   7864  */
   7865 static int
   7866 wm_nvm_ready_spi(struct wm_softc *sc)
   7867 {
   7868 	uint32_t val;
   7869 	int usec;
   7870 
   7871 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   7872 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   7873 		wm_eeprom_recvbits(sc, &val, 8);
   7874 		if ((val & SPI_SR_RDY) == 0)
   7875 			break;
   7876 	}
   7877 	if (usec >= SPI_MAX_RETRIES) {
   7878 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   7879 		return 1;
   7880 	}
   7881 	return 0;
   7882 }
   7883 
   7884 /*
   7885  * wm_nvm_read_spi:
   7886  *
    7887  *	Read a word from the EEPROM using the SPI protocol.
   7888  */
   7889 static int
   7890 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7891 {
   7892 	uint32_t reg, val;
   7893 	int i;
   7894 	uint8_t opc;
   7895 
   7896 	/* Clear SK and CS. */
   7897 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   7898 	CSR_WRITE(sc, WMREG_EECD, reg);
   7899 	CSR_WRITE_FLUSH(sc);
   7900 	delay(2);
   7901 
   7902 	if (wm_nvm_ready_spi(sc))
   7903 		return 1;
   7904 
   7905 	/* Toggle CS to flush commands. */
   7906 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   7907 	CSR_WRITE_FLUSH(sc);
   7908 	delay(2);
   7909 	CSR_WRITE(sc, WMREG_EECD, reg);
   7910 	CSR_WRITE_FLUSH(sc);
   7911 	delay(2);
   7912 
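         	/*
         	 * Small parts with 8 address bits encode the ninth address
         	 * bit (A8) in the opcode itself.
         	 */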
   7913 	opc = SPI_OPC_READ;
   7914 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   7915 		opc |= SPI_OPC_A8;
   7916 
   7917 	wm_eeprom_sendbits(sc, opc, 8);
   7918 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   7919 
   7920 	for (i = 0; i < wordcnt; i++) {
   7921 		wm_eeprom_recvbits(sc, &val, 16);
   7922 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   7923 	}
   7924 
   7925 	/* Raise CS and clear SK. */
   7926 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   7927 	CSR_WRITE(sc, WMREG_EECD, reg);
   7928 	CSR_WRITE_FLUSH(sc);
   7929 	delay(2);
   7930 
   7931 	return 0;
   7932 }
   7933 
   7934 /* Using with EERD */
   7935 
   7936 static int
   7937 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   7938 {
   7939 	uint32_t attempts = 100000;
   7940 	uint32_t i, reg = 0;
   7941 	int32_t done = -1;
   7942 
   7943 	for (i = 0; i < attempts; i++) {
   7944 		reg = CSR_READ(sc, rw);
   7945 
   7946 		if (reg & EERD_DONE) {
   7947 			done = 0;
   7948 			break;
   7949 		}
   7950 		delay(5);
   7951 	}
   7952 
   7953 	return done;
   7954 }
   7955 
   7956 static int
   7957 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   7958     uint16_t *data)
   7959 {
   7960 	int i, eerd = 0;
   7961 	int error = 0;
   7962 
   7963 	for (i = 0; i < wordcnt; i++) {
   7964 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   7965 
   7966 		CSR_WRITE(sc, WMREG_EERD, eerd);
   7967 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   7968 		if (error != 0)
   7969 			break;
   7970 
   7971 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   7972 	}
   7973 
   7974 	return error;
   7975 }
   7976 
   7977 /* Flash */
   7978 
   7979 static int
   7980 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   7981 {
   7982 	uint32_t eecd;
   7983 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   7984 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   7985 	uint8_t sig_byte = 0;
   7986 
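         	/*
         	 * The bank signature is the high byte of word ICH_NVM_SIG_WORD
         	 * (hence the "* 2 + 1" byte offset above); a bank is valid when
         	 * the masked byte equals ICH_NVM_SIG_VALUE.
         	 */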
   7987 	switch (sc->sc_type) {
   7988 	case WM_T_ICH8:
   7989 	case WM_T_ICH9:
   7990 		eecd = CSR_READ(sc, WMREG_EECD);
   7991 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   7992 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   7993 			return 0;
   7994 		}
   7995 		/* FALLTHROUGH */
   7996 	default:
   7997 		/* Default to 0 */
   7998 		*bank = 0;
   7999 
   8000 		/* Check bank 0 */
   8001 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   8002 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8003 			*bank = 0;
   8004 			return 0;
   8005 		}
   8006 
   8007 		/* Check bank 1 */
   8008 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   8009 		    &sig_byte);
   8010 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   8011 			*bank = 1;
   8012 			return 0;
   8013 		}
   8014 	}
   8015 
   8016 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   8017 		device_xname(sc->sc_dev)));
   8018 	return -1;
   8019 }
   8020 
   8021 /******************************************************************************
   8022  * This function does initial flash setup so that a new read/write/erase cycle
   8023  * can be started.
   8024  *
   8025  * sc - The pointer to the hw structure
   8026  ****************************************************************************/
   8027 static int32_t
   8028 wm_ich8_cycle_init(struct wm_softc *sc)
   8029 {
   8030 	uint16_t hsfsts;
   8031 	int32_t error = 1;
   8032 	int32_t i     = 0;
   8033 
   8034 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8035 
    8036 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   8037 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   8038 		return error;
   8039 	}
   8040 
    8041 	/* Clear FCERR and DAEL in HW status by writing 1s to them */
   8043 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   8044 
   8045 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8046 
    8047 	/*
    8048 	 * Either we should have a hardware SPI cycle-in-progress bit to
    8049 	 * check against before starting a new cycle, or the FDONE bit
    8050 	 * should be changed in the hardware so that it reads 1 after a
    8051 	 * hardware reset, which could then indicate whether a cycle is in
    8052 	 * progress or has completed.  We should also have some software
    8053 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    8054 	 * so that accesses to those bits by two threads are serialized,
    8055 	 * or some way to keep two threads from starting a cycle at once.
    8056 	 */
   8057 
   8058 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8059 		/*
   8060 		 * There is no cycle running at present, so we can start a
   8061 		 * cycle
   8062 		 */
   8063 
   8064 		/* Begin by setting Flash Cycle Done. */
   8065 		hsfsts |= HSFSTS_DONE;
   8066 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8067 		error = 0;
   8068 	} else {
   8069 		/*
    8070 		 * Otherwise, poll for some time so the current cycle has a
   8071 		 * chance to end before giving up.
   8072 		 */
   8073 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   8074 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8075 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   8076 				error = 0;
   8077 				break;
   8078 			}
   8079 			delay(1);
   8080 		}
   8081 		if (error == 0) {
   8082 			/*
    8083 			 * The previous cycle completed in time; now set the
    8084 			 * Flash Cycle Done bit.
   8085 			 */
   8086 			hsfsts |= HSFSTS_DONE;
   8087 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   8088 		}
   8089 	}
   8090 	return error;
   8091 }
   8092 
   8093 /******************************************************************************
   8094  * This function starts a flash cycle and waits for its completion
   8095  *
   8096  * sc - The pointer to the hw structure
   8097  ****************************************************************************/
   8098 static int32_t
   8099 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   8100 {
   8101 	uint16_t hsflctl;
   8102 	uint16_t hsfsts;
   8103 	int32_t error = 1;
   8104 	uint32_t i = 0;
   8105 
   8106 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   8107 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8108 	hsflctl |= HSFCTL_GO;
   8109 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8110 
   8111 	/* Wait till FDONE bit is set to 1 */
   8112 	do {
   8113 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8114 		if (hsfsts & HSFSTS_DONE)
   8115 			break;
   8116 		delay(1);
   8117 		i++;
   8118 	} while (i < timeout);
    8119 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   8120 		error = 0;
   8121 
   8122 	return error;
   8123 }
   8124 
   8125 /******************************************************************************
   8126  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   8127  *
   8128  * sc - The pointer to the hw structure
   8129  * index - The index of the byte or word to read.
   8130  * size - Size of data to read, 1=byte 2=word
   8131  * data - Pointer to the word to store the value read.
   8132  *****************************************************************************/
   8133 static int32_t
   8134 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   8135     uint32_t size, uint16_t *data)
   8136 {
   8137 	uint16_t hsfsts;
   8138 	uint16_t hsflctl;
   8139 	uint32_t flash_linear_address;
   8140 	uint32_t flash_data = 0;
   8141 	int32_t error = 1;
   8142 	int32_t count = 0;
   8143 
	if (size < 1 || size > 2 || data == NULL ||
   8145 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   8146 		return error;
   8147 
   8148 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   8149 	    sc->sc_ich8_flash_base;
   8150 
   8151 	do {
   8152 		delay(1);
   8153 		/* Steps */
   8154 		error = wm_ich8_cycle_init(sc);
   8155 		if (error)
   8156 			break;
   8157 
   8158 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   8159 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   8160 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   8161 		    & HSFCTL_BCOUNT_MASK;
   8162 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   8163 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   8164 
   8165 		/*
   8166 		 * Write the last 24 bits of index into Flash Linear address
   8167 		 * field in Flash Address
   8168 		 */
   8169 		/* TODO: TBD maybe check the index against the size of flash */
   8170 
   8171 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   8172 
   8173 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   8174 
		/*
		 * Check if FCERR is set to 1.  If it is, clear it and try
		 * the whole sequence a few more times; otherwise read in
		 * (shift in) the Flash Data0 register, least significant
		 * byte first.
		 */
   8181 		if (error == 0) {
   8182 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   8183 			if (size == 1)
   8184 				*data = (uint8_t)(flash_data & 0x000000FF);
   8185 			else if (size == 2)
   8186 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   8187 			break;
   8188 		} else {
   8189 			/*
   8190 			 * If we've gotten here, then things are probably
   8191 			 * completely hosed, but if the error condition is
   8192 			 * detected, it won't hurt to give it another try...
   8193 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   8194 			 */
   8195 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8196 			if (hsfsts & HSFSTS_ERR) {
   8197 				/* Repeat for some time before giving up. */
   8198 				continue;
   8199 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   8200 				break;
   8201 		}
   8202 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   8203 
   8204 	return error;
   8205 }
   8206 
   8207 /******************************************************************************
   8208  * Reads a single byte from the NVM using the ICH8 flash access registers.
   8209  *
 * sc - pointer to the wm_softc structure
   8211  * index - The index of the byte to read.
   8212  * data - Pointer to a byte to store the value read.
   8213  *****************************************************************************/
   8214 static int32_t
   8215 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8216 {
   8217 	int32_t status;
   8218 	uint16_t word = 0;
   8219 
   8220 	status = wm_read_ich8_data(sc, index, 1, &word);
   8221 	if (status == 0)
   8222 		*data = (uint8_t)word;
   8223 	else
   8224 		*data = 0;
   8225 
   8226 	return status;
   8227 }
   8228 
   8229 /******************************************************************************
   8230  * Reads a word from the NVM using the ICH8 flash access registers.
   8231  *
 * sc - pointer to the wm_softc structure
   8233  * index - The starting byte index of the word to read.
   8234  * data - Pointer to a word to store the value read.
   8235  *****************************************************************************/
   8236 static int32_t
   8237 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8238 {
   8239 	int32_t status;
   8240 
   8241 	status = wm_read_ich8_data(sc, index, 2, data);
   8242 	return status;
   8243 }
   8244 
   8245 /******************************************************************************
   8246  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8247  * register.
   8248  *
   8249  * sc - Struct containing variables accessed by shared code
   8250  * offset - offset of word in the EEPROM to read
   8251  * data - word read from the EEPROM
   8252  * words - number of words to read
   8253  *****************************************************************************/
   8254 static int
   8255 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8256 {
   8257 	int32_t  error = 0;
   8258 	uint32_t flash_bank = 0;
   8259 	uint32_t act_offset = 0;
   8260 	uint32_t bank_offset = 0;
   8261 	uint16_t word = 0;
   8262 	uint16_t i = 0;
   8263 
   8264 	/*
   8265 	 * We need to know which is the valid flash bank.  In the event
   8266 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8267 	 * managing flash_bank.  So it cannot be trusted and needs
   8268 	 * to be updated with each read.
   8269 	 */
   8270 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8271 	if (error) {
   8272 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   8273 			device_xname(sc->sc_dev)));
   8274 		flash_bank = 0;
   8275 	}
   8276 
	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted
	 * in words, so multiply by 2 for a byte offset.
	 */
   8281 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8282 
   8283 	error = wm_get_swfwhw_semaphore(sc);
   8284 	if (error) {
   8285 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8286 		    __func__);
   8287 		return error;
   8288 	}
   8289 
   8290 	for (i = 0; i < words; i++) {
   8291 		/* The NVM part needs a byte offset, hence * 2 */
   8292 		act_offset = bank_offset + ((offset + i) * 2);
   8293 		error = wm_read_ich8_word(sc, act_offset, &word);
   8294 		if (error) {
   8295 			aprint_error_dev(sc->sc_dev,
   8296 			    "%s: failed to read NVM\n", __func__);
   8297 			break;
   8298 		}
   8299 		data[i] = word;
   8300 	}
   8301 
   8302 	wm_put_swfwhw_semaphore(sc);
   8303 	return error;
   8304 }
   8305 
   8306 /* Lock, detecting NVM type, validate checksum and read */
   8307 
   8308 /*
   8309  * wm_nvm_acquire:
   8310  *
   8311  *	Perform the EEPROM handshake required on some chips.
   8312  */
   8313 static int
   8314 wm_nvm_acquire(struct wm_softc *sc)
   8315 {
   8316 	uint32_t reg;
   8317 	int x;
   8318 	int ret = 0;
   8319 
	/* Flash-type NVM needs no handshake; always succeed */
   8321 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8322 		return 0;
   8323 
   8324 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8325 		ret = wm_get_swfwhw_semaphore(sc);
   8326 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8327 		/* This will also do wm_get_swsm_semaphore() if needed */
   8328 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   8329 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8330 		ret = wm_get_swsm_semaphore(sc);
   8331 	}
   8332 
   8333 	if (ret) {
   8334 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8335 			__func__);
   8336 		return 1;
   8337 	}
   8338 
   8339 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8340 		reg = CSR_READ(sc, WMREG_EECD);
   8341 
   8342 		/* Request EEPROM access. */
   8343 		reg |= EECD_EE_REQ;
   8344 		CSR_WRITE(sc, WMREG_EECD, reg);
   8345 
   8346 		/* ..and wait for it to be granted. */
   8347 		for (x = 0; x < 1000; x++) {
   8348 			reg = CSR_READ(sc, WMREG_EECD);
   8349 			if (reg & EECD_EE_GNT)
   8350 				break;
   8351 			delay(5);
   8352 		}
   8353 		if ((reg & EECD_EE_GNT) == 0) {
   8354 			aprint_error_dev(sc->sc_dev,
   8355 			    "could not acquire EEPROM GNT\n");
   8356 			reg &= ~EECD_EE_REQ;
   8357 			CSR_WRITE(sc, WMREG_EECD, reg);
   8358 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8359 				wm_put_swfwhw_semaphore(sc);
   8360 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   8361 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8362 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8363 				wm_put_swsm_semaphore(sc);
   8364 			return 1;
   8365 		}
   8366 	}
   8367 
   8368 	return 0;
   8369 }
   8370 
   8371 /*
   8372  * wm_nvm_release:
   8373  *
   8374  *	Release the EEPROM mutex.
   8375  */
   8376 static void
   8377 wm_nvm_release(struct wm_softc *sc)
   8378 {
   8379 	uint32_t reg;
   8380 
	/* Flash-type NVM needs no handshake; nothing to release */
   8382 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8383 		return;
   8384 
   8385 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8386 		reg = CSR_READ(sc, WMREG_EECD);
   8387 		reg &= ~EECD_EE_REQ;
   8388 		CSR_WRITE(sc, WMREG_EECD, reg);
   8389 	}
   8390 
   8391 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8392 		wm_put_swfwhw_semaphore(sc);
   8393 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8394 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8395 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8396 		wm_put_swsm_semaphore(sc);
   8397 }
   8398 
   8399 static int
   8400 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   8401 {
   8402 	uint32_t eecd = 0;
   8403 
   8404 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   8405 	    || sc->sc_type == WM_T_82583) {
   8406 		eecd = CSR_READ(sc, WMREG_EECD);
   8407 
   8408 		/* Isolate bits 15 & 16 */
   8409 		eecd = ((eecd >> 15) & 0x03);
   8410 
   8411 		/* If both bits are set, device is Flash type */
   8412 		if (eecd == 0x03)
   8413 			return 0;
   8414 	}
   8415 	return 1;
   8416 }
   8417 
   8418 /*
   8419  * wm_nvm_validate_checksum
   8420  *
 * The checksum is defined such that the sum of the first 64 (16-bit)
 * words equals NVM_CHECKSUM.
   8422  */
   8423 static int
   8424 wm_nvm_validate_checksum(struct wm_softc *sc)
   8425 {
   8426 	uint16_t checksum;
   8427 	uint16_t eeprom_data;
   8428 #ifdef WM_DEBUG
   8429 	uint16_t csum_wordaddr, valid_checksum;
   8430 #endif
   8431 	int i;
   8432 
   8433 	checksum = 0;
   8434 
   8435 	/* Don't check for I211 */
   8436 	if (sc->sc_type == WM_T_I211)
   8437 		return 0;
   8438 
   8439 #ifdef WM_DEBUG
   8440 	if (sc->sc_type == WM_T_PCH_LPT) {
   8441 		csum_wordaddr = NVM_OFF_COMPAT;
   8442 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   8443 	} else {
   8444 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   8445 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   8446 	}
   8447 
   8448 	/* Dump EEPROM image for debug */
   8449 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8450 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8451 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   8452 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   8453 		if ((eeprom_data & valid_checksum) == 0) {
   8454 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   8456 				device_xname(sc->sc_dev), eeprom_data,
   8457 				    valid_checksum));
   8458 		}
   8459 	}
   8460 
   8461 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   8462 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   8463 		for (i = 0; i < NVM_SIZE; i++) {
   8464 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8465 				printf("XX ");
   8466 			else
   8467 				printf("%04x ", eeprom_data);
   8468 			if (i % 8 == 7)
   8469 				printf("\n");
   8470 		}
   8471 	}
   8472 
   8473 #endif /* WM_DEBUG */
   8474 
   8475 	for (i = 0; i < NVM_SIZE; i++) {
   8476 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8477 			return 1;
   8478 		checksum += eeprom_data;
   8479 	}
   8480 
   8481 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   8482 #ifdef WM_DEBUG
   8483 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   8484 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   8485 #endif
   8486 	}
   8487 
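	/* A checksum mismatch is only reported; it is not treated as fatal. */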
   8488 	return 0;
   8489 }
   8490 
   8491 /*
   8492  * wm_nvm_read:
   8493  *
   8494  *	Read data from the serial EEPROM.
   8495  */
   8496 static int
   8497 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8498 {
   8499 	int rv;
   8500 
   8501 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   8502 		return 1;
   8503 
   8504 	if (wm_nvm_acquire(sc))
   8505 		return 1;
   8506 
   8507 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8508 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8509 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   8510 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   8511 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   8512 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   8513 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   8514 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   8515 	else
   8516 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   8517 
   8518 	wm_nvm_release(sc);
   8519 	return rv;
   8520 }
   8521 
   8522 /*
   8523  * Hardware semaphores.
 * Very complex...
   8525  */
   8526 
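/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore in the SWSM register:
 *	first the SMBI bit, then the SWESMBI bit.
 */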
   8527 static int
   8528 wm_get_swsm_semaphore(struct wm_softc *sc)
   8529 {
   8530 	int32_t timeout;
   8531 	uint32_t swsm;
   8532 
   8533 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8534 		/* Get the SW semaphore. */
   8535 		timeout = sc->sc_nvm_wordsize + 1;
   8536 		while (timeout) {
   8537 			swsm = CSR_READ(sc, WMREG_SWSM);
   8538 
   8539 			if ((swsm & SWSM_SMBI) == 0)
   8540 				break;
   8541 
   8542 			delay(50);
   8543 			timeout--;
   8544 		}
   8545 
   8546 		if (timeout == 0) {
   8547 			aprint_error_dev(sc->sc_dev,
   8548 			    "could not acquire SWSM SMBI\n");
   8549 			return 1;
   8550 		}
   8551 	}
   8552 
   8553 	/* Get the FW semaphore. */
   8554 	timeout = sc->sc_nvm_wordsize + 1;
   8555 	while (timeout) {
   8556 		swsm = CSR_READ(sc, WMREG_SWSM);
   8557 		swsm |= SWSM_SWESMBI;
   8558 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   8559 		/* If we managed to set the bit we got the semaphore. */
   8560 		swsm = CSR_READ(sc, WMREG_SWSM);
   8561 		if (swsm & SWSM_SWESMBI)
   8562 			break;
   8563 
   8564 		delay(50);
   8565 		timeout--;
   8566 	}
   8567 
   8568 	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not acquire SWSM SWESMBI\n");
   8570 		/* Release semaphores */
   8571 		wm_put_swsm_semaphore(sc);
   8572 		return 1;
   8573 	}
   8574 	return 0;
   8575 }
   8576 
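/*
 * wm_put_swsm_semaphore:
 *
 *	Release the SWSM semaphore by clearing the SMBI and SWESMBI bits.
 */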
   8577 static void
   8578 wm_put_swsm_semaphore(struct wm_softc *sc)
   8579 {
   8580 	uint32_t swsm;
   8581 
   8582 	swsm = CSR_READ(sc, WMREG_SWSM);
   8583 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   8584 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   8585 }
   8586 
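/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software/firmware synchronization semaphore
 *	(SW_FW_SYNC) for the resources selected by the given mask.
 */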
   8587 static int
   8588 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8589 {
   8590 	uint32_t swfw_sync;
   8591 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   8592 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   8594 
   8595 	for (timeout = 0; timeout < 200; timeout++) {
   8596 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8597 			if (wm_get_swsm_semaphore(sc)) {
   8598 				aprint_error_dev(sc->sc_dev,
   8599 				    "%s: failed to get semaphore\n",
   8600 				    __func__);
   8601 				return 1;
   8602 			}
   8603 		}
   8604 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8605 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   8606 			swfw_sync |= swmask;
   8607 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8608 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   8609 				wm_put_swsm_semaphore(sc);
   8610 			return 0;
   8611 		}
   8612 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   8613 			wm_put_swsm_semaphore(sc);
   8614 		delay(5000);
   8615 	}
   8616 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   8617 	    device_xname(sc->sc_dev), mask, swfw_sync);
   8618 	return 1;
   8619 }
   8620 
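/*
 * wm_put_swfw_semaphore:
 *
 *	Release the SW_FW_SYNC bits selected by the given mask.
 */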
   8621 static void
   8622 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8623 {
   8624 	uint32_t swfw_sync;
   8625 
   8626 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8627 		while (wm_get_swsm_semaphore(sc) != 0)
   8628 			continue;
   8629 	}
   8630 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8631 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   8632 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8633 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   8634 		wm_put_swsm_semaphore(sc);
   8635 }
   8636 
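/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag in the extended configuration control
 *	register (EXTCNFCTR).
 */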
   8637 static int
   8638 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   8639 {
   8640 	uint32_t ext_ctrl;
	int timeout;
   8642 
   8643 	for (timeout = 0; timeout < 200; timeout++) {
   8644 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8645 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   8646 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8647 
   8648 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8649 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   8650 			return 0;
   8651 		delay(5000);
   8652 	}
   8653 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   8654 	    device_xname(sc->sc_dev), ext_ctrl);
   8655 	return 1;
   8656 }
   8657 
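/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag in the EXTCNFCTR register.
 */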
   8658 static void
   8659 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   8660 {
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8663 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   8664 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8665 }
   8666 
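/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO software ownership via the EXTCNFCTR register.
 */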
   8667 static int
   8668 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   8669 {
   8670 	int i = 0;
   8671 	uint32_t reg;
   8672 
   8673 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8674 	do {
   8675 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   8676 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   8677 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8678 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   8679 			break;
   8680 		delay(2*1000);
   8681 		i++;
   8682 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   8683 
   8684 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   8685 		wm_put_hw_semaphore_82573(sc);
   8686 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   8687 		    device_xname(sc->sc_dev));
   8688 		return -1;
   8689 	}
   8690 
   8691 	return 0;
   8692 }
   8693 
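/*
 * wm_put_hw_semaphore_82573:
 *
 *	Release MDIO software ownership.
 */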
   8694 static void
   8695 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   8696 {
   8697 	uint32_t reg;
   8698 
   8699 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8700 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   8701 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8702 }
   8703 
   8704 /*
   8705  * Management mode and power management related subroutines.
   8706  * BMC, AMT, suspend/resume and EEE.
   8707  */
   8708 
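/*
 * wm_check_mng_mode:
 *
 *	Return non-zero if the management mode appropriate for the chip
 *	type is enabled.
 */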
   8709 static int
   8710 wm_check_mng_mode(struct wm_softc *sc)
   8711 {
   8712 	int rv;
   8713 
   8714 	switch (sc->sc_type) {
   8715 	case WM_T_ICH8:
   8716 	case WM_T_ICH9:
   8717 	case WM_T_ICH10:
   8718 	case WM_T_PCH:
   8719 	case WM_T_PCH2:
   8720 	case WM_T_PCH_LPT:
   8721 		rv = wm_check_mng_mode_ich8lan(sc);
   8722 		break;
   8723 	case WM_T_82574:
   8724 	case WM_T_82583:
   8725 		rv = wm_check_mng_mode_82574(sc);
   8726 		break;
   8727 	case WM_T_82571:
   8728 	case WM_T_82572:
   8729 	case WM_T_82573:
   8730 	case WM_T_80003:
   8731 		rv = wm_check_mng_mode_generic(sc);
   8732 		break;
   8733 	default:
		/* nothing to do */
   8735 		rv = 0;
   8736 		break;
   8737 	}
   8738 
   8739 	return rv;
   8740 }
   8741 
   8742 static int
   8743 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   8744 {
   8745 	uint32_t fwsm;
   8746 
   8747 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8748 
   8749 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   8750 		return 1;
   8751 
   8752 	return 0;
   8753 }
   8754 
   8755 static int
   8756 wm_check_mng_mode_82574(struct wm_softc *sc)
   8757 {
   8758 	uint16_t data;
   8759 
   8760 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   8761 
   8762 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   8763 		return 1;
   8764 
   8765 	return 0;
   8766 }
   8767 
   8768 static int
   8769 wm_check_mng_mode_generic(struct wm_softc *sc)
   8770 {
   8771 	uint32_t fwsm;
   8772 
   8773 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8774 
   8775 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   8776 		return 1;
   8777 
   8778 	return 0;
   8779 }
   8780 
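/*
 * wm_enable_mng_pass_thru:
 *
 *	Return non-zero if management pass-through should be enabled:
 *	firmware must be present and reception of TCO packets active.
 */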
   8781 static int
   8782 wm_enable_mng_pass_thru(struct wm_softc *sc)
   8783 {
   8784 	uint32_t manc, fwsm, factps;
   8785 
   8786 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   8787 		return 0;
   8788 
   8789 	manc = CSR_READ(sc, WMREG_MANC);
   8790 
   8791 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   8792 		device_xname(sc->sc_dev), manc));
   8793 	if ((manc & MANC_RECV_TCO_EN) == 0)
   8794 		return 0;
   8795 
   8796 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   8797 		fwsm = CSR_READ(sc, WMREG_FWSM);
   8798 		factps = CSR_READ(sc, WMREG_FACTPS);
   8799 		if (((factps & FACTPS_MNGCG) == 0)
   8800 		    && ((fwsm & FWSM_MODE_MASK)
   8801 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   8802 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   8804 		uint16_t data;
   8805 
   8806 		factps = CSR_READ(sc, WMREG_FACTPS);
   8807 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   8808 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   8809 			device_xname(sc->sc_dev), factps, data));
   8810 		if (((factps & FACTPS_MNGCG) == 0)
   8811 		    && ((data & NVM_CFG2_MNGM_MASK)
   8812 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   8813 			return 1;
   8814 	} else if (((manc & MANC_SMBUS_EN) != 0)
   8815 	    && ((manc & MANC_ASF_EN) == 0))
   8816 		return 1;
   8817 
   8818 	return 0;
   8819 }
   8820 
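/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently not blocked, -1 if it is.
 */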
   8821 static int
   8822 wm_check_reset_block(struct wm_softc *sc)
   8823 {
   8824 	uint32_t reg;
   8825 
   8826 	switch (sc->sc_type) {
   8827 	case WM_T_ICH8:
   8828 	case WM_T_ICH9:
   8829 	case WM_T_ICH10:
   8830 	case WM_T_PCH:
   8831 	case WM_T_PCH2:
   8832 	case WM_T_PCH_LPT:
   8833 		reg = CSR_READ(sc, WMREG_FWSM);
   8834 		if ((reg & FWSM_RSPCIPHY) != 0)
   8835 			return 0;
   8836 		else
   8837 			return -1;
   8838 		break;
   8839 	case WM_T_82571:
   8840 	case WM_T_82572:
   8841 	case WM_T_82573:
   8842 	case WM_T_82574:
   8843 	case WM_T_82583:
   8844 	case WM_T_80003:
   8845 		reg = CSR_READ(sc, WMREG_MANC);
   8846 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8847 			return -1;
   8848 		else
   8849 			return 0;
   8850 		break;
   8851 	default:
   8852 		/* no problem */
   8853 		break;
   8854 	}
   8855 
   8856 	return 0;
   8857 }
   8858 
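/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken control of the
 *	device by setting the DRV_LOAD bit (in SWSM on the 82573, in
 *	CTRL_EXT on the other chip types).
 */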
   8859 static void
   8860 wm_get_hw_control(struct wm_softc *sc)
   8861 {
   8862 	uint32_t reg;
   8863 
   8864 	switch (sc->sc_type) {
   8865 	case WM_T_82573:
   8866 		reg = CSR_READ(sc, WMREG_SWSM);
   8867 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8868 		break;
   8869 	case WM_T_82571:
   8870 	case WM_T_82572:
   8871 	case WM_T_82574:
   8872 	case WM_T_82583:
   8873 	case WM_T_80003:
   8874 	case WM_T_ICH8:
   8875 	case WM_T_ICH9:
   8876 	case WM_T_ICH10:
   8877 	case WM_T_PCH:
   8878 	case WM_T_PCH2:
   8879 	case WM_T_PCH_LPT:
   8880 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8881 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8882 		break;
   8883 	default:
   8884 		break;
   8885 	}
   8886 }
   8887 
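/*
 * wm_release_hw_control:
 *
 *	Tell the firmware that the driver has released control of the
 *	device by clearing the DRV_LOAD bit.
 */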
   8888 static void
   8889 wm_release_hw_control(struct wm_softc *sc)
   8890 {
   8891 	uint32_t reg;
   8892 
   8893 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8894 		return;
   8895 
   8896 	if (sc->sc_type == WM_T_82573) {
   8897 		reg = CSR_READ(sc, WMREG_SWSM);
   8898 		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
   8900 	} else {
   8901 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8902 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8903 	}
   8904 }
   8905 
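/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate (on != 0) or ungate automatic PHY configuration by hardware.
 */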
   8906 static void
   8907 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   8908 {
   8909 	uint32_t reg;
   8910 
   8911 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8912 
   8913 	if (on != 0)
   8914 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   8915 	else
   8916 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   8917 
   8918 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8919 }
   8920 
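/*
 * wm_smbustopci:
 *
 *	Toggle LANPHYPC to switch the PHY from SMBus to PCIe mode when
 *	no valid firmware is present.
 */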
   8921 static void
   8922 wm_smbustopci(struct wm_softc *sc)
   8923 {
   8924 	uint32_t fwsm;
   8925 
   8926 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8927 	if (((fwsm & FWSM_FW_VALID) == 0)
   8928 	    && ((wm_check_reset_block(sc) == 0))) {
   8929 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   8930 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   8931 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8932 		CSR_WRITE_FLUSH(sc);
   8933 		delay(10);
   8934 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   8935 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8936 		CSR_WRITE_FLUSH(sc);
   8937 		delay(50*1000);
   8938 
   8939 		/*
   8940 		 * Gate automatic PHY configuration by hardware on non-managed
   8941 		 * 82579
   8942 		 */
   8943 		if (sc->sc_type == WM_T_PCH2)
   8944 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   8945 	}
   8946 }
   8947 
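/*
 * wm_init_manageability:
 *
 *	Let the host, not the hardware, handle ARP, and forward
 *	management packets (ports 623/624) to the host on newer chips.
 */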
   8948 static void
   8949 wm_init_manageability(struct wm_softc *sc)
   8950 {
   8951 
   8952 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8953 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8954 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8955 
   8956 		/* Disable hardware interception of ARP */
   8957 		manc &= ~MANC_ARP_EN;
   8958 
   8959 		/* Enable receiving management packets to the host */
   8960 		if (sc->sc_type >= WM_T_82571) {
   8961 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
   8966 
   8967 		CSR_WRITE(sc, WMREG_MANC, manc);
   8968 	}
   8969 }
   8970 
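/*
 * wm_release_manageability:
 *
 *	Return ARP and management packet handling to the firmware.
 */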
   8971 static void
   8972 wm_release_manageability(struct wm_softc *sc)
   8973 {
   8974 
   8975 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8976 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8977 
   8978 		manc |= MANC_ARP_EN;
   8979 		if (sc->sc_type >= WM_T_82571)
   8980 			manc &= ~MANC_EN_MNG2HOST;
   8981 
   8982 		CSR_WRITE(sc, WMREG_MANC, manc);
   8983 	}
   8984 }
   8985 
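/*
 * wm_get_wakeup:
 *
 *	Set the management and wakeup related flags (HAS_AMT,
 *	ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES, HAS_MANAGE) according to
 *	the chip type.
 */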
   8986 static void
   8987 wm_get_wakeup(struct wm_softc *sc)
   8988 {
   8989 
   8990 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   8991 	switch (sc->sc_type) {
   8992 	case WM_T_82573:
   8993 	case WM_T_82583:
   8994 		sc->sc_flags |= WM_F_HAS_AMT;
   8995 		/* FALLTHROUGH */
   8996 	case WM_T_80003:
   8997 	case WM_T_82541:
   8998 	case WM_T_82547:
   8999 	case WM_T_82571:
   9000 	case WM_T_82572:
   9001 	case WM_T_82574:
   9002 	case WM_T_82575:
   9003 	case WM_T_82576:
   9004 	case WM_T_82580:
   9005 	case WM_T_82580ER:
   9006 	case WM_T_I350:
   9007 	case WM_T_I354:
   9008 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   9009 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   9010 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9011 		break;
   9012 	case WM_T_ICH8:
   9013 	case WM_T_ICH9:
   9014 	case WM_T_ICH10:
   9015 	case WM_T_PCH:
   9016 	case WM_T_PCH2:
   9017 	case WM_T_PCH_LPT:
   9018 		sc->sc_flags |= WM_F_HAS_AMT;
   9019 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   9020 		break;
   9021 	default:
   9022 		break;
   9023 	}
   9024 
   9025 	/* 1: HAS_MANAGE */
   9026 	if (wm_enable_mng_pass_thru(sc) != 0)
   9027 		sc->sc_flags |= WM_F_HAS_MANAGE;
   9028 
   9029 #ifdef WM_DEBUG
   9030 	printf("\n");
   9031 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   9032 		printf("HAS_AMT,");
   9033 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   9034 		printf("ARC_SUBSYS_VALID,");
   9035 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   9036 		printf("ASF_FIRMWARE_PRES,");
   9037 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   9038 		printf("HAS_MANAGE,");
   9039 	printf("\n");
   9040 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM-related reset
	 * code has run.
	 */
   9045 }
   9046 
   9047 #ifdef WM_WOL
   9048 /* WOL in the newer chipset interfaces (pchlan) */
   9049 static void
   9050 wm_enable_phy_wakeup(struct wm_softc *sc)
   9051 {
   9052 #if 0
   9053 	uint16_t preg;
   9054 
   9055 	/* Copy MAC RARs to PHY RARs */
   9056 
   9057 	/* Copy MAC MTA to PHY MTA */
   9058 
   9059 	/* Configure PHY Rx Control register */
   9060 
   9061 	/* Enable PHY wakeup in MAC register */
   9062 
   9063 	/* Configure and enable PHY wakeup in PHY registers */
   9064 
   9065 	/* Activate PHY wakeup */
   9066 
   9067 	/* XXX */
   9068 #endif
   9069 }
   9070 
   9071 /* Power down workaround on D3 */
   9072 static void
   9073 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   9074 {
   9075 	uint32_t reg;
   9076 	int i;
   9077 
   9078 	for (i = 0; i < 2; i++) {
   9079 		/* Disable link */
   9080 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9081 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9082 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9083 
   9084 		/*
   9085 		 * Call gig speed drop workaround on Gig disable before
   9086 		 * accessing any PHY registers
   9087 		 */
   9088 		if (sc->sc_type == WM_T_ICH8)
   9089 			wm_gig_downshift_workaround_ich8lan(sc);
   9090 
   9091 		/* Write VR power-down enable */
   9092 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9093 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9094 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   9095 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   9096 
   9097 		/* Read it back and test */
   9098 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   9099 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   9100 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   9101 			break;
   9102 
   9103 		/* Issue PHY reset and repeat at most one more time */
   9104 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9105 	}
   9106 }
   9107 
   9108 static void
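/*
 * wm_enable_wakeup:
 *
 *	Program the wakeup (WUC/WUFC) and PCI power management registers
 *	so the device can generate PME# on wakeup events.
 */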
   9109 wm_enable_wakeup(struct wm_softc *sc)
   9110 {
   9111 	uint32_t reg, pmreg;
   9112 	pcireg_t pmode;
   9113 
   9114 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   9115 		&pmreg, NULL) == 0)
   9116 		return;
   9117 
   9118 	/* Advertise the wakeup capability */
   9119 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   9120 	    | CTRL_SWDPIN(3));
   9121 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   9122 
   9123 	/* ICH workaround */
   9124 	switch (sc->sc_type) {
   9125 	case WM_T_ICH8:
   9126 	case WM_T_ICH9:
   9127 	case WM_T_ICH10:
   9128 	case WM_T_PCH:
   9129 	case WM_T_PCH2:
   9130 	case WM_T_PCH_LPT:
   9131 		/* Disable gig during WOL */
   9132 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9133 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   9134 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9135 		if (sc->sc_type == WM_T_PCH)
   9136 			wm_gmii_reset(sc);
   9137 
   9138 		/* Power down workaround */
   9139 		if (sc->sc_phytype == WMPHY_82577) {
   9140 			struct mii_softc *child;
   9141 
   9142 			/* Assume that the PHY is copper */
   9143 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9144 			if (child->mii_mpd_rev <= 2)
   9145 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   9146 				    (768 << 5) | 25, 0x0444); /* magic num */
   9147 		}
   9148 		break;
   9149 	default:
   9150 		break;
   9151 	}
   9152 
   9153 	/* Keep the laser running on fiber adapters */
   9154 	if (((sc->sc_mediatype & WMP_F_FIBER) != 0)
	    || ((sc->sc_mediatype & WMP_F_SERDES) != 0)) {
   9156 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9157 		reg |= CTRL_EXT_SWDPIN(3);
   9158 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9159 	}
   9160 
   9161 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   9162 #if 0	/* for the multicast packet */
   9163 	reg |= WUFC_MC;
   9164 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   9165 #endif
   9166 
   9167 	if (sc->sc_type == WM_T_PCH) {
   9168 		wm_enable_phy_wakeup(sc);
   9169 	} else {
   9170 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   9171 		CSR_WRITE(sc, WMREG_WUFC, reg);
   9172 	}
   9173 
   9174 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   9175 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   9176 		|| (sc->sc_type == WM_T_PCH2))
   9177 		    && (sc->sc_phytype == WMPHY_IGP_3))
   9178 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   9179 
   9180 	/* Request PME */
   9181 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   9182 #if 0
   9183 	/* Disable WOL */
   9184 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   9185 #else
   9186 	/* For WOL */
   9187 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   9188 #endif
   9189 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   9190 }
   9191 #endif /* WM_WOL */
   9192 
   9193 /* EEE */
   9194 
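/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (via IPCNFG/EEER)
 *	according to the WM_F_EEE flag.
 */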
   9195 static void
   9196 wm_set_eee_i350(struct wm_softc *sc)
   9197 {
   9198 	uint32_t ipcnfg, eeer;
   9199 
   9200 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   9201 	eeer = CSR_READ(sc, WMREG_EEER);
   9202 
   9203 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   9204 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9205 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9206 		    | EEER_LPI_FC);
   9207 	} else {
   9208 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9209 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9210 		    | EEER_LPI_FC);
   9211 	}
   9212 
   9213 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   9214 	CSR_WRITE(sc, WMREG_EEER, eeer);
   9215 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   9216 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   9217 }
   9218 
   9219 /*
   9220  * Workarounds (mainly PHY related).
   9221  * Basically, PHY's workarounds are in the PHY drivers.
   9222  */
   9223 
   9224 /* Work-around for 82566 Kumeran PCS lock loss */
   9225 static void
   9226 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   9227 {
   9228 	int miistatus, active, i;
   9229 	int reg;
   9230 
   9231 	miistatus = sc->sc_mii.mii_media_status;
   9232 
   9233 	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
   9235 		return;
   9236 
   9237 	active = sc->sc_mii.mii_media_active;
   9238 
   9239 	/* Nothing to do if the link is other than 1Gbps */
   9240 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   9241 		return;
   9242 
   9243 	for (i = 0; i < 10; i++) {
		/* Read twice; the first read is to clear, the second to check */
   9245 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   9246 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   9248 			goto out;	/* GOOD! */
   9249 
   9250 		/* Reset the PHY */
   9251 		wm_gmii_reset(sc);
   9252 		delay(5*1000);
   9253 	}
   9254 
   9255 	/* Disable GigE link negotiation */
   9256 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9257 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9258 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9259 
   9260 	/*
   9261 	 * Call gig speed drop workaround on Gig disable before accessing
   9262 	 * any PHY registers.
   9263 	 */
   9264 	wm_gig_downshift_workaround_ich8lan(sc);
   9265 
   9266 out:
   9267 	return;
   9268 }
   9269 
   9270 /* WOL from S5 stops working */
   9271 static void
   9272 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   9273 {
   9274 	uint16_t kmrn_reg;
   9275 
   9276 	/* Only for igp3 */
   9277 	if (sc->sc_phytype == WMPHY_IGP_3) {
   9278 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   9279 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   9280 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9281 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   9282 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9283 	}
   9284 }
   9285 
   9286 /*
   9287  * Workaround for pch's PHYs
   9288  * XXX should be moved to new PHY driver?
   9289  */
   9290 static void
   9291 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   9292 {
   9293 	if (sc->sc_phytype == WMPHY_82577)
   9294 		wm_set_mdio_slow_mode_hv(sc);
   9295 
   9296 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   9297 
   9298 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   9299 
   9300 	/* 82578 */
   9301 	if (sc->sc_phytype == WMPHY_82578) {
   9302 		/* PCH rev. < 3 */
   9303 		if (sc->sc_rev < 3) {
   9304 			/* XXX 6 bit shift? Why? Is it page2? */
   9305 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   9306 			    0x66c0);
   9307 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   9308 			    0xffff);
   9309 		}
   9310 
   9311 		/* XXX phy rev. < 2 */
   9312 	}
   9313 
   9314 	/* Select page 0 */
   9315 
   9316 	/* XXX acquire semaphore */
   9317 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   9318 	/* XXX release semaphore */
   9319 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link runs at 1Gbps.
	 */
   9324 	wm_k1_gig_workaround_hv(sc, 1);
   9325 }
   9326 
   9327 static void
   9328 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   9329 {
   9330 
   9331 	wm_set_mdio_slow_mode_hv(sc);
   9332 }
   9333 
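/*
 * wm_k1_gig_workaround_hv:
 *
 *	Link stall workaround: disable K1 while the link is up and apply
 *	the matching KMRN diagnostic setting for the link state.
 */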
   9334 static void
   9335 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   9336 {
   9337 	int k1_enable = sc->sc_nvm_k1_enabled;
   9338 
   9339 	/* XXX acquire semaphore */
   9340 
   9341 	if (link) {
   9342 		k1_enable = 0;
   9343 
   9344 		/* Link stall fix for link up */
   9345 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   9346 	} else {
   9347 		/* Link stall fix for link down */
   9348 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   9349 	}
   9350 
   9351 	wm_configure_k1_ich8lan(sc, k1_enable);
   9352 
   9353 	/* XXX release semaphore */
   9354 }
   9355 
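/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Put the KMRN MDIO interface into slow mode.
 */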
   9356 static void
   9357 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   9358 {
   9359 	uint32_t reg;
   9360 
   9361 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   9362 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   9363 	    reg | HV_KMRN_MDIO_SLOW);
   9364 }
   9365 
   9366 static void
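/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 in the Kumeran interface, briefly forcing
 *	the MAC speed so the change takes effect.
 */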
   9367 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   9368 {
   9369 	uint32_t ctrl, ctrl_ext, tmp;
   9370 	uint16_t kmrn_reg;
   9371 
   9372 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   9373 
   9374 	if (k1_enable)
   9375 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   9376 	else
   9377 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   9378 
   9379 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   9380 
   9381 	delay(20);
   9382 
   9383 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9384 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9385 
   9386 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   9387 	tmp |= CTRL_FRCSPD;
   9388 
   9389 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   9390 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   9391 	CSR_WRITE_FLUSH(sc);
   9392 	delay(20);
   9393 
   9394 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   9395 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9396 	CSR_WRITE_FLUSH(sc);
   9397 	delay(20);
   9398 }
   9399 
/* Special case - the 82575 needs manual init ... */
   9401 static void
   9402 wm_reset_init_script_82575(struct wm_softc *sc)
   9403 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD
	 * driver for the i82575.
	 */
   9408 
   9409 	/* SerDes configuration via SERDESCTRL */
   9410 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   9411 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   9412 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   9413 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   9414 
   9415 	/* CCM configuration via CCMCTL register */
   9416 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   9417 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   9418 
   9419 	/* PCIe lanes configuration */
   9420 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   9421 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   9422 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   9423 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   9424 
   9425 	/* PCIe PLL Configuration */
   9426 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   9427 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   9428 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   9429 }
   9430