/*	$NetBSD: if_wm.c,v 1.284 2014/07/31 02:54:46 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.284 2014/07/31 02:54:46 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

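/*
 * Example DPRINTF() usage (illustrative): the second argument must be a
 * parenthesized printf() argument list, because the macro expands to
 * "printf y":
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * The message is emitted only when the corresponding class bit is set
 * in wm_debug.
 */
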
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

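/*
 * Since the descriptor and job counts above are powers of two, ring
 * indices wrap with a cheap mask rather than a modulo.  For example,
 * with WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0
 *
 * The same idiom applies to the Tx job ring via WM_TXQUEUELEN_MASK()
 * and to the Rx ring below.
 */
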
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

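/*
 * Worked example for the sizing comment above: a 9018-byte jumbo frame
 * split across 2k (MCLBYTES) buffers needs 5 Rx descriptors, so the
 * 256-entry ring holds roughly 256 / 5 == 51 of them -- hence the
 * "room for 50 jumbo packets" figure.
 */
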
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

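/*
 * These offsets locate individual descriptors within the single DMA'd
 * control-data clump; e.g. WM_CDRXOFF(10) is just
 * offsetof(struct wm_control_data_82544, wcd_rxdescs[10]) and is later
 * added to the map's base DMA address by WM_CDRXADDR().
 */
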
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

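/*
 * swfwphysem[] maps a MAC function number to its software/firmware PHY
 * semaphore, so e.g. the second port of a multi-port chip (sc_funcid
 * == 1) acquires SWFW_PHY1_SM through wm_get_swfw_semaphore().
 */
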
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))

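/*
 * Note the fixed ordering encoded above: WM_BOTH_LOCK() always takes
 * the Tx lock before the Rx lock, and WM_BOTH_UNLOCK() releases them in
 * the reverse order, so paths that need both locks cannot deadlock
 * against one another.  When a lock pointer is NULL (the non-MPSAFE
 * case) the macros collapse to no-ops.
 */
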
#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

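/*
 * CSR_WRITE_FLUSH() performs a harmless read of the STATUS register;
 * because PCI reads cannot pass posted writes, this forces any pending
 * register writes out to the chip, e.g. before a delay() that assumes
 * the write has already taken effect.
 */
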
#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

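/*
 * Illustrative WM_CDTXSYNC() case: syncing 4 descriptors starting at
 * index 4094 of a 4096-entry ring issues two bus_dmamap_sync() calls,
 * one for descriptors 4094-4095 and one for 0-1, because the region
 * wraps past the end of the ring.
 */
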
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

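/*
 * The 2-byte "align tweak" used above exists because the Ethernet
 * header is 14 bytes long: starting the received frame at offset 2
 * leaves the IP header on a 4-byte boundary, which strict-alignment
 * CPUs require for 32-bit loads.
 */
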
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we do not
	 * have this hardware, so it is disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

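/*
 * These I/O-space accessors implement the chip's two-register indirect
 * window (IOADDR/IODATA in Intel's documentation): the target register
 * offset is written at I/O offset 0, and the data is then read or
 * written at I/O offset 4.
 */
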
static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

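/*
 * wm_set_dma_addr() splits a bus address into the little-endian
 * low/high pair that the descriptors expect.  The sizeof(bus_addr_t)
 * test folds to a constant at compile time, so 32-bit platforms simply
 * store zero in the high word; WM_CDTXADDR_HI()/WM_CDRXADDR_HI() above
 * use the same idiom.
 */
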
/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

   1322 	/*
    1323 	 * Map the device.  All devices support memory-mapped access,
   1324 	 * and it is really required for normal operation.
   1325 	 */
   1326 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1327 	switch (memtype) {
   1328 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1329 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1330 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1331 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1332 		break;
   1333 	default:
   1334 		memh_valid = 0;
   1335 		break;
   1336 	}
   1337 
   1338 	if (memh_valid) {
   1339 		sc->sc_st = memt;
   1340 		sc->sc_sh = memh;
   1341 		sc->sc_ss = memsize;
   1342 	} else {
   1343 		aprint_error_dev(sc->sc_dev,
   1344 		    "unable to map device registers\n");
   1345 		return;
   1346 	}
   1347 
   1348 	/*
   1349 	 * In addition, i82544 and later support I/O mapped indirect
   1350 	 * register access.  It is not desirable (nor supported in
   1351 	 * this driver) to use it for normal operation, though it is
   1352 	 * required to work around bugs in some chip versions.
   1353 	 */
   1354 	if (sc->sc_type >= WM_T_82544) {
   1355 		/* First we have to find the I/O BAR. */
   1356 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1357 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1358 			if (memtype == PCI_MAPREG_TYPE_IO)
   1359 				break;
   1360 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1361 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1362 				i += 4;	/* skip high bits, too */
   1363 		}
   1364 		if (i < PCI_MAPREG_END) {
   1365 			/*
    1366 			 * We found PCI_MAPREG_TYPE_IO.  Note that the
    1367 			 * 82580 (and newer?) chips have no
    1368 			 * PCI_MAPREG_TYPE_IO.  That's no problem, because
    1369 			 * those newer chips don't have this bug anyway.
    1370 			 *
    1371 			 * The i8254x apparently doesn't respond when the
    1372 			 * I/O BAR is 0, which looks as if it hasn't been
    1373 			 * configured.
   1374 			 */
   1375 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1376 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1377 				aprint_error_dev(sc->sc_dev,
   1378 				    "WARNING: I/O BAR at zero.\n");
   1379 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1380 					0, &sc->sc_iot, &sc->sc_ioh,
   1381 					NULL, &sc->sc_ios) == 0) {
   1382 				sc->sc_flags |= WM_F_IOH_VALID;
   1383 			} else {
   1384 				aprint_error_dev(sc->sc_dev,
   1385 				    "WARNING: unable to map I/O space\n");
   1386 			}
   1387 		}
   1388 
   1389 	}
   1390 
   1391 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1392 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1393 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1394 	if (sc->sc_type < WM_T_82542_2_1)
   1395 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1396 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1397 
   1398 	/* power up chip */
   1399 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1400 	    NULL)) && error != EOPNOTSUPP) {
   1401 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1402 		return;
   1403 	}
   1404 
   1405 	/*
   1406 	 * Map and establish our interrupt.
   1407 	 */
   1408 	if (pci_intr_map(pa, &ih)) {
   1409 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
   1410 		return;
   1411 	}
   1412 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   1413 #ifdef WM_MPSAFE
   1414 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
   1415 #endif
   1416 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1417 	if (sc->sc_ih == NULL) {
   1418 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
   1419 		if (intrstr != NULL)
   1420 			aprint_error(" at %s", intrstr);
   1421 		aprint_error("\n");
   1422 		return;
   1423 	}
   1424 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   1425 
   1426 	/*
   1427 	 * Check the function ID (unit number of the chip).
   1428 	 */
   1429 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1430 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1431 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1432 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   1433 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1434 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1435 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1436 	else
   1437 		sc->sc_funcid = 0;
   1438 
   1439 	/*
   1440 	 * Determine a few things about the bus we're connected to.
   1441 	 */
   1442 	if (sc->sc_type < WM_T_82543) {
   1443 		/* We don't really know the bus characteristics here. */
   1444 		sc->sc_bus_speed = 33;
   1445 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1446 		/*
    1447 		 * CSA (Communication Streaming Architecture) is about as fast
    1448 		 * as a 32-bit 66MHz PCI bus.
   1449 		 */
   1450 		sc->sc_flags |= WM_F_CSA;
   1451 		sc->sc_bus_speed = 66;
   1452 		aprint_verbose_dev(sc->sc_dev,
   1453 		    "Communication Streaming Architecture\n");
   1454 		if (sc->sc_type == WM_T_82547) {
   1455 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1456 			callout_setfunc(&sc->sc_txfifo_ch,
   1457 					wm_82547_txfifo_stall, sc);
   1458 			aprint_verbose_dev(sc->sc_dev,
   1459 			    "using 82547 Tx FIFO stall work-around\n");
   1460 		}
   1461 	} else if (sc->sc_type >= WM_T_82571) {
   1462 		sc->sc_flags |= WM_F_PCIE;
   1463 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1464 		    && (sc->sc_type != WM_T_ICH10)
   1465 		    && (sc->sc_type != WM_T_PCH)
   1466 		    && (sc->sc_type != WM_T_PCH2)
   1467 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1468 			/* ICH* and PCH* have no PCIe capability registers */
   1469 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1470 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1471 				NULL) == 0)
   1472 				aprint_error_dev(sc->sc_dev,
   1473 				    "unable to find PCIe capability\n");
   1474 		}
   1475 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1476 	} else {
   1477 		reg = CSR_READ(sc, WMREG_STATUS);
   1478 		if (reg & STATUS_BUS64)
   1479 			sc->sc_flags |= WM_F_BUS64;
   1480 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1481 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1482 
   1483 			sc->sc_flags |= WM_F_PCIX;
   1484 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1485 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1486 				aprint_error_dev(sc->sc_dev,
   1487 				    "unable to find PCIX capability\n");
   1488 			else if (sc->sc_type != WM_T_82545_3 &&
   1489 				 sc->sc_type != WM_T_82546_3) {
   1490 				/*
   1491 				 * Work around a problem caused by the BIOS
   1492 				 * setting the max memory read byte count
   1493 				 * incorrectly.
   1494 				 */
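				/*
				 * Annotation (added): MMRBC is encoded
				 * as a power of two, 512 << n bytes for
				 * n = 0..3 (512/1024/2048/4096).  If the
				 * BIOS programmed a byte count larger
				 * than the device's advertised maximum,
				 * clamp it to that maximum below.
				 */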
   1495 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1496 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1497 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1498 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1499 
   1500 				bytecnt =
   1501 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1502 				    PCIX_CMD_BYTECNT_SHIFT;
   1503 				maxb =
   1504 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1505 				    PCIX_STATUS_MAXB_SHIFT;
   1506 				if (bytecnt > maxb) {
   1507 					aprint_verbose_dev(sc->sc_dev,
   1508 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1509 					    512 << bytecnt, 512 << maxb);
   1510 					pcix_cmd = (pcix_cmd &
   1511 					    ~PCIX_CMD_BYTECNT_MASK) |
   1512 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1513 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1514 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1515 					    pcix_cmd);
   1516 				}
   1517 			}
   1518 		}
   1519 		/*
   1520 		 * The quad port adapter is special; it has a PCIX-PCIX
   1521 		 * bridge on the board, and can run the secondary bus at
   1522 		 * a higher speed.
   1523 		 */
   1524 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1525 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1526 								      : 66;
   1527 		} else if (sc->sc_flags & WM_F_PCIX) {
   1528 			switch (reg & STATUS_PCIXSPD_MASK) {
   1529 			case STATUS_PCIXSPD_50_66:
   1530 				sc->sc_bus_speed = 66;
   1531 				break;
   1532 			case STATUS_PCIXSPD_66_100:
   1533 				sc->sc_bus_speed = 100;
   1534 				break;
   1535 			case STATUS_PCIXSPD_100_133:
   1536 				sc->sc_bus_speed = 133;
   1537 				break;
   1538 			default:
   1539 				aprint_error_dev(sc->sc_dev,
   1540 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1541 				    reg & STATUS_PCIXSPD_MASK);
   1542 				sc->sc_bus_speed = 66;
   1543 				break;
   1544 			}
   1545 		} else
   1546 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1547 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1548 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1549 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1550 	}
   1551 
   1552 	/*
   1553 	 * Allocate the control data structures, and create and load the
   1554 	 * DMA map for it.
   1555 	 *
   1556 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1557 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1558 	 * both sets within the same 4G segment.
   1559 	 */
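	/*
	 * Annotation (added): the 0x100000000ULL "boundary" argument to
	 * bus_dmamem_alloc() below is what enforces this; bus_dma(9)
	 * guarantees that the returned segment does not cross a boundary
	 * of that size, so both rings land within one 4GB window.
	 */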
   1560 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1561 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1562 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
   1563 	    sizeof(struct wm_control_data_82542) :
   1564 	    sizeof(struct wm_control_data_82544);
   1565 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
   1566 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
   1567 		    &sc->sc_cd_rseg, 0)) != 0) {
   1568 		aprint_error_dev(sc->sc_dev,
   1569 		    "unable to allocate control data, error = %d\n",
   1570 		    error);
   1571 		goto fail_0;
   1572 	}
   1573 
   1574 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
   1575 		    sc->sc_cd_rseg, sc->sc_cd_size,
   1576 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
   1577 		aprint_error_dev(sc->sc_dev,
   1578 		    "unable to map control data, error = %d\n", error);
   1579 		goto fail_1;
   1580 	}
   1581 
   1582 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
   1583 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
   1584 		aprint_error_dev(sc->sc_dev,
   1585 		    "unable to create control data DMA map, error = %d\n",
   1586 		    error);
   1587 		goto fail_2;
   1588 	}
   1589 
   1590 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1591 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
   1592 		aprint_error_dev(sc->sc_dev,
   1593 		    "unable to load control data DMA map, error = %d\n",
   1594 		    error);
   1595 		goto fail_3;
   1596 	}
   1597 
   1598 	/* Create the transmit buffer DMA maps. */
   1599 	WM_TXQUEUELEN(sc) =
   1600 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1601 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1602 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1603 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1604 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1605 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1606 			aprint_error_dev(sc->sc_dev,
   1607 			    "unable to create Tx DMA map %d, error = %d\n",
   1608 			    i, error);
   1609 			goto fail_4;
   1610 		}
   1611 	}
   1612 
   1613 	/* Create the receive buffer DMA maps. */
   1614 	for (i = 0; i < WM_NRXDESC; i++) {
   1615 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1616 			    MCLBYTES, 0, 0,
   1617 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1618 			aprint_error_dev(sc->sc_dev,
   1619 			    "unable to create Rx DMA map %d error = %d\n",
   1620 			    i, error);
   1621 			goto fail_5;
   1622 		}
   1623 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1624 	}
   1625 
   1626 	/* clear interesting stat counters */
   1627 	CSR_READ(sc, WMREG_COLC);
   1628 	CSR_READ(sc, WMREG_RXERRC);
   1629 
   1630 	/* get PHY control from SMBus to PCIe */
   1631 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1632 	    || (sc->sc_type == WM_T_PCH_LPT))
   1633 		wm_smbustopci(sc);
   1634 
   1635 	/* Reset the chip to a known state. */
   1636 	wm_reset(sc);
   1637 
   1638 	/* Get some information about the EEPROM. */
   1639 	switch (sc->sc_type) {
   1640 	case WM_T_82542_2_0:
   1641 	case WM_T_82542_2_1:
   1642 	case WM_T_82543:
   1643 	case WM_T_82544:
   1644 		/* Microwire */
   1645 		sc->sc_ee_addrbits = 6;
   1646 		break;
   1647 	case WM_T_82540:
   1648 	case WM_T_82545:
   1649 	case WM_T_82545_3:
   1650 	case WM_T_82546:
   1651 	case WM_T_82546_3:
   1652 		/* Microwire */
   1653 		reg = CSR_READ(sc, WMREG_EECD);
   1654 		if (reg & EECD_EE_SIZE)
   1655 			sc->sc_ee_addrbits = 8;
   1656 		else
   1657 			sc->sc_ee_addrbits = 6;
   1658 		sc->sc_flags |= WM_F_LOCK_EECD;
   1659 		break;
   1660 	case WM_T_82541:
   1661 	case WM_T_82541_2:
   1662 	case WM_T_82547:
   1663 	case WM_T_82547_2:
   1664 		reg = CSR_READ(sc, WMREG_EECD);
   1665 		if (reg & EECD_EE_TYPE) {
   1666 			/* SPI */
   1667 			wm_set_spiaddrbits(sc);
   1668 		} else
   1669 			/* Microwire */
   1670 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1671 		sc->sc_flags |= WM_F_LOCK_EECD;
   1672 		break;
   1673 	case WM_T_82571:
   1674 	case WM_T_82572:
   1675 		/* SPI */
   1676 		wm_set_spiaddrbits(sc);
   1677 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1678 		break;
   1679 	case WM_T_82573:
   1680 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1681 		/* FALLTHROUGH */
   1682 	case WM_T_82574:
   1683 	case WM_T_82583:
   1684 		if (wm_nvm_is_onboard_eeprom(sc) == 0)
   1685 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1686 		else {
   1687 			/* SPI */
   1688 			wm_set_spiaddrbits(sc);
   1689 		}
   1690 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1691 		break;
   1692 	case WM_T_82575:
   1693 	case WM_T_82576:
   1694 	case WM_T_82580:
   1695 	case WM_T_82580ER:
   1696 	case WM_T_I350:
   1697 	case WM_T_I354:
   1698 	case WM_T_80003:
   1699 		/* SPI */
   1700 		wm_set_spiaddrbits(sc);
   1701 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1702 		    | WM_F_LOCK_SWSM;
   1703 		break;
   1704 	case WM_T_ICH8:
   1705 	case WM_T_ICH9:
   1706 	case WM_T_ICH10:
   1707 	case WM_T_PCH:
   1708 	case WM_T_PCH2:
   1709 	case WM_T_PCH_LPT:
   1710 		/* FLASH */
   1711 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1712 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1713 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1714 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1715 			aprint_error_dev(sc->sc_dev,
   1716 			    "can't map FLASH registers\n");
   1717 			return;
   1718 		}
   1719 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1720 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1721 						ICH_FLASH_SECTOR_SIZE;
   1722 		sc->sc_ich8_flash_bank_size =
   1723 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1724 		sc->sc_ich8_flash_bank_size -=
   1725 		    (reg & ICH_GFPREG_BASE_MASK);
   1726 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1727 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
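		/*
		 * Annotation (added; a hedged reading of the code above):
		 * GFPREG holds the first and last sectors of the gigabit
		 * flash region in its low and high halves.  The region
		 * contains two NVM banks, so the per-bank size is half
		 * the region converted to 16-bit words, hence the final
		 * division by 2 * sizeof(uint16_t).
		 */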
   1728 		break;
   1729 	case WM_T_I210:
   1730 	case WM_T_I211:
   1731 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1732 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1733 		break;
   1734 	default:
   1735 		break;
   1736 	}
   1737 
   1738 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1739 	switch (sc->sc_type) {
   1740 	case WM_T_82571:
   1741 	case WM_T_82572:
   1742 		reg = CSR_READ(sc, WMREG_SWSM2);
   1743 		if ((reg & SWSM2_LOCK) != 0) {
   1744 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1745 			force_clear_smbi = true;
   1746 		} else
   1747 			force_clear_smbi = false;
   1748 		break;
   1749 	case WM_T_82573:
   1750 	case WM_T_82574:
   1751 	case WM_T_82583:
   1752 		force_clear_smbi = true;
   1753 		break;
   1754 	default:
   1755 		force_clear_smbi = false;
   1756 		break;
   1757 	}
   1758 	if (force_clear_smbi) {
   1759 		reg = CSR_READ(sc, WMREG_SWSM);
   1760 		if ((reg & SWSM_SMBI) != 0)
   1761 			aprint_error_dev(sc->sc_dev,
   1762 			    "Please update the Bootagent\n");
   1763 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1764 	}
   1765 
   1766 	/*
    1767 	 * Defer printing the EEPROM type until after verifying the checksum.
    1768 	 * This allows the EEPROM type to be printed correctly in the case
   1769 	 * that no EEPROM is attached.
   1770 	 */
   1771 	/*
   1772 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1773 	 * this for later, so we can fail future reads from the EEPROM.
   1774 	 */
   1775 	if (wm_nvm_validate_checksum(sc)) {
   1776 		/*
   1777 		 * Read twice again because some PCI-e parts fail the
   1778 		 * first check due to the link being in sleep state.
   1779 		 */
   1780 		if (wm_nvm_validate_checksum(sc))
   1781 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1782 	}
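	/*
	 * Annotation (added; a sketch of the usual Intel NVM scheme, not
	 * the driver's actual code): the checksum traditionally holds if
	 * the 16-bit words 0x00-0x3f sum to 0xbaba, roughly:
	 *
	 *	for (sum = 0, i = 0; i < 0x40; i++) {
	 *		wm_nvm_read(sc, i, 1, &word);
	 *		sum += word;
	 *	}
	 *	valid = (sum == 0xbaba);
	 */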
   1783 
   1784 	/* Set device properties (macflags) */
   1785 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1786 
   1787 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1788 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
   1789 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
   1790 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
   1791 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1792 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
   1793 	} else {
   1794 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1795 			eetype = "SPI";
   1796 		else
   1797 			eetype = "MicroWire";
   1798 		aprint_verbose_dev(sc->sc_dev,
   1799 		    "%u word (%d address bits) %s EEPROM\n",
   1800 		    1U << sc->sc_ee_addrbits,
   1801 		    sc->sc_ee_addrbits, eetype);
   1802 	}
   1803 
   1804 	switch (sc->sc_type) {
   1805 	case WM_T_82571:
   1806 	case WM_T_82572:
   1807 	case WM_T_82573:
   1808 	case WM_T_82574:
   1809 	case WM_T_82583:
   1810 	case WM_T_80003:
   1811 	case WM_T_ICH8:
   1812 	case WM_T_ICH9:
   1813 	case WM_T_ICH10:
   1814 	case WM_T_PCH:
   1815 	case WM_T_PCH2:
   1816 	case WM_T_PCH_LPT:
   1817 		if (wm_check_mng_mode(sc) != 0)
   1818 			wm_get_hw_control(sc);
   1819 		break;
   1820 	default:
   1821 		break;
   1822 	}
   1823 	wm_get_wakeup(sc);
   1824 	/*
    1825 	 * Read the Ethernet address from the EEPROM, unless it was
    1826 	 * already found in the device properties.
   1827 	 */
   1828 	ea = prop_dictionary_get(dict, "mac-address");
   1829 	if (ea != NULL) {
   1830 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1831 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1832 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1833 	} else {
   1834 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   1835 			aprint_error_dev(sc->sc_dev,
   1836 			    "unable to read Ethernet address\n");
   1837 			return;
   1838 		}
   1839 	}
   1840 
   1841 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   1842 	    ether_sprintf(enaddr));
   1843 
   1844 	/*
   1845 	 * Read the config info from the EEPROM, and set up various
   1846 	 * bits in the control registers based on their contents.
   1847 	 */
   1848 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   1849 	if (pn != NULL) {
   1850 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1851 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1852 	} else {
   1853 		if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1854 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   1855 			return;
   1856 		}
   1857 	}
   1858 
   1859 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   1860 	if (pn != NULL) {
   1861 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1862 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1863 	} else {
   1864 		if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1865 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   1866 			return;
   1867 		}
   1868 	}
   1869 
   1870 	/* check for WM_F_WOL */
   1871 	switch (sc->sc_type) {
   1872 	case WM_T_82542_2_0:
   1873 	case WM_T_82542_2_1:
   1874 	case WM_T_82543:
   1875 		/* dummy? */
   1876 		eeprom_data = 0;
   1877 		apme_mask = EEPROM_CFG3_APME;
   1878 		break;
   1879 	case WM_T_82544:
   1880 		apme_mask = EEPROM_CFG2_82544_APM_EN;
   1881 		eeprom_data = cfg2;
   1882 		break;
   1883 	case WM_T_82546:
   1884 	case WM_T_82546_3:
   1885 	case WM_T_82571:
   1886 	case WM_T_82572:
   1887 	case WM_T_82573:
   1888 	case WM_T_82574:
   1889 	case WM_T_82583:
   1890 	case WM_T_80003:
   1891 	default:
   1892 		apme_mask = EEPROM_CFG3_APME;
   1893 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
   1894 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
   1895 		break;
   1896 	case WM_T_82575:
   1897 	case WM_T_82576:
   1898 	case WM_T_82580:
   1899 	case WM_T_82580ER:
   1900 	case WM_T_I350:
   1901 	case WM_T_I354: /* XXX ok? */
   1902 	case WM_T_ICH8:
   1903 	case WM_T_ICH9:
   1904 	case WM_T_ICH10:
   1905 	case WM_T_PCH:
   1906 	case WM_T_PCH2:
   1907 	case WM_T_PCH_LPT:
   1908 		/* XXX The funcid should be checked on some devices */
   1909 		apme_mask = WUC_APME;
   1910 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   1911 		break;
   1912 	}
   1913 
   1914 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   1915 	if ((eeprom_data & apme_mask) != 0)
   1916 		sc->sc_flags |= WM_F_WOL;
   1917 #ifdef WM_DEBUG
   1918 	if ((sc->sc_flags & WM_F_WOL) != 0)
   1919 		printf("WOL\n");
   1920 #endif
   1921 
   1922 	/*
    1923 	 * XXX need special handling for some multi-port cards
    1924 	 * to disable a particular port.
   1925 	 */
   1926 
   1927 	if (sc->sc_type >= WM_T_82544) {
   1928 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   1929 		if (pn != NULL) {
   1930 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1931 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1932 		} else {
   1933 			if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1934 				aprint_error_dev(sc->sc_dev,
   1935 				    "unable to read SWDPIN\n");
   1936 				return;
   1937 			}
   1938 		}
   1939 	}
   1940 
   1941 	if (cfg1 & EEPROM_CFG1_ILOS)
   1942 		sc->sc_ctrl |= CTRL_ILOS;
   1943 	if (sc->sc_type >= WM_T_82544) {
   1944 		sc->sc_ctrl |=
   1945 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1946 		    CTRL_SWDPIO_SHIFT;
   1947 		sc->sc_ctrl |=
   1948 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1949 		    CTRL_SWDPINS_SHIFT;
   1950 	} else {
   1951 		sc->sc_ctrl |=
   1952 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1953 		    CTRL_SWDPIO_SHIFT;
   1954 	}
   1955 
   1956 #if 0
   1957 	if (sc->sc_type >= WM_T_82544) {
   1958 		if (cfg1 & EEPROM_CFG1_IPS0)
   1959 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1960 		if (cfg1 & EEPROM_CFG1_IPS1)
   1961 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1962 		sc->sc_ctrl_ext |=
   1963 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1964 		    CTRL_EXT_SWDPIO_SHIFT;
   1965 		sc->sc_ctrl_ext |=
   1966 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1967 		    CTRL_EXT_SWDPINS_SHIFT;
   1968 	} else {
   1969 		sc->sc_ctrl_ext |=
   1970 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1971 		    CTRL_EXT_SWDPIO_SHIFT;
   1972 	}
   1973 #endif
   1974 
   1975 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1976 #if 0
   1977 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1978 #endif
   1979 
   1980 	/*
   1981 	 * Set up some register offsets that are different between
   1982 	 * the i82542 and the i82543 and later chips.
   1983 	 */
   1984 	if (sc->sc_type < WM_T_82543) {
   1985 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1986 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1987 	} else {
   1988 		sc->sc_rdt_reg = WMREG_RDT;
   1989 		sc->sc_tdt_reg = WMREG_TDT;
   1990 	}
   1991 
   1992 	if (sc->sc_type == WM_T_PCH) {
   1993 		uint16_t val;
   1994 
   1995 		/* Save the NVM K1 bit setting */
   1996 		wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
   1997 
   1998 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
   1999 			sc->sc_nvm_k1_enabled = 1;
   2000 		else
   2001 			sc->sc_nvm_k1_enabled = 0;
   2002 	}
   2003 
   2004 	/*
    2005 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    2006 	 * initialize the media structures accordingly.
   2007 	 */
   2008 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2009 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2010 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2011 	    || sc->sc_type == WM_T_82573
   2012 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2013 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2014 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2015 	} else if (sc->sc_type < WM_T_82543 ||
   2016 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2017 		if (wmp->wmp_flags & WMP_F_1000T)
   2018 			aprint_error_dev(sc->sc_dev,
   2019 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2020 		wm_tbi_mediainit(sc);
   2021 	} else {
   2022 		switch (sc->sc_type) {
   2023 		case WM_T_82575:
   2024 		case WM_T_82576:
   2025 		case WM_T_82580:
   2026 		case WM_T_82580ER:
   2027 		case WM_T_I350:
   2028 		case WM_T_I354:
   2029 		case WM_T_I210:
   2030 		case WM_T_I211:
   2031 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2032 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
   2033 			case CTRL_EXT_LINK_MODE_1000KX:
   2034 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2035 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2036 				    reg | CTRL_EXT_I2C_ENA);
   2037 				panic("not supported yet\n");
   2038 				break;
   2039 			case CTRL_EXT_LINK_MODE_SGMII:
   2040 				if (wm_sgmii_uses_mdio(sc)) {
   2041 					aprint_verbose_dev(sc->sc_dev,
   2042 					    "SGMII(MDIO)\n");
   2043 					sc->sc_flags |= WM_F_SGMII;
   2044 					wm_gmii_mediainit(sc,
   2045 					    wmp->wmp_product);
   2046 					break;
   2047 				}
   2048 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2049 				/*FALLTHROUGH*/
   2050 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2051 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2052 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2053 				    reg | CTRL_EXT_I2C_ENA);
   2054 				panic("not supported yet\n");
   2055 				break;
   2056 			case CTRL_EXT_LINK_MODE_GMII:
   2057 			default:
   2058 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   2059 				    reg & ~CTRL_EXT_I2C_ENA);
   2060 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2061 				break;
   2062 			}
   2063 			break;
   2064 		default:
   2065 			if (wmp->wmp_flags & WMP_F_1000X)
   2066 				aprint_error_dev(sc->sc_dev,
   2067 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2068 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2069 		}
   2070 	}
   2071 
   2072 	ifp = &sc->sc_ethercom.ec_if;
   2073 	xname = device_xname(sc->sc_dev);
   2074 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2075 	ifp->if_softc = sc;
   2076 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2077 	ifp->if_ioctl = wm_ioctl;
   2078 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2079 		ifp->if_start = wm_nq_start;
   2080 	else
   2081 		ifp->if_start = wm_start;
   2082 	ifp->if_watchdog = wm_watchdog;
   2083 	ifp->if_init = wm_init;
   2084 	ifp->if_stop = wm_stop;
   2085 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2086 	IFQ_SET_READY(&ifp->if_snd);
   2087 
   2088 	/* Check for jumbo frame */
   2089 	switch (sc->sc_type) {
   2090 	case WM_T_82573:
   2091 		/* XXX limited to 9234 if ASPM is disabled */
   2092 		wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
   2093 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
   2094 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2095 		break;
   2096 	case WM_T_82571:
   2097 	case WM_T_82572:
   2098 	case WM_T_82574:
   2099 	case WM_T_82575:
   2100 	case WM_T_82576:
   2101 	case WM_T_82580:
   2102 	case WM_T_82580ER:
   2103 	case WM_T_I350:
   2104 	case WM_T_I354: /* XXXX ok? */
   2105 	case WM_T_I210:
   2106 	case WM_T_I211:
   2107 	case WM_T_80003:
   2108 	case WM_T_ICH9:
   2109 	case WM_T_ICH10:
   2110 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2111 	case WM_T_PCH_LPT:
   2112 		/* XXX limited to 9234 */
   2113 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2114 		break;
   2115 	case WM_T_PCH:
   2116 		/* XXX limited to 4096 */
   2117 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2118 		break;
   2119 	case WM_T_82542_2_0:
   2120 	case WM_T_82542_2_1:
   2121 	case WM_T_82583:
   2122 	case WM_T_ICH8:
   2123 		/* No support for jumbo frame */
   2124 		break;
   2125 	default:
   2126 		/* ETHER_MAX_LEN_JUMBO */
   2127 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2128 		break;
   2129 	}
   2130 
   2131 	/* If we're a i82543 or greater, we can support VLANs. */
   2132 	if (sc->sc_type >= WM_T_82543)
   2133 		sc->sc_ethercom.ec_capabilities |=
   2134 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2135 
   2136 	/*
    2137 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2138 	 * on i82543 and later.
   2139 	 */
   2140 	if (sc->sc_type >= WM_T_82543) {
   2141 		ifp->if_capabilities |=
   2142 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2143 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2144 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2145 		    IFCAP_CSUM_TCPv6_Tx |
   2146 		    IFCAP_CSUM_UDPv6_Tx;
   2147 	}
   2148 
   2149 	/*
   2150 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2151 	 *
   2152 	 *	82541GI (8086:1076) ... no
   2153 	 *	82572EI (8086:10b9) ... yes
   2154 	 */
   2155 	if (sc->sc_type >= WM_T_82571) {
   2156 		ifp->if_capabilities |=
   2157 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2158 	}
   2159 
   2160 	/*
   2161 	 * If we're a i82544 or greater (except i82547), we can do
   2162 	 * TCP segmentation offload.
   2163 	 */
   2164 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2165 		ifp->if_capabilities |= IFCAP_TSOv4;
   2166 	}
   2167 
   2168 	if (sc->sc_type >= WM_T_82571) {
   2169 		ifp->if_capabilities |= IFCAP_TSOv6;
   2170 	}
   2171 
   2172 #ifdef WM_MPSAFE
   2173 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2174 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2175 #else
   2176 	sc->sc_tx_lock = NULL;
   2177 	sc->sc_rx_lock = NULL;
   2178 #endif
   2179 
   2180 	/* Attach the interface. */
   2181 	if_attach(ifp);
   2182 	ether_ifattach(ifp, enaddr);
   2183 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2184 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
   2185 
   2186 #ifdef WM_EVENT_COUNTERS
   2187 	/* Attach event counters. */
   2188 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2189 	    NULL, xname, "txsstall");
   2190 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2191 	    NULL, xname, "txdstall");
   2192 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2193 	    NULL, xname, "txfifo_stall");
   2194 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2195 	    NULL, xname, "txdw");
   2196 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2197 	    NULL, xname, "txqe");
   2198 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2199 	    NULL, xname, "rxintr");
   2200 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2201 	    NULL, xname, "linkintr");
   2202 
   2203 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2204 	    NULL, xname, "rxipsum");
   2205 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2206 	    NULL, xname, "rxtusum");
   2207 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2208 	    NULL, xname, "txipsum");
   2209 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2210 	    NULL, xname, "txtusum");
   2211 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2212 	    NULL, xname, "txtusum6");
   2213 
   2214 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2215 	    NULL, xname, "txtso");
   2216 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2217 	    NULL, xname, "txtso6");
   2218 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2219 	    NULL, xname, "txtsopain");
   2220 
   2221 	for (i = 0; i < WM_NTXSEGS; i++) {
   2222 		snprintf(wm_txseg_evcnt_names[i],
   2223 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2224 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2225 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2226 	}
   2227 
   2228 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2229 	    NULL, xname, "txdrop");
   2230 
   2231 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2232 	    NULL, xname, "tu");
   2233 
   2234 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2235 	    NULL, xname, "tx_xoff");
   2236 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2237 	    NULL, xname, "tx_xon");
   2238 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2239 	    NULL, xname, "rx_xoff");
   2240 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2241 	    NULL, xname, "rx_xon");
   2242 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2243 	    NULL, xname, "rx_macctl");
   2244 #endif /* WM_EVENT_COUNTERS */
   2245 
   2246 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2247 		pmf_class_network_register(self, ifp);
   2248 	else
   2249 		aprint_error_dev(self, "couldn't establish power handler\n");
   2250 
   2251 	return;
   2252 
   2253 	/*
   2254 	 * Free any resources we've allocated during the failed attach
   2255 	 * attempt.  Do this in reverse order and fall through.
   2256 	 */
   2257  fail_5:
   2258 	for (i = 0; i < WM_NRXDESC; i++) {
   2259 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2260 			bus_dmamap_destroy(sc->sc_dmat,
   2261 			    sc->sc_rxsoft[i].rxs_dmamap);
   2262 	}
   2263  fail_4:
   2264 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2265 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2266 			bus_dmamap_destroy(sc->sc_dmat,
   2267 			    sc->sc_txsoft[i].txs_dmamap);
   2268 	}
   2269 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2270  fail_3:
   2271 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2272  fail_2:
   2273 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2274 	    sc->sc_cd_size);
   2275  fail_1:
   2276 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2277  fail_0:
   2278 	return;
   2279 }
   2280 
   2281 /* The detach function (ca_detach) */
   2282 static int
   2283 wm_detach(device_t self, int flags __unused)
   2284 {
   2285 	struct wm_softc *sc = device_private(self);
   2286 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2287 	int i;
   2288 #ifndef WM_MPSAFE
   2289 	int s;
   2290 
   2291 	s = splnet();
   2292 #endif
   2293 	/* Stop the interface. Callouts are stopped in it. */
   2294 	wm_stop(ifp, 1);
   2295 
   2296 #ifndef WM_MPSAFE
   2297 	splx(s);
   2298 #endif
   2299 
   2300 	pmf_device_deregister(self);
   2301 
   2302 	/* Tell the firmware about the release */
   2303 	WM_BOTH_LOCK(sc);
   2304 	wm_release_manageability(sc);
   2305 	wm_release_hw_control(sc);
   2306 	WM_BOTH_UNLOCK(sc);
   2307 
   2308 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2309 
   2310 	/* Delete all remaining media. */
   2311 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2312 
   2313 	ether_ifdetach(ifp);
   2314 	if_detach(ifp);
   2315 
   2317 	/* Unload RX dmamaps and free mbufs */
   2318 	WM_RX_LOCK(sc);
   2319 	wm_rxdrain(sc);
   2320 	WM_RX_UNLOCK(sc);
   2321 	/* Must unlock here */
   2322 
   2323 	/* Free dmamap. It's the same as the end of the wm_attach() function */
   2324 	for (i = 0; i < WM_NRXDESC; i++) {
   2325 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   2326 			bus_dmamap_destroy(sc->sc_dmat,
   2327 			    sc->sc_rxsoft[i].rxs_dmamap);
   2328 	}
   2329 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   2330 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   2331 			bus_dmamap_destroy(sc->sc_dmat,
   2332 			    sc->sc_txsoft[i].txs_dmamap);
   2333 	}
   2334 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   2335 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   2336 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   2337 	    sc->sc_cd_size);
   2338 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
   2339 
   2340 	/* Disestablish the interrupt handler */
   2341 	if (sc->sc_ih != NULL) {
   2342 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
   2343 		sc->sc_ih = NULL;
   2344 	}
   2345 
   2346 	/* Unmap the registers */
   2347 	if (sc->sc_ss) {
   2348 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2349 		sc->sc_ss = 0;
   2350 	}
   2351 
   2352 	if (sc->sc_ios) {
   2353 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2354 		sc->sc_ios = 0;
   2355 	}
   2356 
   2357 	if (sc->sc_tx_lock)
   2358 		mutex_obj_free(sc->sc_tx_lock);
   2359 	if (sc->sc_rx_lock)
   2360 		mutex_obj_free(sc->sc_rx_lock);
   2361 
   2362 	return 0;
   2363 }
   2364 
   2365 static bool
   2366 wm_suspend(device_t self, const pmf_qual_t *qual)
   2367 {
   2368 	struct wm_softc *sc = device_private(self);
   2369 
   2370 	wm_release_manageability(sc);
   2371 	wm_release_hw_control(sc);
   2372 #ifdef WM_WOL
   2373 	wm_enable_wakeup(sc);
   2374 #endif
   2375 
   2376 	return true;
   2377 }
   2378 
   2379 static bool
   2380 wm_resume(device_t self, const pmf_qual_t *qual)
   2381 {
   2382 	struct wm_softc *sc = device_private(self);
   2383 
   2384 	wm_init_manageability(sc);
   2385 
   2386 	return true;
   2387 }
   2388 
   2389 /*
   2390  * wm_watchdog:		[ifnet interface function]
   2391  *
   2392  *	Watchdog timer handler.
   2393  */
   2394 static void
   2395 wm_watchdog(struct ifnet *ifp)
   2396 {
   2397 	struct wm_softc *sc = ifp->if_softc;
   2398 
   2399 	/*
   2400 	 * Since we're using delayed interrupts, sweep up
   2401 	 * before we report an error.
   2402 	 */
   2403 	WM_TX_LOCK(sc);
   2404 	wm_txintr(sc);
   2405 	WM_TX_UNLOCK(sc);
   2406 
   2407 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2408 #ifdef WM_DEBUG
   2409 		int i, j;
   2410 		struct wm_txsoft *txs;
   2411 #endif
   2412 		log(LOG_ERR,
   2413 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2414 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
   2415 		    sc->sc_txnext);
   2416 		ifp->if_oerrors++;
   2417 #ifdef WM_DEBUG
    2418 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
    2419 		    i = WM_NEXTTXS(sc, i)) {
    2420 			txs = &sc->sc_txsoft[i];
    2421 			printf("txs %d tx %d -> %d\n",
    2422 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2423 			for (j = txs->txs_firstdesc; ;
    2424 			    j = WM_NEXTTX(sc, j)) {
    2425 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2426 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
    2427 				printf("\t %#08x%08x\n",
    2428 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
    2429 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
    2430 				if (j == txs->txs_lastdesc)
    2431 					break;
    2432 			}
    2433 		}
   2434 #endif
   2435 		/* Reset the interface. */
   2436 		(void) wm_init(ifp);
   2437 	}
   2438 
   2439 	/* Try to get more packets going. */
   2440 	ifp->if_start(ifp);
   2441 }
   2442 
   2443 /*
   2444  * wm_tick:
   2445  *
   2446  *	One second timer, used to check link status, sweep up
   2447  *	completed transmit jobs, etc.
   2448  */
   2449 static void
   2450 wm_tick(void *arg)
   2451 {
   2452 	struct wm_softc *sc = arg;
   2453 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2454 #ifndef WM_MPSAFE
   2455 	int s;
   2456 
   2457 	s = splnet();
   2458 #endif
   2459 
   2460 	WM_TX_LOCK(sc);
   2461 
   2462 	if (sc->sc_stopping)
   2463 		goto out;
   2464 
   2465 	if (sc->sc_type >= WM_T_82542_2_1) {
   2466 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2467 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2468 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2469 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2470 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2471 	}
   2472 
   2473 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2474 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2475 	    + CSR_READ(sc, WMREG_CRCERRS)
   2476 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2477 	    + CSR_READ(sc, WMREG_SYMERRC)
   2478 	    + CSR_READ(sc, WMREG_RXERRC)
   2479 	    + CSR_READ(sc, WMREG_SEC)
   2480 	    + CSR_READ(sc, WMREG_CEXTERR)
   2481 	    + CSR_READ(sc, WMREG_RLEC);
   2482 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2483 
   2484 	if (sc->sc_flags & WM_F_HAS_MII)
   2485 		mii_tick(&sc->sc_mii);
   2486 	else
   2487 		wm_tbi_check_link(sc);
   2488 
   2489 out:
   2490 	WM_TX_UNLOCK(sc);
   2491 #ifndef WM_MPSAFE
   2492 	splx(s);
   2493 #endif
   2494 
   2495 	if (!sc->sc_stopping)
   2496 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2497 }
   2498 
   2499 static int
   2500 wm_ifflags_cb(struct ethercom *ec)
   2501 {
   2502 	struct ifnet *ifp = &ec->ec_if;
   2503 	struct wm_softc *sc = ifp->if_softc;
   2504 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2505 	int rc = 0;
   2506 
   2507 	WM_BOTH_LOCK(sc);
   2508 
   2509 	if (change != 0)
   2510 		sc->sc_if_flags = ifp->if_flags;
   2511 
   2512 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2513 		rc = ENETRESET;
   2514 		goto out;
   2515 	}
   2516 
   2517 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2518 		wm_set_filter(sc);
   2519 
   2520 	wm_set_vlan(sc);
   2521 
   2522 out:
   2523 	WM_BOTH_UNLOCK(sc);
   2524 
   2525 	return rc;
   2526 }
   2527 
   2528 /*
   2529  * wm_ioctl:		[ifnet interface function]
   2530  *
   2531  *	Handle control requests from the operator.
   2532  */
   2533 static int
   2534 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2535 {
   2536 	struct wm_softc *sc = ifp->if_softc;
   2537 	struct ifreq *ifr = (struct ifreq *) data;
   2538 	struct ifaddr *ifa = (struct ifaddr *)data;
   2539 	struct sockaddr_dl *sdl;
   2540 	int s, error;
   2541 
   2542 #ifndef WM_MPSAFE
   2543 	s = splnet();
   2544 #endif
   2545 	WM_BOTH_LOCK(sc);
   2546 
   2547 	switch (cmd) {
   2548 	case SIOCSIFMEDIA:
   2549 	case SIOCGIFMEDIA:
   2550 		/* Flow control requires full-duplex mode. */
   2551 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2552 		    (ifr->ifr_media & IFM_FDX) == 0)
   2553 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2554 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2555 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2556 				/* We can do both TXPAUSE and RXPAUSE. */
   2557 				ifr->ifr_media |=
   2558 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2559 			}
   2560 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2561 		}
   2562 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2563 		break;
   2564 	case SIOCINITIFADDR:
   2565 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2566 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2567 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2568 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2569 			/* unicast address is first multicast entry */
   2570 			wm_set_filter(sc);
   2571 			error = 0;
   2572 			break;
   2573 		}
   2574 		/*FALLTHROUGH*/
   2575 	default:
   2576 		WM_BOTH_UNLOCK(sc);
   2577 #ifdef WM_MPSAFE
   2578 		s = splnet();
   2579 #endif
   2580 		/* It may call wm_start, so unlock here */
   2581 		error = ether_ioctl(ifp, cmd, data);
   2582 #ifdef WM_MPSAFE
   2583 		splx(s);
   2584 #endif
   2585 		WM_BOTH_LOCK(sc);
   2586 
   2587 		if (error != ENETRESET)
   2588 			break;
   2589 
   2590 		error = 0;
   2591 
   2592 		if (cmd == SIOCSIFCAP) {
   2593 			WM_BOTH_UNLOCK(sc);
   2594 			error = (*ifp->if_init)(ifp);
   2595 			WM_BOTH_LOCK(sc);
   2596 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2597 			;
   2598 		else if (ifp->if_flags & IFF_RUNNING) {
   2599 			/*
   2600 			 * Multicast list has changed; set the hardware filter
   2601 			 * accordingly.
   2602 			 */
   2603 			wm_set_filter(sc);
   2604 		}
   2605 		break;
   2606 	}
   2607 
   2608 	WM_BOTH_UNLOCK(sc);
   2609 
   2610 	/* Try to get more packets going. */
   2611 	ifp->if_start(ifp);
   2612 
   2613 #ifndef WM_MPSAFE
   2614 	splx(s);
   2615 #endif
   2616 	return error;
   2617 }
   2618 
   2619 /* MAC address related */
   2620 
   2621 static int
   2622 wm_check_alt_mac_addr(struct wm_softc *sc)
   2623 {
   2624 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2625 	uint16_t offset = EEPROM_OFF_MACADDR;
   2626 
   2627 	/* Try to read alternative MAC address pointer */
   2628 	if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2629 		return -1;
   2630 
   2631 	/* Check pointer */
   2632 	if (offset == 0xffff)
   2633 		return -1;
   2634 
   2635 	/*
    2636 	 * Check whether the alternative MAC address is valid or not.
    2637 	 * Some cards have a non-0xffff pointer but don't actually use
    2638 	 * an alternative MAC address.
   2639 	 *
   2640 	 * Check whether the broadcast bit is set or not.
   2641 	 */
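	/*
	 * Annotation (added): bit 0 of the first octet is the group
	 * (multicast/broadcast) bit, which is never set in a valid
	 * station address; e.g. 00:1b:21:xx:xx:xx passes the test below
	 * while 01:00:5e:xx:xx:xx fails it.
	 */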
   2642 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2643 		if (((myea[0] & 0xff) & 0x01) == 0)
   2644 			return 0; /* found! */
   2645 
   2646 	/* not found */
   2647 	return -1;
   2648 }
   2649 
   2650 static int
   2651 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2652 {
   2653 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2654 	uint16_t offset = EEPROM_OFF_MACADDR;
   2655 	int do_invert = 0;
   2656 
   2657 	switch (sc->sc_type) {
   2658 	case WM_T_82580:
   2659 	case WM_T_82580ER:
   2660 	case WM_T_I350:
   2661 	case WM_T_I354:
   2662 		switch (sc->sc_funcid) {
   2663 		case 0:
   2664 			/* default value (== EEPROM_OFF_MACADDR) */
   2665 			break;
   2666 		case 1:
   2667 			offset = EEPROM_OFF_LAN1;
   2668 			break;
   2669 		case 2:
   2670 			offset = EEPROM_OFF_LAN2;
   2671 			break;
   2672 		case 3:
   2673 			offset = EEPROM_OFF_LAN3;
   2674 			break;
   2675 		default:
   2676 			goto bad;
   2677 			/* NOTREACHED */
   2678 			break;
   2679 		}
   2680 		break;
   2681 	case WM_T_82571:
   2682 	case WM_T_82575:
   2683 	case WM_T_82576:
   2684 	case WM_T_80003:
   2685 	case WM_T_I210:
   2686 	case WM_T_I211:
   2687 		if (wm_check_alt_mac_addr(sc) != 0) {
   2688 			/* reset the offset to LAN0 */
   2689 			offset = EEPROM_OFF_MACADDR;
   2690 			if ((sc->sc_funcid & 0x01) == 1)
   2691 				do_invert = 1;
   2692 			goto do_read;
   2693 		}
   2694 		switch (sc->sc_funcid) {
   2695 		case 0:
   2696 			/*
   2697 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
   2698 			 * itself.
   2699 			 */
   2700 			break;
   2701 		case 1:
   2702 			offset += EEPROM_OFF_MACADDR_LAN1;
   2703 			break;
   2704 		case 2:
   2705 			offset += EEPROM_OFF_MACADDR_LAN2;
   2706 			break;
   2707 		case 3:
   2708 			offset += EEPROM_OFF_MACADDR_LAN3;
   2709 			break;
   2710 		default:
   2711 			goto bad;
   2712 			/* NOTREACHED */
   2713 			break;
   2714 		}
   2715 		break;
   2716 	default:
   2717 		if ((sc->sc_funcid & 0x01) == 1)
   2718 			do_invert = 1;
   2719 		break;
   2720 	}
   2721 
   2722  do_read:
   2723 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2724 		myea) != 0) {
   2725 		goto bad;
   2726 	}
   2727 
   2728 	enaddr[0] = myea[0] & 0xff;
   2729 	enaddr[1] = myea[0] >> 8;
   2730 	enaddr[2] = myea[1] & 0xff;
   2731 	enaddr[3] = myea[1] >> 8;
   2732 	enaddr[4] = myea[2] & 0xff;
   2733 	enaddr[5] = myea[2] >> 8;
   2734 
   2735 	/*
   2736 	 * Toggle the LSB of the MAC address on the second port
   2737 	 * of some dual port cards.
   2738 	 */
   2739 	if (do_invert != 0)
   2740 		enaddr[5] ^= 1;
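
	/*
	 * Annotation (added): e.g. if the NVM holds 00:11:22:33:44:54,
	 * the second port advertises 00:11:22:33:44:55, and vice versa;
	 * the XOR with 1 just flips the low bit.
	 */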
   2741 
   2742 	return 0;
   2743 
   2744  bad:
   2745 	return -1;
   2746 }
   2747 
   2748 /*
   2749  * wm_set_ral:
   2750  *
    2751  *	Set an entry in the receive address list.
   2752  */
   2753 static void
   2754 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2755 {
   2756 	uint32_t ral_lo, ral_hi;
   2757 
   2758 	if (enaddr != NULL) {
   2759 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2760 		    (enaddr[3] << 24);
   2761 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2762 		ral_hi |= RAL_AV;
   2763 	} else {
   2764 		ral_lo = 0;
   2765 		ral_hi = 0;
   2766 	}
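
	/*
	 * Annotation (added): the address is packed little-endian, e.g.
	 * 00:11:22:33:44:55 yields ral_lo = 0x33221100 and ral_hi =
	 * 0x00005544 | RAL_AV, where RAL_AV marks the entry as valid so
	 * the receive filter actually consults it.
	 */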
   2767 
   2768 	if (sc->sc_type >= WM_T_82544) {
   2769 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2770 		    ral_lo);
   2771 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2772 		    ral_hi);
   2773 	} else {
   2774 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2775 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2776 	}
   2777 }
   2778 
   2779 /*
   2780  * wm_mchash:
   2781  *
   2782  *	Compute the hash of the multicast address for the 4096-bit
   2783  *	multicast filter.
   2784  */
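
/*
 * Annotation (added; worked example, assuming mchash_type 0 on a
 * non-ICH part): for the IPv4 all-hosts group 01:00:5e:00:00:01,
 * enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * and the caller in wm_set_filter() sets bit (hash & 0x1f) = 0x10 of
 * MTA word (hash >> 5) = 0.
 */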
   2785 static uint32_t
   2786 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2787 {
   2788 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2789 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2790 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2791 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2792 	uint32_t hash;
   2793 
   2794 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2795 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2796 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2797 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   2798 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   2799 		return (hash & 0x3ff);
   2800 	}
   2801 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2802 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2803 
   2804 	return (hash & 0xfff);
   2805 }
   2806 
   2807 /*
   2808  * wm_set_filter:
   2809  *
   2810  *	Set up the receive filter.
   2811  */
   2812 static void
   2813 wm_set_filter(struct wm_softc *sc)
   2814 {
   2815 	struct ethercom *ec = &sc->sc_ethercom;
   2816 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2817 	struct ether_multi *enm;
   2818 	struct ether_multistep step;
   2819 	bus_addr_t mta_reg;
   2820 	uint32_t hash, reg, bit;
   2821 	int i, size;
   2822 
   2823 	if (sc->sc_type >= WM_T_82544)
   2824 		mta_reg = WMREG_CORDOVA_MTA;
   2825 	else
   2826 		mta_reg = WMREG_MTA;
   2827 
   2828 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2829 
   2830 	if (ifp->if_flags & IFF_BROADCAST)
   2831 		sc->sc_rctl |= RCTL_BAM;
   2832 	if (ifp->if_flags & IFF_PROMISC) {
   2833 		sc->sc_rctl |= RCTL_UPE;
   2834 		goto allmulti;
   2835 	}
   2836 
   2837 	/*
   2838 	 * Set the station address in the first RAL slot, and
   2839 	 * clear the remaining slots.
   2840 	 */
   2841 	if (sc->sc_type == WM_T_ICH8)
    2842 		size = WM_RAL_TABSIZE_ICH8 - 1;
   2843 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   2844 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   2845 	    || (sc->sc_type == WM_T_PCH_LPT))
   2846 		size = WM_RAL_TABSIZE_ICH8;
   2847 	else if (sc->sc_type == WM_T_82575)
   2848 		size = WM_RAL_TABSIZE_82575;
   2849 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   2850 		size = WM_RAL_TABSIZE_82576;
   2851 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2852 		size = WM_RAL_TABSIZE_I350;
   2853 	else
   2854 		size = WM_RAL_TABSIZE;
   2855 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   2856 	for (i = 1; i < size; i++)
   2857 		wm_set_ral(sc, NULL, i);
   2858 
   2859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   2862 		size = WM_ICH8_MC_TABSIZE;
   2863 	else
   2864 		size = WM_MC_TABSIZE;
   2865 	/* Clear out the multicast table. */
   2866 	for (i = 0; i < size; i++)
   2867 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2868 
   2869 	ETHER_FIRST_MULTI(step, ec, enm);
   2870 	while (enm != NULL) {
   2871 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2872 			/*
   2873 			 * We must listen to a range of multicast addresses.
   2874 			 * For now, just accept all multicasts, rather than
   2875 			 * trying to set only those filter bits needed to match
   2876 			 * the range.  (At this time, the only use of address
   2877 			 * ranges is for IP multicast routing, for which the
   2878 			 * range is big enough to require all bits set.)
   2879 			 */
   2880 			goto allmulti;
   2881 		}
   2882 
   2883 		hash = wm_mchash(sc, enm->enm_addrlo);
   2884 
   2885 		reg = (hash >> 5);
   2886 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2887 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2888 		    || (sc->sc_type == WM_T_PCH2)
   2889 		    || (sc->sc_type == WM_T_PCH_LPT))
   2890 			reg &= 0x1f;
   2891 		else
   2892 			reg &= 0x7f;
   2893 		bit = hash & 0x1f;
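
		/*
		 * Annotation (added): the MTA is an array of 32-bit
		 * words (128 of them here, 32 on the ICH/PCH variants),
		 * so the upper hash bits select the word and the low
		 * five bits select the bit within it.
		 */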
   2894 
   2895 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2896 		hash |= 1U << bit;
   2897 
   2898 		/* XXX Hardware bug?? */
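		/*
		 * Annotation (added, hedged): this looks like the
		 * published 82544 erratum in which a write to an
		 * odd-numbered MTA register can clobber its lower
		 * neighbour; the previous word is therefore read back
		 * and rewritten after the update.
		 */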
    2899 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) == 1) {
   2900 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2901 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2902 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2903 		} else
   2904 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2905 
   2906 		ETHER_NEXT_MULTI(step, enm);
   2907 	}
   2908 
   2909 	ifp->if_flags &= ~IFF_ALLMULTI;
   2910 	goto setit;
   2911 
   2912  allmulti:
   2913 	ifp->if_flags |= IFF_ALLMULTI;
   2914 	sc->sc_rctl |= RCTL_MPE;
   2915 
   2916  setit:
   2917 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   2918 }
   2919 
   2920 /* Reset and init related */
   2921 
   2922 static void
   2923 wm_set_vlan(struct wm_softc *sc)
   2924 {
   2925 	/* Deal with VLAN enables. */
   2926 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   2927 		sc->sc_ctrl |= CTRL_VME;
   2928 	else
   2929 		sc->sc_ctrl &= ~CTRL_VME;
   2930 
   2931 	/* Write the control registers. */
   2932 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2933 }
   2934 
   2935 static void
   2936 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   2937 {
   2938 	uint32_t gcr;
   2939 	pcireg_t ctrl2;
   2940 
   2941 	gcr = CSR_READ(sc, WMREG_GCR);
   2942 
   2943 	/* Only take action if timeout value is defaulted to 0 */
   2944 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   2945 		goto out;
   2946 
   2947 	if ((gcr & GCR_CAP_VER2) == 0) {
   2948 		gcr |= GCR_CMPL_TMOUT_10MS;
   2949 		goto out;
   2950 	}
   2951 
   2952 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   2953 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   2954 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   2955 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   2956 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   2957 
   2958 out:
   2959 	/* Disable completion timeout resend */
   2960 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   2961 
   2962 	CSR_WRITE(sc, WMREG_GCR, gcr);
   2963 }
   2964 
   2965 void
   2966 wm_get_auto_rd_done(struct wm_softc *sc)
   2967 {
   2968 	int i;
   2969 
   2970 	/* wait for eeprom to reload */
   2971 	switch (sc->sc_type) {
   2972 	case WM_T_82571:
   2973 	case WM_T_82572:
   2974 	case WM_T_82573:
   2975 	case WM_T_82574:
   2976 	case WM_T_82583:
   2977 	case WM_T_82575:
   2978 	case WM_T_82576:
   2979 	case WM_T_82580:
   2980 	case WM_T_82580ER:
   2981 	case WM_T_I350:
   2982 	case WM_T_I354:
   2983 	case WM_T_I210:
   2984 	case WM_T_I211:
   2985 	case WM_T_80003:
   2986 	case WM_T_ICH8:
   2987 	case WM_T_ICH9:
   2988 		for (i = 0; i < 10; i++) {
   2989 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   2990 				break;
   2991 			delay(1000);
   2992 		}
   2993 		if (i == 10) {
   2994 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   2995 			    "complete\n", device_xname(sc->sc_dev));
   2996 		}
   2997 		break;
   2998 	default:
   2999 		break;
   3000 	}
   3001 }
   3002 
   3003 void
   3004 wm_lan_init_done(struct wm_softc *sc)
   3005 {
   3006 	uint32_t reg = 0;
   3007 	int i;
   3008 
   3009 	/* wait for eeprom to reload */
   3010 	switch (sc->sc_type) {
   3011 	case WM_T_ICH10:
   3012 	case WM_T_PCH:
   3013 	case WM_T_PCH2:
   3014 	case WM_T_PCH_LPT:
   3015 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3016 			reg = CSR_READ(sc, WMREG_STATUS);
   3017 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3018 				break;
   3019 			delay(100);
   3020 		}
   3021 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3022 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3023 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3024 		}
   3025 		break;
   3026 	default:
   3027 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3028 		    __func__);
   3029 		break;
   3030 	}
   3031 
   3032 	reg &= ~STATUS_LAN_INIT_DONE;
   3033 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3034 }
   3035 
   3036 void
   3037 wm_get_cfg_done(struct wm_softc *sc)
   3038 {
   3039 	int mask;
   3040 	uint32_t reg;
   3041 	int i;
   3042 
   3043 	/* wait for eeprom to reload */
   3044 	switch (sc->sc_type) {
   3045 	case WM_T_82542_2_0:
   3046 	case WM_T_82542_2_1:
   3047 		/* null */
   3048 		break;
   3049 	case WM_T_82543:
   3050 	case WM_T_82544:
   3051 	case WM_T_82540:
   3052 	case WM_T_82545:
   3053 	case WM_T_82545_3:
   3054 	case WM_T_82546:
   3055 	case WM_T_82546_3:
   3056 	case WM_T_82541:
   3057 	case WM_T_82541_2:
   3058 	case WM_T_82547:
   3059 	case WM_T_82547_2:
   3060 	case WM_T_82573:
   3061 	case WM_T_82574:
   3062 	case WM_T_82583:
   3063 		/* generic */
   3064 		delay(10*1000);
   3065 		break;
   3066 	case WM_T_80003:
   3067 	case WM_T_82571:
   3068 	case WM_T_82572:
   3069 	case WM_T_82575:
   3070 	case WM_T_82576:
   3071 	case WM_T_82580:
   3072 	case WM_T_82580ER:
   3073 	case WM_T_I350:
   3074 	case WM_T_I354:
   3075 	case WM_T_I210:
   3076 	case WM_T_I211:
   3077 		if (sc->sc_type == WM_T_82571) {
   3078 			/* Only 82571 shares port 0 */
   3079 			mask = EEMNGCTL_CFGDONE_0;
   3080 		} else
   3081 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3082 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3083 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3084 				break;
   3085 			delay(1000);
   3086 		}
   3087 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3088 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3089 				device_xname(sc->sc_dev), __func__));
   3090 		}
   3091 		break;
   3092 	case WM_T_ICH8:
   3093 	case WM_T_ICH9:
   3094 	case WM_T_ICH10:
   3095 	case WM_T_PCH:
   3096 	case WM_T_PCH2:
   3097 	case WM_T_PCH_LPT:
   3098 		delay(10*1000);
   3099 		if (sc->sc_type >= WM_T_ICH10)
   3100 			wm_lan_init_done(sc);
   3101 		else
   3102 			wm_get_auto_rd_done(sc);
   3103 
   3104 		reg = CSR_READ(sc, WMREG_STATUS);
   3105 		if ((reg & STATUS_PHYRA) != 0)
   3106 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3107 		break;
   3108 	default:
   3109 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3110 		    __func__);
   3111 		break;
   3112 	}
   3113 }
   3114 
   3115 /*
   3116  * wm_reset:
   3117  *
   3118  *	Reset the i82542 chip.
   3119  */
   3120 static void
   3121 wm_reset(struct wm_softc *sc)
   3122 {
   3123 	int phy_reset = 0;
   3124 	int error = 0;
   3125 	uint32_t reg, mask;
   3126 
   3127 	/*
   3128 	 * Allocate on-chip memory according to the MTU size.
   3129 	 * The Packet Buffer Allocation register must be written
   3130 	 * before the chip is reset.
   3131 	 */
   3132 	switch (sc->sc_type) {
   3133 	case WM_T_82547:
   3134 	case WM_T_82547_2:
   3135 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3136 		    PBA_22K : PBA_30K;
   3137 		sc->sc_txfifo_head = 0;
   3138 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3139 		sc->sc_txfifo_size =
   3140 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3141 		sc->sc_txfifo_stall = 0;
   3142 		break;
   3143 	case WM_T_82571:
   3144 	case WM_T_82572:
    3145 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3146 	case WM_T_I350:
   3147 	case WM_T_I354:
   3148 	case WM_T_80003:
   3149 		sc->sc_pba = PBA_32K;
   3150 		break;
   3151 	case WM_T_82580:
   3152 	case WM_T_82580ER:
   3153 		sc->sc_pba = PBA_35K;
   3154 		break;
   3155 	case WM_T_I210:
   3156 	case WM_T_I211:
   3157 		sc->sc_pba = PBA_34K;
   3158 		break;
   3159 	case WM_T_82576:
   3160 		sc->sc_pba = PBA_64K;
   3161 		break;
   3162 	case WM_T_82573:
   3163 		sc->sc_pba = PBA_12K;
   3164 		break;
   3165 	case WM_T_82574:
   3166 	case WM_T_82583:
   3167 		sc->sc_pba = PBA_20K;
   3168 		break;
   3169 	case WM_T_ICH8:
   3170 		sc->sc_pba = PBA_8K;
   3171 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3172 		break;
   3173 	case WM_T_ICH9:
   3174 	case WM_T_ICH10:
   3175 		sc->sc_pba = PBA_10K;
   3176 		break;
   3177 	case WM_T_PCH:
   3178 	case WM_T_PCH2:
   3179 	case WM_T_PCH_LPT:
   3180 		sc->sc_pba = PBA_26K;
   3181 		break;
   3182 	default:
   3183 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3184 		    PBA_40K : PBA_48K;
   3185 		break;
   3186 	}
   3187 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
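	/*
	 * A sketch of the 82547 case above, assuming the PBA_* constants
	 * name sizes in KB: with a standard MTU the receive packet buffer
	 * gets PBA_30K, and the remainder up to PBA_40K -- converted to a
	 * byte count via PBA_BYTE_SHIFT -- becomes the Tx FIFO that the
	 * 82547 stall workaround below has to manage.
	 */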
   3188 
   3189 	/* Prevent the PCI-E bus from sticking */
   3190 	if (sc->sc_flags & WM_F_PCIE) {
   3191 		int timeout = 800;
   3192 
   3193 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3194 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3195 
   3196 		while (timeout--) {
   3197 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3198 			    == 0)
   3199 				break;
   3200 			delay(100);
   3201 		}
   3202 	}
   3203 
    3204 	/* Set the completion timeout for the interface */
   3205 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3206 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3207 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3208 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3209 		wm_set_pcie_completion_timeout(sc);
   3210 
   3211 	/* Clear interrupt */
   3212 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3213 
   3214 	/* Stop the transmit and receive processes. */
   3215 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3216 	sc->sc_rctl &= ~RCTL_EN;
   3217 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3218 	CSR_WRITE_FLUSH(sc);
   3219 
   3220 	/* XXX set_tbi_sbp_82543() */
   3221 
   3222 	delay(10*1000);
   3223 
   3224 	/* Must acquire the MDIO ownership before MAC reset */
   3225 	switch (sc->sc_type) {
   3226 	case WM_T_82573:
   3227 	case WM_T_82574:
   3228 	case WM_T_82583:
   3229 		error = wm_get_hw_semaphore_82573(sc);
   3230 		break;
   3231 	default:
   3232 		break;
   3233 	}
   3234 
   3235 	/*
   3236 	 * 82541 Errata 29? & 82547 Errata 28?
   3237 	 * See also the description about PHY_RST bit in CTRL register
   3238 	 * in 8254x_GBe_SDM.pdf.
   3239 	 */
   3240 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3241 		CSR_WRITE(sc, WMREG_CTRL,
   3242 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3243 		CSR_WRITE_FLUSH(sc);
   3244 		delay(5000);
   3245 	}
   3246 
   3247 	switch (sc->sc_type) {
   3248 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3249 	case WM_T_82541:
   3250 	case WM_T_82541_2:
   3251 	case WM_T_82547:
   3252 	case WM_T_82547_2:
   3253 		/*
   3254 		 * On some chipsets, a reset through a memory-mapped write
   3255 		 * cycle can cause the chip to reset before completing the
    3256 		 * write cycle.  This causes major headaches that can be
   3257 		 * avoided by issuing the reset via indirect register writes
   3258 		 * through I/O space.
   3259 		 *
   3260 		 * So, if we successfully mapped the I/O BAR at attach time,
   3261 		 * use that.  Otherwise, try our luck with a memory-mapped
   3262 		 * reset.
   3263 		 */
   3264 		if (sc->sc_flags & WM_F_IOH_VALID)
   3265 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3266 		else
   3267 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3268 		break;
   3269 	case WM_T_82545_3:
   3270 	case WM_T_82546_3:
   3271 		/* Use the shadow control register on these chips. */
   3272 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3273 		break;
   3274 	case WM_T_80003:
   3275 		mask = swfwphysem[sc->sc_funcid];
   3276 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3277 		wm_get_swfw_semaphore(sc, mask);
   3278 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3279 		wm_put_swfw_semaphore(sc, mask);
   3280 		break;
   3281 	case WM_T_ICH8:
   3282 	case WM_T_ICH9:
   3283 	case WM_T_ICH10:
   3284 	case WM_T_PCH:
   3285 	case WM_T_PCH2:
   3286 	case WM_T_PCH_LPT:
   3287 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3288 		if (wm_check_reset_block(sc) == 0) {
   3289 			/*
   3290 			 * Gate automatic PHY configuration by hardware on
   3291 			 * non-managed 82579
   3292 			 */
   3293 			if ((sc->sc_type == WM_T_PCH2)
   3294 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3295 				!= 0))
   3296 				wm_gate_hw_phy_config_ich8lan(sc, 1);
    3297 
   3299 			reg |= CTRL_PHY_RESET;
   3300 			phy_reset = 1;
   3301 		}
   3302 		wm_get_swfwhw_semaphore(sc);
   3303 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3304 		/* Don't insert a completion barrier when resetting; just delay */
   3305 		delay(20*1000);
   3306 		wm_put_swfwhw_semaphore(sc);
   3307 		break;
   3308 	case WM_T_82542_2_0:
   3309 	case WM_T_82542_2_1:
   3310 	case WM_T_82543:
   3311 	case WM_T_82540:
   3312 	case WM_T_82545:
   3313 	case WM_T_82546:
   3314 	case WM_T_82571:
   3315 	case WM_T_82572:
   3316 	case WM_T_82573:
   3317 	case WM_T_82574:
   3318 	case WM_T_82575:
   3319 	case WM_T_82576:
   3320 	case WM_T_82580:
   3321 	case WM_T_82580ER:
   3322 	case WM_T_82583:
   3323 	case WM_T_I350:
   3324 	case WM_T_I354:
   3325 	case WM_T_I210:
   3326 	case WM_T_I211:
   3327 	default:
   3328 		/* Everything else can safely use the documented method. */
   3329 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3330 		break;
   3331 	}
   3332 
   3333 	/* Must release the MDIO ownership after MAC reset */
   3334 	switch (sc->sc_type) {
   3335 	case WM_T_82573:
   3336 	case WM_T_82574:
   3337 	case WM_T_82583:
   3338 		if (error == 0)
   3339 			wm_put_hw_semaphore_82573(sc);
   3340 		break;
   3341 	default:
   3342 		break;
   3343 	}
   3344 
   3345 	if (phy_reset != 0)
   3346 		wm_get_cfg_done(sc);
   3347 
   3348 	/* reload EEPROM */
   3349 	switch (sc->sc_type) {
   3350 	case WM_T_82542_2_0:
   3351 	case WM_T_82542_2_1:
   3352 	case WM_T_82543:
   3353 	case WM_T_82544:
   3354 		delay(10);
   3355 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3356 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3357 		CSR_WRITE_FLUSH(sc);
   3358 		delay(2000);
   3359 		break;
   3360 	case WM_T_82540:
   3361 	case WM_T_82545:
   3362 	case WM_T_82545_3:
   3363 	case WM_T_82546:
   3364 	case WM_T_82546_3:
   3365 		delay(5*1000);
   3366 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3367 		break;
   3368 	case WM_T_82541:
   3369 	case WM_T_82541_2:
   3370 	case WM_T_82547:
   3371 	case WM_T_82547_2:
   3372 		delay(20000);
   3373 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3374 		break;
   3375 	case WM_T_82571:
   3376 	case WM_T_82572:
   3377 	case WM_T_82573:
   3378 	case WM_T_82574:
   3379 	case WM_T_82583:
   3380 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3381 			delay(10);
   3382 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3383 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3384 			CSR_WRITE_FLUSH(sc);
   3385 		}
   3386 		/* check EECD_EE_AUTORD */
   3387 		wm_get_auto_rd_done(sc);
   3388 		/*
    3389 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
    3390 		 * is set.
   3391 		 */
   3392 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3393 		    || (sc->sc_type == WM_T_82583))
   3394 			delay(25*1000);
   3395 		break;
   3396 	case WM_T_82575:
   3397 	case WM_T_82576:
   3398 	case WM_T_82580:
   3399 	case WM_T_82580ER:
   3400 	case WM_T_I350:
   3401 	case WM_T_I354:
   3402 	case WM_T_I210:
   3403 	case WM_T_I211:
   3404 	case WM_T_80003:
   3405 		/* check EECD_EE_AUTORD */
   3406 		wm_get_auto_rd_done(sc);
   3407 		break;
   3408 	case WM_T_ICH8:
   3409 	case WM_T_ICH9:
   3410 	case WM_T_ICH10:
   3411 	case WM_T_PCH:
   3412 	case WM_T_PCH2:
   3413 	case WM_T_PCH_LPT:
   3414 		break;
   3415 	default:
   3416 		panic("%s: unknown type\n", __func__);
   3417 	}
   3418 
   3419 	/* Check whether EEPROM is present or not */
   3420 	switch (sc->sc_type) {
   3421 	case WM_T_82575:
   3422 	case WM_T_82576:
   3423 #if 0 /* XXX */
   3424 	case WM_T_82580:
   3425 	case WM_T_82580ER:
   3426 #endif
   3427 	case WM_T_I350:
   3428 	case WM_T_I354:
   3429 	case WM_T_ICH8:
   3430 	case WM_T_ICH9:
   3431 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3432 			/* Not found */
   3433 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3434 			if ((sc->sc_type == WM_T_82575)
   3435 			    || (sc->sc_type == WM_T_82576)
   3436 			    || (sc->sc_type == WM_T_82580)
   3437 			    || (sc->sc_type == WM_T_82580ER)
   3438 			    || (sc->sc_type == WM_T_I350)
   3439 			    || (sc->sc_type == WM_T_I354))
   3440 				wm_reset_init_script_82575(sc);
   3441 		}
   3442 		break;
   3443 	default:
   3444 		break;
   3445 	}
   3446 
   3447 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   3448 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3449 		/* clear global device reset status bit */
   3450 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3451 	}
   3452 
   3453 	/* Clear any pending interrupt events. */
   3454 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3455 	reg = CSR_READ(sc, WMREG_ICR);
   3456 
   3457 	/* reload sc_ctrl */
   3458 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3459 
   3460 	if (sc->sc_type == WM_T_I350)
   3461 		wm_set_eee_i350(sc);
   3462 
   3463 	/* dummy read from WUC */
   3464 	if (sc->sc_type == WM_T_PCH)
   3465 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3466 	/*
   3467 	 * For PCH, this write will make sure that any noise will be detected
   3468 	 * as a CRC error and be dropped rather than show up as a bad packet
   3469 	 * to the DMA engine
   3470 	 */
   3471 	if (sc->sc_type == WM_T_PCH)
   3472 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3473 
   3474 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3475 		CSR_WRITE(sc, WMREG_WUC, 0);
   3476 
   3477 	/* XXX need special handling for 82580 */
   3478 }
   3479 
   3480 /*
   3481  * wm_add_rxbuf:
   3482  *
    3483  *	Add a receive buffer to the indicated descriptor.
   3484  */
   3485 static int
   3486 wm_add_rxbuf(struct wm_softc *sc, int idx)
   3487 {
   3488 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   3489 	struct mbuf *m;
   3490 	int error;
   3491 
   3492 	KASSERT(WM_RX_LOCKED(sc));
   3493 
   3494 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3495 	if (m == NULL)
   3496 		return ENOBUFS;
   3497 
   3498 	MCLGET(m, M_DONTWAIT);
   3499 	if ((m->m_flags & M_EXT) == 0) {
   3500 		m_freem(m);
   3501 		return ENOBUFS;
   3502 	}
   3503 
   3504 	if (rxs->rxs_mbuf != NULL)
   3505 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3506 
   3507 	rxs->rxs_mbuf = m;
   3508 
   3509 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3510 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3511 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3512 	if (error) {
   3513 		/* XXX XXX XXX */
   3514 		aprint_error_dev(sc->sc_dev,
   3515 		    "unable to load rx DMA map %d, error = %d\n",
   3516 		    idx, error);
   3517 		panic("wm_add_rxbuf");
   3518 	}
   3519 
   3520 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3521 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3522 
   3523 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3524 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3525 			WM_INIT_RXDESC(sc, idx);
   3526 	} else
   3527 		WM_INIT_RXDESC(sc, idx);
   3528 
   3529 	return 0;
   3530 }
   3531 
   3532 /*
   3533  * wm_rxdrain:
   3534  *
   3535  *	Drain the receive queue.
   3536  */
   3537 static void
   3538 wm_rxdrain(struct wm_softc *sc)
   3539 {
   3540 	struct wm_rxsoft *rxs;
   3541 	int i;
   3542 
   3543 	KASSERT(WM_RX_LOCKED(sc));
   3544 
   3545 	for (i = 0; i < WM_NRXDESC; i++) {
   3546 		rxs = &sc->sc_rxsoft[i];
   3547 		if (rxs->rxs_mbuf != NULL) {
   3548 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3549 			m_freem(rxs->rxs_mbuf);
   3550 			rxs->rxs_mbuf = NULL;
   3551 		}
   3552 	}
   3553 }
   3554 
   3555 /*
   3556  * wm_init:		[ifnet interface function]
   3557  *
   3558  *	Initialize the interface.
   3559  */
   3560 static int
   3561 wm_init(struct ifnet *ifp)
   3562 {
   3563 	struct wm_softc *sc = ifp->if_softc;
   3564 	int ret;
   3565 
   3566 	WM_BOTH_LOCK(sc);
   3567 	ret = wm_init_locked(ifp);
   3568 	WM_BOTH_UNLOCK(sc);
   3569 
   3570 	return ret;
   3571 }
   3572 
   3573 static int
   3574 wm_init_locked(struct ifnet *ifp)
   3575 {
   3576 	struct wm_softc *sc = ifp->if_softc;
   3577 	struct wm_rxsoft *rxs;
   3578 	int i, j, trynum, error = 0;
   3579 	uint32_t reg;
   3580 
   3581 	KASSERT(WM_BOTH_LOCKED(sc));
   3582 	/*
    3583 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    3584 	 * There is a small but measurable benefit to avoiding the adjustment
   3585 	 * of the descriptor so that the headers are aligned, for normal mtu,
   3586 	 * on such platforms.  One possibility is that the DMA itself is
   3587 	 * slightly more efficient if the front of the entire packet (instead
   3588 	 * of the front of the headers) is aligned.
   3589 	 *
   3590 	 * Note we must always set align_tweak to 0 if we are using
   3591 	 * jumbo frames.
   3592 	 */
   3593 #ifdef __NO_STRICT_ALIGNMENT
   3594 	sc->sc_align_tweak = 0;
   3595 #else
   3596 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   3597 		sc->sc_align_tweak = 0;
   3598 	else
   3599 		sc->sc_align_tweak = 2;
   3600 #endif /* __NO_STRICT_ALIGNMENT */
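	/*
	 * Example: a 2-byte tweak pushes the 14-byte Ethernet header
	 * forward so that the IP header following it starts on a 4-byte
	 * boundary, which strict-alignment platforms require.
	 */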
   3601 
   3602 	/* Cancel any pending I/O. */
   3603 	wm_stop_locked(ifp, 0);
   3604 
   3605 	/* update statistics before reset */
   3606 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3607 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   3608 
   3609 	/* Reset the chip to a known state. */
   3610 	wm_reset(sc);
   3611 
   3612 	switch (sc->sc_type) {
   3613 	case WM_T_82571:
   3614 	case WM_T_82572:
   3615 	case WM_T_82573:
   3616 	case WM_T_82574:
   3617 	case WM_T_82583:
   3618 	case WM_T_80003:
   3619 	case WM_T_ICH8:
   3620 	case WM_T_ICH9:
   3621 	case WM_T_ICH10:
   3622 	case WM_T_PCH:
   3623 	case WM_T_PCH2:
   3624 	case WM_T_PCH_LPT:
   3625 		if (wm_check_mng_mode(sc) != 0)
   3626 			wm_get_hw_control(sc);
   3627 		break;
   3628 	default:
   3629 		break;
   3630 	}
   3631 
   3632 	/* Reset the PHY. */
   3633 	if (sc->sc_flags & WM_F_HAS_MII)
   3634 		wm_gmii_reset(sc);
   3635 
   3636 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3637 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
   3638 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3639 	    || (sc->sc_type == WM_T_PCH_LPT))
   3640 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
   3641 
   3642 	/* Initialize the transmit descriptor ring. */
   3643 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
   3644 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
   3645 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   3646 	sc->sc_txfree = WM_NTXDESC(sc);
   3647 	sc->sc_txnext = 0;
   3648 
   3649 	if (sc->sc_type < WM_T_82543) {
   3650 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3651 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3652 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
   3653 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   3654 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   3655 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   3656 	} else {
   3657 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
   3658 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
   3659 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
   3660 		CSR_WRITE(sc, WMREG_TDH, 0);
   3661 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
   3662 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
   3663 
   3664 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3665 			/*
   3666 			 * Don't write TDT before TCTL.EN is set.
   3667 			 * See the document.
   3668 			 */
   3669 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
   3670 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   3671 			    | TXDCTL_WTHRESH(0));
   3672 		else {
   3673 			CSR_WRITE(sc, WMREG_TDT, 0);
   3674 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   3675 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   3676 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   3677 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   3678 		}
   3679 	}
   3680 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   3681 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   3682 
   3683 	/* Initialize the transmit job descriptors. */
   3684 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
   3685 		sc->sc_txsoft[i].txs_mbuf = NULL;
   3686 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
   3687 	sc->sc_txsnext = 0;
   3688 	sc->sc_txsdirty = 0;
   3689 
   3690 	/*
   3691 	 * Initialize the receive descriptor and receive job
   3692 	 * descriptor rings.
   3693 	 */
   3694 	if (sc->sc_type < WM_T_82543) {
   3695 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   3696 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   3697 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   3698 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   3699 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   3700 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   3701 
   3702 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   3703 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   3704 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   3705 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   3706 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   3707 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   3708 	} else {
   3709 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   3710 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   3711 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   3712 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3713 			CSR_WRITE(sc, WMREG_EITR(0), 450);
   3714 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    3715 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   3716 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
   3717 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   3718 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
   3719 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   3720 			    | RXDCTL_WTHRESH(1));
   3721 		} else {
   3722 			CSR_WRITE(sc, WMREG_RDH, 0);
   3723 			CSR_WRITE(sc, WMREG_RDT, 0);
   3724 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
   3725 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
   3726 		}
   3727 	}
   3728 	for (i = 0; i < WM_NRXDESC; i++) {
   3729 		rxs = &sc->sc_rxsoft[i];
   3730 		if (rxs->rxs_mbuf == NULL) {
   3731 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   3732 				log(LOG_ERR, "%s: unable to allocate or map "
   3733 				    "rx buffer %d, error = %d\n",
   3734 				    device_xname(sc->sc_dev), i, error);
   3735 				/*
   3736 				 * XXX Should attempt to run with fewer receive
   3737 				 * XXX buffers instead of just failing.
   3738 				 */
   3739 				wm_rxdrain(sc);
   3740 				goto out;
   3741 			}
   3742 		} else {
   3743 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3744 				WM_INIT_RXDESC(sc, i);
   3745 			/*
    3746 			 * For 82575 and newer devices, the RX descriptors
    3747 			 * must be initialized after RCTL.EN is set in
   3748 			 * wm_set_filter()
   3749 			 */
   3750 		}
   3751 	}
   3752 	sc->sc_rxptr = 0;
   3753 	sc->sc_rxdiscard = 0;
   3754 	WM_RXCHAIN_RESET(sc);
   3755 
   3756 	/*
   3757 	 * Clear out the VLAN table -- we don't use it (yet).
   3758 	 */
   3759 	CSR_WRITE(sc, WMREG_VET, 0);
   3760 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3761 		trynum = 10; /* Due to hw errata */
   3762 	else
   3763 		trynum = 1;
   3764 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   3765 		for (j = 0; j < trynum; j++)
   3766 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   3767 
   3768 	/*
   3769 	 * Set up flow-control parameters.
   3770 	 *
   3771 	 * XXX Values could probably stand some tuning.
   3772 	 */
   3773 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   3774 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   3775 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   3776 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   3777 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   3778 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   3779 	}
   3780 
   3781 	sc->sc_fcrtl = FCRTL_DFLT;
   3782 	if (sc->sc_type < WM_T_82543) {
   3783 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   3784 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   3785 	} else {
   3786 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   3787 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   3788 	}
   3789 
   3790 	if (sc->sc_type == WM_T_80003)
   3791 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   3792 	else
   3793 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   3794 
    3795 	/* Write the control register. */
   3796 	wm_set_vlan(sc);
   3797 
   3798 	if (sc->sc_flags & WM_F_HAS_MII) {
   3799 		int val;
   3800 
   3801 		switch (sc->sc_type) {
   3802 		case WM_T_80003:
   3803 		case WM_T_ICH8:
   3804 		case WM_T_ICH9:
   3805 		case WM_T_ICH10:
   3806 		case WM_T_PCH:
   3807 		case WM_T_PCH2:
   3808 		case WM_T_PCH_LPT:
   3809 			/*
   3810 			 * Set the mac to wait the maximum time between each
   3811 			 * iteration and increase the max iterations when
   3812 			 * polling the phy; this fixes erroneous timeouts at
   3813 			 * 10Mbps.
   3814 			 */
   3815 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   3816 			    0xFFFF);
   3817 			val = wm_kmrn_readreg(sc,
   3818 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   3819 			val |= 0x3F;
   3820 			wm_kmrn_writereg(sc,
   3821 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   3822 			break;
   3823 		default:
   3824 			break;
   3825 		}
   3826 
   3827 		if (sc->sc_type == WM_T_80003) {
   3828 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   3829 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   3830 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   3831 
    3832 			/* Bypass RX and TX FIFOs */
   3833 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   3834 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   3835 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   3836 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   3837 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   3838 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   3839 		}
   3840 	}
   3841 #if 0
   3842 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   3843 #endif
   3844 
   3845 	/* Set up checksum offload parameters. */
   3846 	reg = CSR_READ(sc, WMREG_RXCSUM);
   3847 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   3848 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   3849 		reg |= RXCSUM_IPOFL;
   3850 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   3851 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   3852 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   3853 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   3854 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   3855 
   3856 	/* Reset TBI's RXCFG count */
   3857 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
   3858 
   3859 	/* Set up the interrupt registers. */
   3860 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3861 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   3862 	    ICR_RXO | ICR_RXT0;
   3863 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
   3864 		sc->sc_icr |= ICR_RXCFG;
   3865 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   3866 
   3867 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3868 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3869 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3870 		reg = CSR_READ(sc, WMREG_KABGTXD);
   3871 		reg |= KABGTXD_BGSQLBIAS;
   3872 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   3873 	}
   3874 
   3875 	/* Set up the inter-packet gap. */
   3876 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   3877 
   3878 	if (sc->sc_type >= WM_T_82543) {
   3879 		/*
   3880 		 * Set up the interrupt throttling register (units of 256ns)
   3881 		 * Note that a footnote in Intel's documentation says this
   3882 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   3883 		 * or 10Mbit mode.  Empirically, it appears to be the case
    3884 		 * or 10Mbit mode.  Empirically, the same appears to hold
    3885 		 * for the 1024ns units of the other
   3886 		 * to divide this value by 4 when the link speed is low.
   3887 		 *
   3888 		 * XXX implement this division at link speed change!
   3889 		 */
   3890 
   3891 		/*
   3892 		 * For N interrupts/sec, set this value to:
   3893 		 * 1000000000 / (N * 256).  Note that we set the
   3894 		 * absolute and packet timer values to this value
   3895 		 * divided by 4 to get "simple timer" behavior.
   3896 		 */
   3897 
   3898 		sc->sc_itr = 1500;		/* 2604 ints/sec */
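		/*
		 * Checking the arithmetic: 1500 * 256ns = 384us between
		 * interrupts, and 1000000000 / (2604 * 256) ~= 1500, so
		 * the register value and the target rate above agree.
		 */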
   3899 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   3900 	}
   3901 
   3902 	/* Set the VLAN ethernetype. */
   3903 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   3904 
   3905 	/*
   3906 	 * Set up the transmit control register; we start out with
    3907 	 * a collision distance suitable for FDX, but update it when
   3908 	 * we resolve the media type.
   3909 	 */
   3910 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   3911 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   3912 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3913 	if (sc->sc_type >= WM_T_82571)
   3914 		sc->sc_tctl |= TCTL_MULR;
   3915 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3916 
   3917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3918 		/* Write TDT after TCTL.EN is set. See the document. */
   3919 		CSR_WRITE(sc, WMREG_TDT, 0);
   3920 	}
   3921 
   3922 	if (sc->sc_type == WM_T_80003) {
   3923 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   3924 		reg &= ~TCTL_EXT_GCEX_MASK;
   3925 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   3926 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   3927 	}
   3928 
   3929 	/* Set the media. */
   3930 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   3931 		goto out;
   3932 
   3933 	/* Configure for OS presence */
   3934 	wm_init_manageability(sc);
   3935 
   3936 	/*
   3937 	 * Set up the receive control register; we actually program
   3938 	 * the register when we set the receive filter.  Use multicast
   3939 	 * address offset type 0.
   3940 	 *
   3941 	 * Only the i82544 has the ability to strip the incoming
   3942 	 * CRC, so we don't enable that feature.
   3943 	 */
   3944 	sc->sc_mchash_type = 0;
   3945 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   3946 	    | RCTL_MO(sc->sc_mchash_type);
   3947 
   3948 	/*
   3949 	 * The I350 has a bug where it always strips the CRC whether
    3950 	 * asked to or not.  So ask for stripped CRC here and cope with it in rxeof.
   3951 	 */
   3952 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3953 	    || (sc->sc_type == WM_T_I210))
   3954 		sc->sc_rctl |= RCTL_SECRC;
   3955 
   3956 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   3957 	    && (ifp->if_mtu > ETHERMTU)) {
   3958 		sc->sc_rctl |= RCTL_LPE;
   3959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3960 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   3961 	}
   3962 
   3963 	if (MCLBYTES == 2048) {
   3964 		sc->sc_rctl |= RCTL_2k;
   3965 	} else {
   3966 		if (sc->sc_type >= WM_T_82543) {
   3967 			switch (MCLBYTES) {
   3968 			case 4096:
   3969 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   3970 				break;
   3971 			case 8192:
   3972 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   3973 				break;
   3974 			case 16384:
   3975 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   3976 				break;
   3977 			default:
   3978 				panic("wm_init: MCLBYTES %d unsupported",
   3979 				    MCLBYTES);
   3980 				break;
   3981 			}
   3982 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   3983 	}
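	/*
	 * Example: with the usual 2KB mbuf clusters, RCTL_2k is used
	 * directly; a kernel built with 8KB clusters would instead get
	 * RCTL_BSEX | RCTL_BSEX_8k, and the pre-82543 chips support only
	 * the 2KB buffer size.
	 */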
   3984 
   3985 	/* Set the receive filter. */
   3986 	wm_set_filter(sc);
   3987 
   3988 	/* Enable ECC */
   3989 	switch (sc->sc_type) {
   3990 	case WM_T_82571:
   3991 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   3992 		reg |= PBA_ECC_CORR_EN;
   3993 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   3994 		break;
   3995 	case WM_T_PCH_LPT:
   3996 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   3997 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   3998 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   3999 
   4000 		reg = CSR_READ(sc, WMREG_CTRL);
   4001 		reg |= CTRL_MEHE;
   4002 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4003 		break;
   4004 	default:
   4005 		break;
   4006 	}
   4007 
    4008 	/* On 82575 and later, set RDT only if RX is enabled */
   4009 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4010 		for (i = 0; i < WM_NRXDESC; i++)
   4011 			WM_INIT_RXDESC(sc, i);
   4012 
   4013 	sc->sc_stopping = false;
   4014 
   4015 	/* Start the one second link check clock. */
   4016 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4017 
   4018 	/* ...all done! */
   4019 	ifp->if_flags |= IFF_RUNNING;
   4020 	ifp->if_flags &= ~IFF_OACTIVE;
   4021 
   4022  out:
   4023 	sc->sc_if_flags = ifp->if_flags;
   4024 	if (error)
   4025 		log(LOG_ERR, "%s: interface not running\n",
   4026 		    device_xname(sc->sc_dev));
   4027 	return error;
   4028 }
   4029 
   4030 /*
   4031  * wm_stop:		[ifnet interface function]
   4032  *
   4033  *	Stop transmission on the interface.
   4034  */
   4035 static void
   4036 wm_stop(struct ifnet *ifp, int disable)
   4037 {
   4038 	struct wm_softc *sc = ifp->if_softc;
   4039 
   4040 	WM_BOTH_LOCK(sc);
   4041 	wm_stop_locked(ifp, disable);
   4042 	WM_BOTH_UNLOCK(sc);
   4043 }
   4044 
   4045 static void
   4046 wm_stop_locked(struct ifnet *ifp, int disable)
   4047 {
   4048 	struct wm_softc *sc = ifp->if_softc;
   4049 	struct wm_txsoft *txs;
   4050 	int i;
   4051 
   4052 	KASSERT(WM_BOTH_LOCKED(sc));
   4053 
   4054 	sc->sc_stopping = true;
   4055 
   4056 	/* Stop the one second clock. */
   4057 	callout_stop(&sc->sc_tick_ch);
   4058 
   4059 	/* Stop the 82547 Tx FIFO stall check timer. */
   4060 	if (sc->sc_type == WM_T_82547)
   4061 		callout_stop(&sc->sc_txfifo_ch);
   4062 
   4063 	if (sc->sc_flags & WM_F_HAS_MII) {
   4064 		/* Down the MII. */
   4065 		mii_down(&sc->sc_mii);
   4066 	} else {
   4067 #if 0
   4068 		/* Should we clear PHY's status properly? */
   4069 		wm_reset(sc);
   4070 #endif
   4071 	}
   4072 
   4073 	/* Stop the transmit and receive processes. */
   4074 	CSR_WRITE(sc, WMREG_TCTL, 0);
   4075 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4076 	sc->sc_rctl &= ~RCTL_EN;
   4077 
   4078 	/*
   4079 	 * Clear the interrupt mask to ensure the device cannot assert its
   4080 	 * interrupt line.
   4081 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   4082 	 * any currently pending or shared interrupt.
   4083 	 */
   4084 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4085 	sc->sc_icr = 0;
   4086 
   4087 	/* Release any queued transmit buffers. */
   4088 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   4089 		txs = &sc->sc_txsoft[i];
   4090 		if (txs->txs_mbuf != NULL) {
   4091 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   4092 			m_freem(txs->txs_mbuf);
   4093 			txs->txs_mbuf = NULL;
   4094 		}
   4095 	}
   4096 
   4097 	/* Mark the interface as down and cancel the watchdog timer. */
   4098 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   4099 	ifp->if_timer = 0;
   4100 
   4101 	if (disable)
   4102 		wm_rxdrain(sc);
   4103 
   4104 #if 0 /* notyet */
   4105 	if (sc->sc_type >= WM_T_82544)
   4106 		CSR_WRITE(sc, WMREG_WUC, 0);
   4107 #endif
   4108 }
   4109 
   4110 /*
   4111  * wm_tx_offload:
   4112  *
   4113  *	Set up TCP/IP checksumming parameters for the
   4114  *	specified packet.
   4115  */
   4116 static int
   4117 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   4118     uint8_t *fieldsp)
   4119 {
   4120 	struct mbuf *m0 = txs->txs_mbuf;
   4121 	struct livengood_tcpip_ctxdesc *t;
   4122 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   4123 	uint32_t ipcse;
   4124 	struct ether_header *eh;
   4125 	int offset, iphl;
   4126 	uint8_t fields;
   4127 
   4128 	/*
   4129 	 * XXX It would be nice if the mbuf pkthdr had offset
   4130 	 * fields for the protocol headers.
   4131 	 */
   4132 
   4133 	eh = mtod(m0, struct ether_header *);
   4134 	switch (htons(eh->ether_type)) {
   4135 	case ETHERTYPE_IP:
   4136 	case ETHERTYPE_IPV6:
   4137 		offset = ETHER_HDR_LEN;
   4138 		break;
   4139 
   4140 	case ETHERTYPE_VLAN:
   4141 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4142 		break;
   4143 
   4144 	default:
   4145 		/*
   4146 		 * Don't support this protocol or encapsulation.
   4147 		 */
   4148 		*fieldsp = 0;
   4149 		*cmdp = 0;
   4150 		return 0;
   4151 	}
   4152 
   4153 	if ((m0->m_pkthdr.csum_flags &
   4154 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   4155 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4156 	} else {
   4157 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4158 	}
   4159 	ipcse = offset + iphl - 1;
   4160 
   4161 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   4162 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   4163 	seg = 0;
   4164 	fields = 0;
   4165 
   4166 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4167 		int hlen = offset + iphl;
   4168 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4169 
   4170 		if (__predict_false(m0->m_len <
   4171 				    (hlen + sizeof(struct tcphdr)))) {
   4172 			/*
   4173 			 * TCP/IP headers are not in the first mbuf; we need
   4174 			 * to do this the slow and painful way.  Let's just
   4175 			 * hope this doesn't happen very often.
   4176 			 */
   4177 			struct tcphdr th;
   4178 
   4179 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4180 
   4181 			m_copydata(m0, hlen, sizeof(th), &th);
   4182 			if (v4) {
   4183 				struct ip ip;
   4184 
   4185 				m_copydata(m0, offset, sizeof(ip), &ip);
   4186 				ip.ip_len = 0;
   4187 				m_copyback(m0,
   4188 				    offset + offsetof(struct ip, ip_len),
   4189 				    sizeof(ip.ip_len), &ip.ip_len);
   4190 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4191 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4192 			} else {
   4193 				struct ip6_hdr ip6;
   4194 
   4195 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4196 				ip6.ip6_plen = 0;
   4197 				m_copyback(m0,
   4198 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4199 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4200 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4201 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4202 			}
   4203 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4204 			    sizeof(th.th_sum), &th.th_sum);
   4205 
   4206 			hlen += th.th_off << 2;
   4207 		} else {
   4208 			/*
   4209 			 * TCP/IP headers are in the first mbuf; we can do
   4210 			 * this the easy way.
   4211 			 */
   4212 			struct tcphdr *th;
   4213 
   4214 			if (v4) {
   4215 				struct ip *ip =
   4216 				    (void *)(mtod(m0, char *) + offset);
   4217 				th = (void *)(mtod(m0, char *) + hlen);
   4218 
   4219 				ip->ip_len = 0;
   4220 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4221 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4222 			} else {
   4223 				struct ip6_hdr *ip6 =
   4224 				    (void *)(mtod(m0, char *) + offset);
   4225 				th = (void *)(mtod(m0, char *) + hlen);
   4226 
   4227 				ip6->ip6_plen = 0;
   4228 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4229 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4230 			}
   4231 			hlen += th->th_off << 2;
   4232 		}
   4233 
   4234 		if (v4) {
   4235 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4236 			cmdlen |= WTX_TCPIP_CMD_IP;
   4237 		} else {
   4238 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4239 			ipcse = 0;
   4240 		}
   4241 		cmd |= WTX_TCPIP_CMD_TSE;
   4242 		cmdlen |= WTX_TCPIP_CMD_TSE |
   4243 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   4244 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   4245 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   4246 	}
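	/*
	 * In both TSO paths above the IP length field is zeroed and
	 * th_sum is seeded with the pseudo-header checksum, so the
	 * hardware can fill in the per-segment length and finish the
	 * checksum itself -- the usual TSO convention.
	 */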
   4247 
   4248 	/*
   4249 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   4250 	 * offload feature, if we load the context descriptor, we
   4251 	 * MUST provide valid values for IPCSS and TUCSS fields.
   4252 	 */
   4253 
   4254 	ipcs = WTX_TCPIP_IPCSS(offset) |
   4255 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   4256 	    WTX_TCPIP_IPCSE(ipcse);
   4257 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   4258 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   4259 		fields |= WTX_IXSM;
   4260 	}
   4261 
   4262 	offset += iphl;
   4263 
   4264 	if (m0->m_pkthdr.csum_flags &
   4265 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   4266 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4267 		fields |= WTX_TXSM;
   4268 		tucs = WTX_TCPIP_TUCSS(offset) |
   4269 		    WTX_TCPIP_TUCSO(offset +
   4270 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   4271 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4272 	} else if ((m0->m_pkthdr.csum_flags &
   4273 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   4274 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4275 		fields |= WTX_TXSM;
   4276 		tucs = WTX_TCPIP_TUCSS(offset) |
   4277 		    WTX_TCPIP_TUCSO(offset +
   4278 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   4279 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4280 	} else {
   4281 		/* Just initialize it to a valid TCP context. */
   4282 		tucs = WTX_TCPIP_TUCSS(offset) |
   4283 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   4284 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   4285 	}
   4286 
   4287 	/* Fill in the context descriptor. */
   4288 	t = (struct livengood_tcpip_ctxdesc *)
   4289 	    &sc->sc_txdescs[sc->sc_txnext];
   4290 	t->tcpip_ipcs = htole32(ipcs);
   4291 	t->tcpip_tucs = htole32(tucs);
   4292 	t->tcpip_cmdlen = htole32(cmdlen);
   4293 	t->tcpip_seg = htole32(seg);
   4294 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4295 
   4296 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4297 	txs->txs_ndesc++;
   4298 
   4299 	*cmdp = cmd;
   4300 	*fieldsp = fields;
   4301 
   4302 	return 0;
   4303 }
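/*
 * Note that loading the context descriptor consumes one ring entry
 * (txs_ndesc is bumped above); this is the extra descriptor that
 * wm_start_locked() accounts for when it reserves two slots beyond
 * the packet's DMA segments.
 */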
   4304 
   4305 static void
   4306 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   4307 {
   4308 	struct mbuf *m;
   4309 	int i;
   4310 
   4311 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   4312 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   4313 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   4314 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   4315 		    m->m_data, m->m_len, m->m_flags);
   4316 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   4317 	    i, i == 1 ? "" : "s");
   4318 }
   4319 
   4320 /*
   4321  * wm_82547_txfifo_stall:
   4322  *
   4323  *	Callout used to wait for the 82547 Tx FIFO to drain,
   4324  *	reset the FIFO pointers, and restart packet transmission.
   4325  */
   4326 static void
   4327 wm_82547_txfifo_stall(void *arg)
   4328 {
   4329 	struct wm_softc *sc = arg;
   4330 #ifndef WM_MPSAFE
   4331 	int s;
   4332 
   4333 	s = splnet();
   4334 #endif
   4335 	WM_TX_LOCK(sc);
   4336 
   4337 	if (sc->sc_stopping)
   4338 		goto out;
   4339 
   4340 	if (sc->sc_txfifo_stall) {
   4341 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   4342 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   4343 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   4344 			/*
   4345 			 * Packets have drained.  Stop transmitter, reset
   4346 			 * FIFO pointers, restart transmitter, and kick
   4347 			 * the packet queue.
   4348 			 */
   4349 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   4350 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   4351 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   4352 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   4353 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   4354 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   4355 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   4356 			CSR_WRITE_FLUSH(sc);
   4357 
   4358 			sc->sc_txfifo_head = 0;
   4359 			sc->sc_txfifo_stall = 0;
   4360 			wm_start_locked(&sc->sc_ethercom.ec_if);
   4361 		} else {
   4362 			/*
   4363 			 * Still waiting for packets to drain; try again in
   4364 			 * another tick.
   4365 			 */
   4366 			callout_schedule(&sc->sc_txfifo_ch, 1);
   4367 		}
   4368 	}
   4369 
   4370 out:
   4371 	WM_TX_UNLOCK(sc);
   4372 #ifndef WM_MPSAFE
   4373 	splx(s);
   4374 #endif
   4375 }
   4376 
   4377 /*
   4378  * wm_82547_txfifo_bugchk:
   4379  *
   4380  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   4381  *	prevent enqueueing a packet that would wrap around the end
    4382  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   4383  *
   4384  *	We do this by checking the amount of space before the end
   4385  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   4386  *	the Tx FIFO, wait for all remaining packets to drain, reset
   4387  *	the internal FIFO pointers to the beginning, and restart
   4388  *	transmission on the interface.
   4389  */
   4390 #define	WM_FIFO_HDR		0x10
   4391 #define	WM_82547_PAD_LEN	0x3e0
   4392 static int
   4393 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   4394 {
   4395 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   4396 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   4397 
   4398 	/* Just return if already stalled. */
   4399 	if (sc->sc_txfifo_stall)
   4400 		return 1;
   4401 
   4402 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   4403 		/* Stall only occurs in half-duplex mode. */
   4404 		goto send_packet;
   4405 	}
   4406 
   4407 	if (len >= WM_82547_PAD_LEN + space) {
   4408 		sc->sc_txfifo_stall = 1;
   4409 		callout_schedule(&sc->sc_txfifo_ch, 1);
   4410 		return 1;
   4411 	}
   4412 
   4413  send_packet:
   4414 	sc->sc_txfifo_head += len;
   4415 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   4416 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   4417 
   4418 	return 0;
   4419 }
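/*
 * Worked example (illustrative numbers): a 1514-byte frame in
 * half-duplex mode rounds up to len = 1536 with the 16-byte FIFO
 * header.  The stall path is taken when len >= 0x3e0 + space, i.e.
 * when 544 bytes or fewer remain before the end of the FIFO;
 * otherwise the head simply advances by len, wrapping at
 * sc_txfifo_size.
 */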
   4420 
   4421 /*
   4422  * wm_start:		[ifnet interface function]
   4423  *
   4424  *	Start packet transmission on the interface.
   4425  */
   4426 static void
   4427 wm_start(struct ifnet *ifp)
   4428 {
   4429 	struct wm_softc *sc = ifp->if_softc;
   4430 
   4431 	WM_TX_LOCK(sc);
   4432 	if (!sc->sc_stopping)
   4433 		wm_start_locked(ifp);
   4434 	WM_TX_UNLOCK(sc);
   4435 }
   4436 
   4437 static void
   4438 wm_start_locked(struct ifnet *ifp)
   4439 {
   4440 	struct wm_softc *sc = ifp->if_softc;
   4441 	struct mbuf *m0;
   4442 	struct m_tag *mtag;
   4443 	struct wm_txsoft *txs;
   4444 	bus_dmamap_t dmamap;
   4445 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   4446 	bus_addr_t curaddr;
   4447 	bus_size_t seglen, curlen;
   4448 	uint32_t cksumcmd;
   4449 	uint8_t cksumfields;
   4450 
   4451 	KASSERT(WM_TX_LOCKED(sc));
   4452 
   4453 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4454 		return;
   4455 
   4456 	/* Remember the previous number of free descriptors. */
   4457 	ofree = sc->sc_txfree;
   4458 
   4459 	/*
   4460 	 * Loop through the send queue, setting up transmit descriptors
   4461 	 * until we drain the queue, or use up all available transmit
   4462 	 * descriptors.
   4463 	 */
   4464 	for (;;) {
   4465 		m0 = NULL;
   4466 
   4467 		/* Get a work queue entry. */
   4468 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4469 			wm_txintr(sc);
   4470 			if (sc->sc_txsfree == 0) {
   4471 				DPRINTF(WM_DEBUG_TX,
   4472 				    ("%s: TX: no free job descriptors\n",
   4473 					device_xname(sc->sc_dev)));
   4474 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4475 				break;
   4476 			}
   4477 		}
   4478 
   4479 		/* Grab a packet off the queue. */
   4480 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4481 		if (m0 == NULL)
   4482 			break;
   4483 
   4484 		DPRINTF(WM_DEBUG_TX,
   4485 		    ("%s: TX: have packet to transmit: %p\n",
   4486 		    device_xname(sc->sc_dev), m0));
   4487 
   4488 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   4489 		dmamap = txs->txs_dmamap;
   4490 
   4491 		use_tso = (m0->m_pkthdr.csum_flags &
   4492 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   4493 
   4494 		/*
   4495 		 * So says the Linux driver:
   4496 		 * The controller does a simple calculation to make sure
   4497 		 * there is enough room in the FIFO before initiating the
   4498 		 * DMA for each buffer.  The calc is:
   4499 		 *	4 = ceil(buffer len / MSS)
   4500 		 * To make sure we don't overrun the FIFO, adjust the max
   4501 		 * buffer len if the MSS drops.
   4502 		 */
   4503 		dmamap->dm_maxsegsz =
   4504 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   4505 		    ? m0->m_pkthdr.segsz << 2
   4506 		    : WTX_MAX_LEN;
   4507 
   4508 		/*
   4509 		 * Load the DMA map.  If this fails, the packet either
   4510 		 * didn't fit in the allotted number of segments, or we
   4511 		 * were short on resources.  For the too-many-segments
   4512 		 * case, we simply report an error and drop the packet,
   4513 		 * since we can't sanely copy a jumbo packet to a single
   4514 		 * buffer.
   4515 		 */
   4516 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   4517 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   4518 		if (error) {
   4519 			if (error == EFBIG) {
   4520 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4521 				log(LOG_ERR, "%s: Tx packet consumes too many "
   4522 				    "DMA segments, dropping...\n",
   4523 				    device_xname(sc->sc_dev));
   4524 				wm_dump_mbuf_chain(sc, m0);
   4525 				m_freem(m0);
   4526 				continue;
   4527 			}
   4528 			/*  Short on resources, just stop for now. */
   4529 			DPRINTF(WM_DEBUG_TX,
   4530 			    ("%s: TX: dmamap load failed: %d\n",
   4531 			    device_xname(sc->sc_dev), error));
   4532 			break;
   4533 		}
   4534 
   4535 		segs_needed = dmamap->dm_nsegs;
   4536 		if (use_tso) {
   4537 			/* For sentinel descriptor; see below. */
   4538 			segs_needed++;
   4539 		}
   4540 
   4541 		/*
   4542 		 * Ensure we have enough descriptors free to describe
   4543 		 * the packet.  Note, we always reserve one descriptor
   4544 		 * at the end of the ring due to the semantics of the
   4545 		 * TDT register, plus one more in the event we need
   4546 		 * to load offload context.
   4547 		 */
   4548 		if (segs_needed > sc->sc_txfree - 2) {
   4549 			/*
   4550 			 * Not enough free descriptors to transmit this
   4551 			 * packet.  We haven't committed anything yet,
   4552 			 * so just unload the DMA map, put the packet
   4553 			 * pack on the queue, and punt.  Notify the upper
    4554 			 * back on the queue, and punt.  Notify the upper
   4555 			 */
   4556 			DPRINTF(WM_DEBUG_TX,
   4557 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   4558 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   4559 			    segs_needed, sc->sc_txfree - 1));
   4560 			ifp->if_flags |= IFF_OACTIVE;
   4561 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4562 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   4563 			break;
   4564 		}
   4565 
   4566 		/*
   4567 		 * Check for 82547 Tx FIFO bug.  We need to do this
   4568 		 * once we know we can transmit the packet, since we
   4569 		 * do some internal FIFO space accounting here.
   4570 		 */
   4571 		if (sc->sc_type == WM_T_82547 &&
   4572 		    wm_82547_txfifo_bugchk(sc, m0)) {
   4573 			DPRINTF(WM_DEBUG_TX,
   4574 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   4575 			    device_xname(sc->sc_dev)));
   4576 			ifp->if_flags |= IFF_OACTIVE;
   4577 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   4578 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   4579 			break;
   4580 		}
   4581 
   4582 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   4583 
   4584 		DPRINTF(WM_DEBUG_TX,
   4585 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   4586 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   4587 
   4588 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   4589 
   4590 		/*
   4591 		 * Store a pointer to the packet so that we can free it
   4592 		 * later.
   4593 		 *
    4594 		 * Initially, we consider the number of descriptors the
    4595 		 * packet uses to be the number of DMA segments.  This may be
   4596 		 * incremented by 1 if we do checksum offload (a descriptor
   4597 		 * is used to set the checksum context).
   4598 		 */
   4599 		txs->txs_mbuf = m0;
   4600 		txs->txs_firstdesc = sc->sc_txnext;
   4601 		txs->txs_ndesc = segs_needed;
   4602 
   4603 		/* Set up offload parameters for this packet. */
   4604 		if (m0->m_pkthdr.csum_flags &
   4605 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   4606 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   4607 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   4608 			if (wm_tx_offload(sc, txs, &cksumcmd,
   4609 					  &cksumfields) != 0) {
   4610 				/* Error message already displayed. */
   4611 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   4612 				continue;
   4613 			}
   4614 		} else {
   4615 			cksumcmd = 0;
   4616 			cksumfields = 0;
   4617 		}
   4618 
   4619 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   4620 
   4621 		/* Sync the DMA map. */
   4622 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   4623 		    BUS_DMASYNC_PREWRITE);
   4624 
   4625 		/* Initialize the transmit descriptor. */
   4626 		for (nexttx = sc->sc_txnext, seg = 0;
   4627 		     seg < dmamap->dm_nsegs; seg++) {
   4628 			for (seglen = dmamap->dm_segs[seg].ds_len,
   4629 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   4630 			     seglen != 0;
   4631 			     curaddr += curlen, seglen -= curlen,
   4632 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   4633 				curlen = seglen;
   4634 
   4635 				/*
   4636 				 * So says the Linux driver:
   4637 				 * Work around for premature descriptor
    4638 				 * Workaround for premature descriptor
   4639 				 * 4-byte sentinel descriptor.
   4640 				 */
   4641 				if (use_tso &&
   4642 				    seg == dmamap->dm_nsegs - 1 &&
   4643 				    curlen > 8)
   4644 					curlen -= 4;
   4645 
   4646 				wm_set_dma_addr(
   4647 				    &sc->sc_txdescs[nexttx].wtx_addr,
   4648 				    curaddr);
   4649 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   4650 				    htole32(cksumcmd | curlen);
   4651 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   4652 				    0;
   4653 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   4654 				    cksumfields;
   4655 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   4656 				lasttx = nexttx;
   4657 
   4658 				DPRINTF(WM_DEBUG_TX,
   4659 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   4660 				     "len %#04zx\n",
   4661 				    device_xname(sc->sc_dev), nexttx,
   4662 				    (uint64_t)curaddr, curlen));
   4663 			}
   4664 		}
   4665 
   4666 		KASSERT(lasttx != -1);
   4667 
   4668 		/*
   4669 		 * Set up the command byte on the last descriptor of
   4670 		 * the packet.  If we're in the interrupt delay window,
   4671 		 * delay the interrupt.
   4672 		 */
   4673 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4674 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   4675 
   4676 		/*
   4677 		 * If VLANs are enabled and the packet has a VLAN tag, set
   4678 		 * up the descriptor to encapsulate the packet for us.
   4679 		 *
   4680 		 * This is only valid on the last descriptor of the packet.
   4681 		 */
   4682 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4683 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   4684 			    htole32(WTX_CMD_VLE);
   4685 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   4686 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   4687 		}
   4688 
   4689 		txs->txs_lastdesc = lasttx;
   4690 
   4691 		DPRINTF(WM_DEBUG_TX,
   4692 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   4693 		    device_xname(sc->sc_dev),
   4694 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   4695 
   4696 		/* Sync the descriptors we're using. */
   4697 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   4698 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   4699 
   4700 		/* Give the packet to the chip. */
   4701 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   4702 
   4703 		DPRINTF(WM_DEBUG_TX,
   4704 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   4705 
   4706 		DPRINTF(WM_DEBUG_TX,
   4707 		    ("%s: TX: finished transmitting packet, job %d\n",
   4708 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   4709 
   4710 		/* Advance the tx pointer. */
   4711 		sc->sc_txfree -= txs->txs_ndesc;
   4712 		sc->sc_txnext = nexttx;
   4713 
   4714 		sc->sc_txsfree--;
   4715 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   4716 
   4717 		/* Pass the packet to any BPF listeners. */
   4718 		bpf_mtap(ifp, m0);
   4719 	}
   4720 
   4721 	if (m0 != NULL) {
   4722 		ifp->if_flags |= IFF_OACTIVE;
   4723 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   4724 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   4725 		m_freem(m0);
   4726 	}
   4727 
   4728 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   4729 		/* No more slots; notify upper layer. */
   4730 		ifp->if_flags |= IFF_OACTIVE;
   4731 	}
   4732 
   4733 	if (sc->sc_txfree != ofree) {
   4734 		/* Set a watchdog timer in case the chip flakes out. */
   4735 		ifp->if_timer = 5;
   4736 	}
   4737 }
   4738 
   4739 /*
   4740  * wm_nq_tx_offload:
   4741  *
   4742  *	Set up TCP/IP checksumming parameters for the
   4743  *	specified packet, for NEWQUEUE devices
   4744  */
   4745 static int
   4746 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   4747     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   4748 {
   4749 	struct mbuf *m0 = txs->txs_mbuf;
   4750 	struct m_tag *mtag;
   4751 	uint32_t vl_len, mssidx, cmdc;
   4752 	struct ether_header *eh;
   4753 	int offset, iphl;
   4754 
   4755 	/*
   4756 	 * XXX It would be nice if the mbuf pkthdr had offset
   4757 	 * fields for the protocol headers.
   4758 	 */
   4759 	*cmdlenp = 0;
   4760 	*fieldsp = 0;
   4761 
   4762 	eh = mtod(m0, struct ether_header *);
    4763 	switch (ntohs(eh->ether_type)) {
   4764 	case ETHERTYPE_IP:
   4765 	case ETHERTYPE_IPV6:
   4766 		offset = ETHER_HDR_LEN;
   4767 		break;
   4768 
   4769 	case ETHERTYPE_VLAN:
   4770 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   4771 		break;
   4772 
   4773 	default:
   4774 		/* Don't support this protocol or encapsulation. */
   4775 		*do_csum = false;
   4776 		return 0;
   4777 	}
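         	/*
         	 * Example: for an untagged frame the MAC header is
         	 * ETHER_HDR_LEN (14) bytes, so offset = 14; an 802.1Q
         	 * tagged frame adds ETHER_VLAN_ENCAP_LEN (4), giving
         	 * offset = 18.
         	 */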
   4778 	*do_csum = true;
   4779 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   4780 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   4781 
   4782 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   4783 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   4784 
   4785 	if ((m0->m_pkthdr.csum_flags &
   4786 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   4787 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   4788 	} else {
   4789 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   4790 	}
   4791 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   4792 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   4793 
   4794 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   4795 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   4796 		     << NQTXC_VLLEN_VLAN_SHIFT);
   4797 		*cmdlenp |= NQTX_CMD_VLE;
   4798 	}
   4799 
   4800 	mssidx = 0;
   4801 
   4802 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   4803 		int hlen = offset + iphl;
   4804 		int tcp_hlen;
   4805 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   4806 
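         		/*
         		 * In both paths below, the IP length field is zeroed
         		 * and th_sum is seeded with a pseudo-header checksum
         		 * computed without a length: the hardware fills in
         		 * the per-segment length and finishes the TCP
         		 * checksum for every segment it generates.
         		 */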
   4807 		if (__predict_false(m0->m_len <
   4808 				    (hlen + sizeof(struct tcphdr)))) {
   4809 			/*
   4810 			 * TCP/IP headers are not in the first mbuf; we need
   4811 			 * to do this the slow and painful way.  Let's just
   4812 			 * hope this doesn't happen very often.
   4813 			 */
   4814 			struct tcphdr th;
   4815 
   4816 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   4817 
   4818 			m_copydata(m0, hlen, sizeof(th), &th);
   4819 			if (v4) {
   4820 				struct ip ip;
   4821 
   4822 				m_copydata(m0, offset, sizeof(ip), &ip);
   4823 				ip.ip_len = 0;
   4824 				m_copyback(m0,
   4825 				    offset + offsetof(struct ip, ip_len),
   4826 				    sizeof(ip.ip_len), &ip.ip_len);
   4827 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   4828 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   4829 			} else {
   4830 				struct ip6_hdr ip6;
   4831 
   4832 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   4833 				ip6.ip6_plen = 0;
   4834 				m_copyback(m0,
   4835 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   4836 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   4837 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   4838 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   4839 			}
   4840 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   4841 			    sizeof(th.th_sum), &th.th_sum);
   4842 
   4843 			tcp_hlen = th.th_off << 2;
   4844 		} else {
   4845 			/*
   4846 			 * TCP/IP headers are in the first mbuf; we can do
   4847 			 * this the easy way.
   4848 			 */
   4849 			struct tcphdr *th;
   4850 
   4851 			if (v4) {
   4852 				struct ip *ip =
   4853 				    (void *)(mtod(m0, char *) + offset);
   4854 				th = (void *)(mtod(m0, char *) + hlen);
   4855 
   4856 				ip->ip_len = 0;
   4857 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   4858 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   4859 			} else {
   4860 				struct ip6_hdr *ip6 =
   4861 				    (void *)(mtod(m0, char *) + offset);
   4862 				th = (void *)(mtod(m0, char *) + hlen);
   4863 
   4864 				ip6->ip6_plen = 0;
   4865 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   4866 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   4867 			}
   4868 			tcp_hlen = th->th_off << 2;
   4869 		}
   4870 		hlen += tcp_hlen;
   4871 		*cmdlenp |= NQTX_CMD_TSE;
   4872 
   4873 		if (v4) {
   4874 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   4875 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   4876 		} else {
   4877 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   4878 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   4879 		}
   4880 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   4881 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4882 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   4883 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   4884 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   4885 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
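         		/*
         		 * Example: with a 1500-byte MTU and no TCP options,
         		 * segsz (the MSS) is 1460 and tcp_hlen is 20, so the
         		 * context descriptor's mssidx carries 1460 in its MSS
         		 * field and 20 in its L4LEN field.
         		 */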
   4886 	} else {
   4887 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   4888 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   4889 	}
   4890 
   4891 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   4892 		*fieldsp |= NQTXD_FIELDS_IXSM;
   4893 		cmdc |= NQTXC_CMD_IP4;
   4894 	}
   4895 
   4896 	if (m0->m_pkthdr.csum_flags &
   4897 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4898 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   4899 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   4900 			cmdc |= NQTXC_CMD_TCP;
   4901 		} else {
   4902 			cmdc |= NQTXC_CMD_UDP;
   4903 		}
   4904 		cmdc |= NQTXC_CMD_IP4;
   4905 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4906 	}
   4907 	if (m0->m_pkthdr.csum_flags &
   4908 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4909 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   4910 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   4911 			cmdc |= NQTXC_CMD_TCP;
   4912 		} else {
   4913 			cmdc |= NQTXC_CMD_UDP;
   4914 		}
   4915 		cmdc |= NQTXC_CMD_IP6;
   4916 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   4917 	}
   4918 
   4919 	/* Fill in the context descriptor. */
   4920 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
   4921 	    htole32(vl_len);
   4922 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
   4923 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
   4924 	    htole32(cmdc);
   4925 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
   4926 	    htole32(mssidx);
   4927 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   4928 	DPRINTF(WM_DEBUG_TX,
   4929 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   4930 	    sc->sc_txnext, 0, vl_len));
   4931 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   4932 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   4933 	txs->txs_ndesc++;
   4934 	return 0;
   4935 }
   4936 
   4937 /*
   4938  * wm_nq_start:		[ifnet interface function]
   4939  *
   4940  *	Start packet transmission on the interface for NEWQUEUE devices
   4941  */
   4942 static void
   4943 wm_nq_start(struct ifnet *ifp)
   4944 {
   4945 	struct wm_softc *sc = ifp->if_softc;
   4946 
   4947 	WM_TX_LOCK(sc);
   4948 	if (!sc->sc_stopping)
   4949 		wm_nq_start_locked(ifp);
   4950 	WM_TX_UNLOCK(sc);
   4951 }
   4952 
   4953 static void
   4954 wm_nq_start_locked(struct ifnet *ifp)
   4955 {
   4956 	struct wm_softc *sc = ifp->if_softc;
   4957 	struct mbuf *m0;
   4958 	struct m_tag *mtag;
   4959 	struct wm_txsoft *txs;
   4960 	bus_dmamap_t dmamap;
   4961 	int error, nexttx, lasttx = -1, seg, segs_needed;
   4962 	bool do_csum, sent;
   4963 
   4964 	KASSERT(WM_TX_LOCKED(sc));
   4965 
   4966 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   4967 		return;
   4968 
   4969 	sent = false;
   4970 
   4971 	/*
   4972 	 * Loop through the send queue, setting up transmit descriptors
   4973 	 * until we drain the queue, or use up all available transmit
   4974 	 * descriptors.
   4975 	 */
   4976 	for (;;) {
   4977 		m0 = NULL;
   4978 
   4979 		/* Get a work queue entry. */
   4980 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   4981 			wm_txintr(sc);
   4982 			if (sc->sc_txsfree == 0) {
   4983 				DPRINTF(WM_DEBUG_TX,
   4984 				    ("%s: TX: no free job descriptors\n",
   4985 					device_xname(sc->sc_dev)));
   4986 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   4987 				break;
   4988 			}
   4989 		}
   4990 
   4991 		/* Grab a packet off the queue. */
   4992 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   4993 		if (m0 == NULL)
   4994 			break;
   4995 
   4996 		DPRINTF(WM_DEBUG_TX,
   4997 		    ("%s: TX: have packet to transmit: %p\n",
   4998 		    device_xname(sc->sc_dev), m0));
   4999 
   5000 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   5001 		dmamap = txs->txs_dmamap;
   5002 
   5003 		/*
   5004 		 * Load the DMA map.  If this fails, the packet either
   5005 		 * didn't fit in the allotted number of segments, or we
   5006 		 * were short on resources.  For the too-many-segments
   5007 		 * case, we simply report an error and drop the packet,
   5008 		 * since we can't sanely copy a jumbo packet to a single
   5009 		 * buffer.
   5010 		 */
   5011 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   5012 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   5013 		if (error) {
   5014 			if (error == EFBIG) {
   5015 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5016 				log(LOG_ERR, "%s: Tx packet consumes too many "
   5017 				    "DMA segments, dropping...\n",
   5018 				    device_xname(sc->sc_dev));
   5019 				wm_dump_mbuf_chain(sc, m0);
   5020 				m_freem(m0);
   5021 				continue;
   5022 			}
   5023 			/* Short on resources, just stop for now. */
   5024 			DPRINTF(WM_DEBUG_TX,
   5025 			    ("%s: TX: dmamap load failed: %d\n",
   5026 			    device_xname(sc->sc_dev), error));
   5027 			break;
   5028 		}
   5029 
   5030 		segs_needed = dmamap->dm_nsegs;
   5031 
   5032 		/*
   5033 		 * Ensure we have enough descriptors free to describe
   5034 		 * the packet.  Note, we always reserve one descriptor
   5035 		 * at the end of the ring due to the semantics of the
   5036 		 * TDT register, plus one more in the event we need
   5037 		 * to load offload context.
   5038 		 */
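         		/*
         		 * Illustrative arithmetic: with sc_txfree == 10 the
         		 * check below admits packets of at most 8 segments;
         		 * one slot is held back for the TDT semantics and one
         		 * for a possible context descriptor.
         		 */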
   5039 		if (segs_needed > sc->sc_txfree - 2) {
    5040 			/*
    5041 			 * Not enough free descriptors to transmit this
    5042 			 * packet.  We haven't committed anything yet, so
    5043 			 * just unload the DMA map and punt; the packet is
    5044 			 * dropped by the error path below the loop.  Notify
    5045 			 * the upper layer that there are no more slots left.
    5046 			 */
   5047 			DPRINTF(WM_DEBUG_TX,
   5048 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   5049 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   5050 			    segs_needed, sc->sc_txfree - 1));
   5051 			ifp->if_flags |= IFF_OACTIVE;
   5052 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   5053 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   5054 			break;
   5055 		}
   5056 
   5057 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   5058 
   5059 		DPRINTF(WM_DEBUG_TX,
   5060 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   5061 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   5062 
   5063 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   5064 
   5065 		/*
   5066 		 * Store a pointer to the packet so that we can free it
   5067 		 * later.
   5068 		 *
    5069 		 * Initially, we consider the number of descriptors the
    5070 		 * packet uses to be the number of DMA segments.  This
    5071 		 * may be incremented by 1 if we do checksum offload (a
    5072 		 * descriptor is used to set the checksum context).
   5073 		 */
   5074 		txs->txs_mbuf = m0;
   5075 		txs->txs_firstdesc = sc->sc_txnext;
   5076 		txs->txs_ndesc = segs_needed;
   5077 
   5078 		/* Set up offload parameters for this packet. */
   5079 		uint32_t cmdlen, fields, dcmdlen;
   5080 		if (m0->m_pkthdr.csum_flags &
   5081 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   5082 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   5083 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   5084 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   5085 			    &do_csum) != 0) {
   5086 				/* Error message already displayed. */
   5087 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   5088 				continue;
   5089 			}
   5090 		} else {
   5091 			do_csum = false;
   5092 			cmdlen = 0;
   5093 			fields = 0;
   5094 		}
   5095 
   5096 		/* Sync the DMA map. */
   5097 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   5098 		    BUS_DMASYNC_PREWRITE);
   5099 
   5100 		/* Initialize the first transmit descriptor. */
   5101 		nexttx = sc->sc_txnext;
   5102 		if (!do_csum) {
    5103 			/* Set up a legacy descriptor */
   5104 			wm_set_dma_addr(
   5105 			    &sc->sc_txdescs[nexttx].wtx_addr,
   5106 			    dmamap->dm_segs[0].ds_addr);
   5107 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   5108 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   5109 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   5110 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
   5111 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   5112 			    NULL) {
   5113 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
   5114 				    htole32(WTX_CMD_VLE);
   5115 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
   5116 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   5117 			} else {
   5118 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   5119 			}
   5120 			dcmdlen = 0;
   5121 		} else {
    5122 			/* Set up an advanced data descriptor */
   5123 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5124 			    htole64(dmamap->dm_segs[0].ds_addr);
   5125 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   5126 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
    5127 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   5128 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
   5129 			    htole32(fields);
   5130 			DPRINTF(WM_DEBUG_TX,
   5131 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   5132 			    device_xname(sc->sc_dev), nexttx,
   5133 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   5134 			DPRINTF(WM_DEBUG_TX,
   5135 			    ("\t 0x%08x%08x\n", fields,
   5136 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   5137 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   5138 		}
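         		/*
         		 * To recap the two layouts set up above: a legacy
         		 * descriptor carries the buffer address plus the
         		 * cmdlen/status/options/vlan fields, while an advanced
         		 * (NEWQUEUE) data descriptor tags itself with
         		 * NQTX_DTYP_D | NQTX_CMD_DEXT in its cmdlen and moves
         		 * the offload flags into nqtxd_fields.
         		 */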
   5139 
   5140 		lasttx = nexttx;
   5141 		nexttx = WM_NEXTTX(sc, nexttx);
    5142 		/*
    5143 		 * Fill in the next descriptors.  The legacy and advanced
    5144 		 * formats are laid out the same here.
    5145 		 */
   5146 		for (seg = 1; seg < dmamap->dm_nsegs;
   5147 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
   5148 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
   5149 			    htole64(dmamap->dm_segs[seg].ds_addr);
   5150 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
   5151 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   5152 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   5153 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
   5154 			lasttx = nexttx;
   5155 
   5156 			DPRINTF(WM_DEBUG_TX,
   5157 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   5158 			     "len %#04zx\n",
   5159 			    device_xname(sc->sc_dev), nexttx,
   5160 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   5161 			    dmamap->dm_segs[seg].ds_len));
   5162 		}
   5163 
   5164 		KASSERT(lasttx != -1);
   5165 
   5166 		/*
   5167 		 * Set up the command byte on the last descriptor of
   5168 		 * the packet.  If we're in the interrupt delay window,
   5169 		 * delay the interrupt.
   5170 		 */
   5171 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   5172 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   5173 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   5174 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   5175 
   5176 		txs->txs_lastdesc = lasttx;
   5177 
   5178 		DPRINTF(WM_DEBUG_TX,
   5179 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   5180 		    device_xname(sc->sc_dev),
   5181 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   5182 
   5183 		/* Sync the descriptors we're using. */
   5184 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   5185 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5186 
   5187 		/* Give the packet to the chip. */
   5188 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   5189 		sent = true;
   5190 
   5191 		DPRINTF(WM_DEBUG_TX,
   5192 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   5193 
   5194 		DPRINTF(WM_DEBUG_TX,
   5195 		    ("%s: TX: finished transmitting packet, job %d\n",
   5196 		    device_xname(sc->sc_dev), sc->sc_txsnext));
   5197 
   5198 		/* Advance the tx pointer. */
   5199 		sc->sc_txfree -= txs->txs_ndesc;
   5200 		sc->sc_txnext = nexttx;
   5201 
   5202 		sc->sc_txsfree--;
   5203 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   5204 
   5205 		/* Pass the packet to any BPF listeners. */
   5206 		bpf_mtap(ifp, m0);
   5207 	}
   5208 
   5209 	if (m0 != NULL) {
   5210 		ifp->if_flags |= IFF_OACTIVE;
   5211 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   5212 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   5213 		m_freem(m0);
   5214 	}
   5215 
   5216 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   5217 		/* No more slots; notify upper layer. */
   5218 		ifp->if_flags |= IFF_OACTIVE;
   5219 	}
   5220 
   5221 	if (sent) {
   5222 		/* Set a watchdog timer in case the chip flakes out. */
   5223 		ifp->if_timer = 5;
   5224 	}
   5225 }
   5226 
   5227 /* Interrupt */
   5228 
   5229 /*
   5230  * wm_txintr:
   5231  *
   5232  *	Helper; handle transmit interrupts.
   5233  */
   5234 static void
   5235 wm_txintr(struct wm_softc *sc)
   5236 {
   5237 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5238 	struct wm_txsoft *txs;
   5239 	uint8_t status;
   5240 	int i;
   5241 
   5242 	if (sc->sc_stopping)
   5243 		return;
   5244 
   5245 	ifp->if_flags &= ~IFF_OACTIVE;
   5246 
   5247 	/*
   5248 	 * Go through the Tx list and free mbufs for those
   5249 	 * frames which have been transmitted.
   5250 	 */
   5251 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   5252 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   5253 		txs = &sc->sc_txsoft[i];
   5254 
   5255 		DPRINTF(WM_DEBUG_TX,
   5256 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   5257 
   5258 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   5259 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5260 
   5261 		status =
   5262 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   5263 		if ((status & WTX_ST_DD) == 0) {
   5264 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   5265 			    BUS_DMASYNC_PREREAD);
   5266 			break;
   5267 		}
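         		/*
         		 * WTX_ST_DD (descriptor done) is written back only on
         		 * descriptors that requested report status
         		 * (WTX_CMD_RS), which we set on the last descriptor of
         		 * each job; that is why only txs_lastdesc is checked
         		 * above.
         		 */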
   5268 
   5269 		DPRINTF(WM_DEBUG_TX,
   5270 		    ("%s: TX: job %d done: descs %d..%d\n",
   5271 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   5272 		    txs->txs_lastdesc));
   5273 
   5274 		/*
   5275 		 * XXX We should probably be using the statistics
   5276 		 * XXX registers, but I don't know if they exist
   5277 		 * XXX on chips before the i82544.
   5278 		 */
   5279 
   5280 #ifdef WM_EVENT_COUNTERS
   5281 		if (status & WTX_ST_TU)
   5282 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   5283 #endif /* WM_EVENT_COUNTERS */
   5284 
   5285 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   5286 			ifp->if_oerrors++;
   5287 			if (status & WTX_ST_LC)
   5288 				log(LOG_WARNING, "%s: late collision\n",
   5289 				    device_xname(sc->sc_dev));
   5290 			else if (status & WTX_ST_EC) {
   5291 				ifp->if_collisions += 16;
   5292 				log(LOG_WARNING, "%s: excessive collisions\n",
   5293 				    device_xname(sc->sc_dev));
   5294 			}
   5295 		} else
   5296 			ifp->if_opackets++;
   5297 
   5298 		sc->sc_txfree += txs->txs_ndesc;
   5299 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   5300 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   5301 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5302 		m_freem(txs->txs_mbuf);
   5303 		txs->txs_mbuf = NULL;
   5304 	}
   5305 
   5306 	/* Update the dirty transmit buffer pointer. */
   5307 	sc->sc_txsdirty = i;
   5308 	DPRINTF(WM_DEBUG_TX,
   5309 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   5310 
   5311 	/*
   5312 	 * If there are no more pending transmissions, cancel the watchdog
   5313 	 * timer.
   5314 	 */
   5315 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   5316 		ifp->if_timer = 0;
   5317 }
   5318 
   5319 /*
   5320  * wm_rxintr:
   5321  *
   5322  *	Helper; handle receive interrupts.
   5323  */
   5324 static void
   5325 wm_rxintr(struct wm_softc *sc)
   5326 {
   5327 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5328 	struct wm_rxsoft *rxs;
   5329 	struct mbuf *m;
   5330 	int i, len;
   5331 	uint8_t status, errors;
   5332 	uint16_t vlantag;
   5333 
   5334 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   5335 		rxs = &sc->sc_rxsoft[i];
   5336 
   5337 		DPRINTF(WM_DEBUG_RX,
   5338 		    ("%s: RX: checking descriptor %d\n",
   5339 		    device_xname(sc->sc_dev), i));
   5340 
   5341 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   5342 
   5343 		status = sc->sc_rxdescs[i].wrx_status;
   5344 		errors = sc->sc_rxdescs[i].wrx_errors;
   5345 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   5346 		vlantag = sc->sc_rxdescs[i].wrx_special;
   5347 
   5348 		if ((status & WRX_ST_DD) == 0) {
   5349 			/* We have processed all of the receive descriptors. */
   5350 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   5351 			break;
   5352 		}
   5353 
   5354 		if (__predict_false(sc->sc_rxdiscard)) {
   5355 			DPRINTF(WM_DEBUG_RX,
   5356 			    ("%s: RX: discarding contents of descriptor %d\n",
   5357 			    device_xname(sc->sc_dev), i));
   5358 			WM_INIT_RXDESC(sc, i);
   5359 			if (status & WRX_ST_EOP) {
   5360 				/* Reset our state. */
   5361 				DPRINTF(WM_DEBUG_RX,
   5362 				    ("%s: RX: resetting rxdiscard -> 0\n",
   5363 				    device_xname(sc->sc_dev)));
   5364 				sc->sc_rxdiscard = 0;
   5365 			}
   5366 			continue;
   5367 		}
   5368 
   5369 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5370 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   5371 
   5372 		m = rxs->rxs_mbuf;
   5373 
   5374 		/*
   5375 		 * Add a new receive buffer to the ring, unless of
   5376 		 * course the length is zero. Treat the latter as a
   5377 		 * failed mapping.
   5378 		 */
   5379 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
   5380 			/*
   5381 			 * Failed, throw away what we've done so
   5382 			 * far, and discard the rest of the packet.
   5383 			 */
   5384 			ifp->if_ierrors++;
   5385 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5386 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5387 			WM_INIT_RXDESC(sc, i);
   5388 			if ((status & WRX_ST_EOP) == 0)
   5389 				sc->sc_rxdiscard = 1;
   5390 			if (sc->sc_rxhead != NULL)
   5391 				m_freem(sc->sc_rxhead);
   5392 			WM_RXCHAIN_RESET(sc);
   5393 			DPRINTF(WM_DEBUG_RX,
   5394 			    ("%s: RX: Rx buffer allocation failed, "
   5395 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   5396 			    sc->sc_rxdiscard ? " (discard)" : ""));
   5397 			continue;
   5398 		}
   5399 
   5400 		m->m_len = len;
   5401 		sc->sc_rxlen += len;
   5402 		DPRINTF(WM_DEBUG_RX,
   5403 		    ("%s: RX: buffer at %p len %d\n",
   5404 		    device_xname(sc->sc_dev), m->m_data, len));
   5405 
   5406 		/* If this is not the end of the packet, keep looking. */
   5407 		if ((status & WRX_ST_EOP) == 0) {
   5408 			WM_RXCHAIN_LINK(sc, m);
   5409 			DPRINTF(WM_DEBUG_RX,
   5410 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   5411 			    device_xname(sc->sc_dev), sc->sc_rxlen));
   5412 			continue;
   5413 		}
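         		/*
         		 * Example: a 9000-byte jumbo frame received into
         		 * 2048-byte (MCLBYTES) cluster buffers spans five
         		 * descriptors; only the fifth has WRX_ST_EOP set, so
         		 * the first four are simply linked onto the rx chain
         		 * above.  (The buffer size is an assumption; it
         		 * depends on how the rx buffers were allocated.)
         		 */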
   5414 
    5415 		/*
    5416 		 * Okay, we have the entire packet now.  The chip is
    5417 		 * configured to include the FCS except on the I350/I354
    5418 		 * and I21[01] (not all chips can be configured to strip
    5419 		 * it), so we need to trim it.  We may also need to adjust
    5420 		 * the length of the previous mbuf in the chain if the
    5421 		 * current mbuf is too short.  Due to an erratum, RCTL_SECRC
    5422 		 * in the RCTL register is always set on the I350, so the
    5423 		 * FCS is already stripped there and we don't trim it.
    5424 		 */
   5425 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   5426 		    && (sc->sc_type != WM_T_I210)
   5427 		    && (sc->sc_type != WM_T_I211)) {
   5428 			if (m->m_len < ETHER_CRC_LEN) {
   5429 				sc->sc_rxtail->m_len
   5430 				    -= (ETHER_CRC_LEN - m->m_len);
   5431 				m->m_len = 0;
   5432 			} else
   5433 				m->m_len -= ETHER_CRC_LEN;
   5434 			len = sc->sc_rxlen - ETHER_CRC_LEN;
   5435 		} else
   5436 			len = sc->sc_rxlen;
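         		/*
         		 * Trimming example: if only 2 of the 4 FCS bytes
         		 * landed in the final mbuf, the code above zeroes
         		 * that mbuf and shaves the other 2 bytes off the
         		 * previous one (sc_rxtail).
         		 */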
   5437 
   5438 		WM_RXCHAIN_LINK(sc, m);
   5439 
   5440 		*sc->sc_rxtailp = NULL;
   5441 		m = sc->sc_rxhead;
   5442 
   5443 		WM_RXCHAIN_RESET(sc);
   5444 
   5445 		DPRINTF(WM_DEBUG_RX,
   5446 		    ("%s: RX: have entire packet, len -> %d\n",
   5447 		    device_xname(sc->sc_dev), len));
   5448 
   5449 		/* If an error occurred, update stats and drop the packet. */
   5450 		if (errors &
   5451 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   5452 			if (errors & WRX_ER_SE)
   5453 				log(LOG_WARNING, "%s: symbol error\n",
   5454 				    device_xname(sc->sc_dev));
   5455 			else if (errors & WRX_ER_SEQ)
   5456 				log(LOG_WARNING, "%s: receive sequence error\n",
   5457 				    device_xname(sc->sc_dev));
   5458 			else if (errors & WRX_ER_CE)
   5459 				log(LOG_WARNING, "%s: CRC error\n",
   5460 				    device_xname(sc->sc_dev));
   5461 			m_freem(m);
   5462 			continue;
   5463 		}
   5464 
   5465 		/* No errors.  Receive the packet. */
   5466 		m->m_pkthdr.rcvif = ifp;
   5467 		m->m_pkthdr.len = len;
   5468 
   5469 		/*
   5470 		 * If VLANs are enabled, VLAN packets have been unwrapped
   5471 		 * for us.  Associate the tag with the packet.
   5472 		 */
    5473 		/* XXX should check for I350 and I354 */
   5474 		if ((status & WRX_ST_VP) != 0) {
   5475 			VLAN_INPUT_TAG(ifp, m,
   5476 			    le16toh(vlantag),
   5477 			    continue);
   5478 		}
   5479 
   5480 		/* Set up checksum info for this packet. */
   5481 		if ((status & WRX_ST_IXSM) == 0) {
   5482 			if (status & WRX_ST_IPCS) {
   5483 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   5484 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   5485 				if (errors & WRX_ER_IPE)
   5486 					m->m_pkthdr.csum_flags |=
   5487 					    M_CSUM_IPv4_BAD;
   5488 			}
   5489 			if (status & WRX_ST_TCPCS) {
   5490 				/*
   5491 				 * Note: we don't know if this was TCP or UDP,
   5492 				 * so we just set both bits, and expect the
   5493 				 * upper layers to deal.
   5494 				 */
   5495 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   5496 				m->m_pkthdr.csum_flags |=
   5497 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   5498 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   5499 				if (errors & WRX_ER_TCPE)
   5500 					m->m_pkthdr.csum_flags |=
   5501 					    M_CSUM_TCP_UDP_BAD;
   5502 			}
   5503 		}
   5504 
   5505 		ifp->if_ipackets++;
   5506 
   5507 		WM_RX_UNLOCK(sc);
   5508 
   5509 		/* Pass this up to any BPF listeners. */
   5510 		bpf_mtap(ifp, m);
   5511 
   5512 		/* Pass it on. */
   5513 		(*ifp->if_input)(ifp, m);
   5514 
   5515 		WM_RX_LOCK(sc);
   5516 
   5517 		if (sc->sc_stopping)
   5518 			break;
   5519 	}
   5520 
   5521 	/* Update the receive pointer. */
   5522 	sc->sc_rxptr = i;
   5523 
   5524 	DPRINTF(WM_DEBUG_RX,
   5525 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   5526 }
   5527 
   5528 /*
   5529  * wm_linkintr_gmii:
   5530  *
   5531  *	Helper; handle link interrupts for GMII.
   5532  */
   5533 static void
   5534 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   5535 {
   5536 
   5537 	KASSERT(WM_TX_LOCKED(sc));
   5538 
   5539 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5540 		__func__));
   5541 
   5542 	if (icr & ICR_LSC) {
   5543 		DPRINTF(WM_DEBUG_LINK,
   5544 		    ("%s: LINK: LSC -> mii_pollstat\n",
   5545 			device_xname(sc->sc_dev)));
   5546 		mii_pollstat(&sc->sc_mii);
   5547 		if (sc->sc_type == WM_T_82543) {
   5548 			int miistatus, active;
   5549 
   5550 			/*
   5551 			 * With 82543, we need to force speed and
   5552 			 * duplex on the MAC equal to what the PHY
   5553 			 * speed and duplex configuration is.
   5554 			 */
   5555 			miistatus = sc->sc_mii.mii_media_status;
   5556 
   5557 			if (miistatus & IFM_ACTIVE) {
   5558 				active = sc->sc_mii.mii_media_active;
   5559 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   5560 				switch (IFM_SUBTYPE(active)) {
   5561 				case IFM_10_T:
   5562 					sc->sc_ctrl |= CTRL_SPEED_10;
   5563 					break;
   5564 				case IFM_100_TX:
   5565 					sc->sc_ctrl |= CTRL_SPEED_100;
   5566 					break;
   5567 				case IFM_1000_T:
   5568 					sc->sc_ctrl |= CTRL_SPEED_1000;
   5569 					break;
   5570 				default:
   5571 					/*
    5572 					 * Fiber?
    5573 					 * Should not enter here.
   5574 					 */
   5575 					printf("unknown media (%x)\n",
   5576 					    active);
   5577 					break;
   5578 				}
   5579 				if (active & IFM_FDX)
   5580 					sc->sc_ctrl |= CTRL_FD;
   5581 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5582 			}
   5583 		} else if ((sc->sc_type == WM_T_ICH8)
   5584 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   5585 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   5586 		} else if (sc->sc_type == WM_T_PCH) {
   5587 			wm_k1_gig_workaround_hv(sc,
   5588 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   5589 		}
   5590 
   5591 		if ((sc->sc_phytype == WMPHY_82578)
   5592 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   5593 			== IFM_1000_T)) {
   5594 
   5595 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   5596 				delay(200*1000); /* XXX too big */
   5597 
   5598 				/* Link stall fix for link up */
   5599 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5600 				    HV_MUX_DATA_CTRL,
   5601 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   5602 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   5603 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   5604 				    HV_MUX_DATA_CTRL,
   5605 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   5606 			}
   5607 		}
   5608 	} else if (icr & ICR_RXSEQ) {
   5609 		DPRINTF(WM_DEBUG_LINK,
    5610 		    ("%s: LINK: Receive sequence error\n",
   5611 			device_xname(sc->sc_dev)));
   5612 	}
   5613 }
   5614 
   5615 /*
   5616  * wm_linkintr_tbi:
   5617  *
   5618  *	Helper; handle link interrupts for TBI mode.
   5619  */
   5620 static void
   5621 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   5622 {
   5623 	uint32_t status;
   5624 
   5625 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   5626 		__func__));
   5627 
   5628 	status = CSR_READ(sc, WMREG_STATUS);
   5629 	if (icr & ICR_LSC) {
   5630 		if (status & STATUS_LU) {
   5631 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   5632 			    device_xname(sc->sc_dev),
   5633 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   5634 			/*
   5635 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   5636 			 * so we should update sc->sc_ctrl
   5637 			 */
   5638 
   5639 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5640 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   5641 			sc->sc_fcrtl &= ~FCRTL_XONE;
   5642 			if (status & STATUS_FD)
   5643 				sc->sc_tctl |=
   5644 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5645 			else
   5646 				sc->sc_tctl |=
   5647 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   5648 			if (sc->sc_ctrl & CTRL_TFCE)
   5649 				sc->sc_fcrtl |= FCRTL_XONE;
   5650 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5651 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   5652 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   5653 				      sc->sc_fcrtl);
   5654 			sc->sc_tbi_linkup = 1;
   5655 		} else {
   5656 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   5657 			    device_xname(sc->sc_dev)));
   5658 			sc->sc_tbi_linkup = 0;
   5659 		}
   5660 		wm_tbi_set_linkled(sc);
   5661 	} else if (icr & ICR_RXCFG) {
   5662 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
   5663 		    device_xname(sc->sc_dev)));
   5664 		sc->sc_tbi_nrxcfg++;
   5665 		wm_check_for_link(sc);
   5666 	} else if (icr & ICR_RXSEQ) {
   5667 		DPRINTF(WM_DEBUG_LINK,
   5668 		    ("%s: LINK: Receive sequence error\n",
   5669 		    device_xname(sc->sc_dev)));
   5670 	}
   5671 }
   5672 
   5673 /*
   5674  * wm_linkintr:
   5675  *
   5676  *	Helper; handle link interrupts.
   5677  */
   5678 static void
   5679 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   5680 {
   5681 
   5682 	if (sc->sc_flags & WM_F_HAS_MII)
   5683 		wm_linkintr_gmii(sc, icr);
   5684 	else
   5685 		wm_linkintr_tbi(sc, icr);
   5686 }
   5687 
   5688 /*
   5689  * wm_intr:
   5690  *
   5691  *	Interrupt service routine.
   5692  */
   5693 static int
   5694 wm_intr(void *arg)
   5695 {
   5696 	struct wm_softc *sc = arg;
   5697 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   5698 	uint32_t icr;
   5699 	int handled = 0;
   5700 
   5701 	while (1 /* CONSTCOND */) {
   5702 		icr = CSR_READ(sc, WMREG_ICR);
   5703 		if ((icr & sc->sc_icr) == 0)
   5704 			break;
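         		/*
         		 * ICR is clear-on-read: the read above both fetches
         		 * and acknowledges the pending causes, so we keep
         		 * looping until a read shows none of the bits enabled
         		 * in sc_icr.
         		 */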
   5705 		rnd_add_uint32(&sc->rnd_source, icr);
   5706 
   5707 		WM_RX_LOCK(sc);
   5708 
   5709 		if (sc->sc_stopping) {
   5710 			WM_RX_UNLOCK(sc);
   5711 			break;
   5712 		}
   5713 
   5714 		handled = 1;
   5715 
   5716 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5717 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   5718 			DPRINTF(WM_DEBUG_RX,
   5719 			    ("%s: RX: got Rx intr 0x%08x\n",
   5720 			    device_xname(sc->sc_dev),
   5721 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   5722 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   5723 		}
   5724 #endif
   5725 		wm_rxintr(sc);
   5726 
   5727 		WM_RX_UNLOCK(sc);
   5728 		WM_TX_LOCK(sc);
   5729 
   5730 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   5731 		if (icr & ICR_TXDW) {
   5732 			DPRINTF(WM_DEBUG_TX,
   5733 			    ("%s: TX: got TXDW interrupt\n",
   5734 			    device_xname(sc->sc_dev)));
   5735 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   5736 		}
   5737 #endif
   5738 		wm_txintr(sc);
   5739 
   5740 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   5741 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   5742 			wm_linkintr(sc, icr);
   5743 		}
   5744 
   5745 		WM_TX_UNLOCK(sc);
   5746 
   5747 		if (icr & ICR_RXO) {
   5748 #if defined(WM_DEBUG)
   5749 			log(LOG_WARNING, "%s: Receive overrun\n",
   5750 			    device_xname(sc->sc_dev));
   5751 #endif /* defined(WM_DEBUG) */
   5752 		}
   5753 	}
   5754 
   5755 	if (handled) {
   5756 		/* Try to get more packets going. */
   5757 		ifp->if_start(ifp);
   5758 	}
   5759 
   5760 	return handled;
   5761 }
   5762 
   5763 /*
   5764  * Media related.
   5765  * GMII, SGMII, TBI (and SERDES)
   5766  */
   5767 
   5768 /* GMII related */
   5769 
   5770 /*
   5771  * wm_gmii_reset:
   5772  *
   5773  *	Reset the PHY.
   5774  */
   5775 static void
   5776 wm_gmii_reset(struct wm_softc *sc)
   5777 {
   5778 	uint32_t reg;
   5779 	int rv;
   5780 
   5781 	/* get phy semaphore */
   5782 	switch (sc->sc_type) {
   5783 	case WM_T_82571:
   5784 	case WM_T_82572:
   5785 	case WM_T_82573:
   5786 	case WM_T_82574:
   5787 	case WM_T_82583:
   5788 		 /* XXX should get sw semaphore, too */
   5789 		rv = wm_get_swsm_semaphore(sc);
   5790 		break;
   5791 	case WM_T_82575:
   5792 	case WM_T_82576:
   5793 	case WM_T_82580:
   5794 	case WM_T_82580ER:
   5795 	case WM_T_I350:
   5796 	case WM_T_I354:
   5797 	case WM_T_I210:
   5798 	case WM_T_I211:
   5799 	case WM_T_80003:
   5800 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5801 		break;
   5802 	case WM_T_ICH8:
   5803 	case WM_T_ICH9:
   5804 	case WM_T_ICH10:
   5805 	case WM_T_PCH:
   5806 	case WM_T_PCH2:
   5807 	case WM_T_PCH_LPT:
   5808 		rv = wm_get_swfwhw_semaphore(sc);
   5809 		break;
   5810 	default:
    5811 		/* nothing to do */
   5812 		rv = 0;
   5813 		break;
   5814 	}
   5815 	if (rv != 0) {
   5816 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   5817 		    __func__);
   5818 		return;
   5819 	}
   5820 
   5821 	switch (sc->sc_type) {
   5822 	case WM_T_82542_2_0:
   5823 	case WM_T_82542_2_1:
   5824 		/* null */
   5825 		break;
   5826 	case WM_T_82543:
   5827 		/*
   5828 		 * With 82543, we need to force speed and duplex on the MAC
   5829 		 * equal to what the PHY speed and duplex configuration is.
   5830 		 * In addition, we need to perform a hardware reset on the PHY
   5831 		 * to take it out of reset.
   5832 		 */
   5833 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   5834 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5835 
   5836 		/* The PHY reset pin is active-low. */
   5837 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5838 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   5839 		    CTRL_EXT_SWDPIN(4));
   5840 		reg |= CTRL_EXT_SWDPIO(4);
   5841 
   5842 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5843 		CSR_WRITE_FLUSH(sc);
   5844 		delay(10*1000);
   5845 
   5846 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   5847 		CSR_WRITE_FLUSH(sc);
   5848 		delay(150);
   5849 #if 0
   5850 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   5851 #endif
   5852 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   5853 		break;
   5854 	case WM_T_82544:	/* reset 10000us */
   5855 	case WM_T_82540:
   5856 	case WM_T_82545:
   5857 	case WM_T_82545_3:
   5858 	case WM_T_82546:
   5859 	case WM_T_82546_3:
   5860 	case WM_T_82541:
   5861 	case WM_T_82541_2:
   5862 	case WM_T_82547:
   5863 	case WM_T_82547_2:
   5864 	case WM_T_82571:	/* reset 100us */
   5865 	case WM_T_82572:
   5866 	case WM_T_82573:
   5867 	case WM_T_82574:
   5868 	case WM_T_82575:
   5869 	case WM_T_82576:
   5870 	case WM_T_82580:
   5871 	case WM_T_82580ER:
   5872 	case WM_T_I350:
   5873 	case WM_T_I354:
   5874 	case WM_T_I210:
   5875 	case WM_T_I211:
   5876 	case WM_T_82583:
   5877 	case WM_T_80003:
   5878 		/* generic reset */
   5879 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5880 		CSR_WRITE_FLUSH(sc);
   5881 		delay(20000);
   5882 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5883 		CSR_WRITE_FLUSH(sc);
   5884 		delay(20000);
   5885 
   5886 		if ((sc->sc_type == WM_T_82541)
   5887 		    || (sc->sc_type == WM_T_82541_2)
   5888 		    || (sc->sc_type == WM_T_82547)
   5889 		    || (sc->sc_type == WM_T_82547_2)) {
    5890 			/* workarounds for IGP are done in igp_reset() */
   5891 			/* XXX add code to set LED after phy reset */
   5892 		}
   5893 		break;
   5894 	case WM_T_ICH8:
   5895 	case WM_T_ICH9:
   5896 	case WM_T_ICH10:
   5897 	case WM_T_PCH:
   5898 	case WM_T_PCH2:
   5899 	case WM_T_PCH_LPT:
   5900 		/* generic reset */
   5901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   5902 		CSR_WRITE_FLUSH(sc);
   5903 		delay(100);
   5904 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5905 		CSR_WRITE_FLUSH(sc);
   5906 		delay(150);
   5907 		break;
   5908 	default:
   5909 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   5910 		    __func__);
   5911 		break;
   5912 	}
   5913 
   5914 	/* release PHY semaphore */
   5915 	switch (sc->sc_type) {
   5916 	case WM_T_82571:
   5917 	case WM_T_82572:
   5918 	case WM_T_82573:
   5919 	case WM_T_82574:
   5920 	case WM_T_82583:
   5921 		 /* XXX should put sw semaphore, too */
   5922 		wm_put_swsm_semaphore(sc);
   5923 		break;
   5924 	case WM_T_82575:
   5925 	case WM_T_82576:
   5926 	case WM_T_82580:
   5927 	case WM_T_82580ER:
   5928 	case WM_T_I350:
   5929 	case WM_T_I354:
   5930 	case WM_T_I210:
   5931 	case WM_T_I211:
   5932 	case WM_T_80003:
   5933 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   5934 		break;
   5935 	case WM_T_ICH8:
   5936 	case WM_T_ICH9:
   5937 	case WM_T_ICH10:
   5938 	case WM_T_PCH:
   5939 	case WM_T_PCH2:
   5940 	case WM_T_PCH_LPT:
   5941 		wm_put_swfwhw_semaphore(sc);
   5942 		break;
   5943 	default:
    5944 		/* nothing to do */
   5945 		rv = 0;
   5946 		break;
   5947 	}
   5948 
   5949 	/* get_cfg_done */
   5950 	wm_get_cfg_done(sc);
   5951 
   5952 	/* extra setup */
   5953 	switch (sc->sc_type) {
   5954 	case WM_T_82542_2_0:
   5955 	case WM_T_82542_2_1:
   5956 	case WM_T_82543:
   5957 	case WM_T_82544:
   5958 	case WM_T_82540:
   5959 	case WM_T_82545:
   5960 	case WM_T_82545_3:
   5961 	case WM_T_82546:
   5962 	case WM_T_82546_3:
   5963 	case WM_T_82541_2:
   5964 	case WM_T_82547_2:
   5965 	case WM_T_82571:
   5966 	case WM_T_82572:
   5967 	case WM_T_82573:
   5968 	case WM_T_82574:
   5969 	case WM_T_82575:
   5970 	case WM_T_82576:
   5971 	case WM_T_82580:
   5972 	case WM_T_82580ER:
   5973 	case WM_T_I350:
   5974 	case WM_T_I354:
   5975 	case WM_T_I210:
   5976 	case WM_T_I211:
   5977 	case WM_T_82583:
   5978 	case WM_T_80003:
   5979 		/* null */
   5980 		break;
   5981 	case WM_T_82541:
   5982 	case WM_T_82547:
    5983 		/* XXX Actively configure the LED after PHY reset */
   5984 		break;
   5985 	case WM_T_ICH8:
   5986 	case WM_T_ICH9:
   5987 	case WM_T_ICH10:
   5988 	case WM_T_PCH:
   5989 	case WM_T_PCH2:
   5990 	case WM_T_PCH_LPT:
    5991 		/* Allow time for h/w to get to a quiescent state after reset */
   5992 		delay(10*1000);
   5993 
   5994 		if (sc->sc_type == WM_T_PCH)
   5995 			wm_hv_phy_workaround_ich8lan(sc);
   5996 
   5997 		if (sc->sc_type == WM_T_PCH2)
   5998 			wm_lv_phy_workaround_ich8lan(sc);
   5999 
   6000 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
    6001 			/*
    6002 			 * Dummy read to clear the PHY wakeup bit after
    6003 			 * LCD reset.
    6004 			 */
   6005 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   6006 		}
   6007 
    6008 		/*
    6009 		 * XXX Configure the LCD with the extended configuration
    6010 		 * region in NVM.
    6011 		 */
   6012 
   6013 		/* Configure the LCD with the OEM bits in NVM */
   6014 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   6015 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   6016 			/*
   6017 			 * Disable LPLU.
   6018 			 * XXX It seems that 82567 has LPLU, too.
   6019 			 */
   6020 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   6021 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
   6022 			reg |= HV_OEM_BITS_ANEGNOW;
   6023 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   6024 		}
   6025 		break;
   6026 	default:
   6027 		panic("%s: unknown type\n", __func__);
   6028 		break;
   6029 	}
   6030 }
   6031 
   6032 /*
   6033  * wm_get_phy_id_82575:
   6034  *
   6035  * Return PHY ID. Return -1 if it failed.
    6036  *	Return the PHY ID.  Return -1 if it failed.
   6037 static int
   6038 wm_get_phy_id_82575(struct wm_softc *sc)
   6039 {
   6040 	uint32_t reg;
   6041 	int phyid = -1;
   6042 
   6043 	/* XXX */
   6044 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   6045 		return -1;
   6046 
   6047 	if (wm_sgmii_uses_mdio(sc)) {
   6048 		switch (sc->sc_type) {
   6049 		case WM_T_82575:
   6050 		case WM_T_82576:
   6051 			reg = CSR_READ(sc, WMREG_MDIC);
   6052 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   6053 			break;
   6054 		case WM_T_82580:
   6055 		case WM_T_I350:
   6056 		case WM_T_I354:
   6057 		case WM_T_I210:
   6058 		case WM_T_I211:
   6059 			reg = CSR_READ(sc, WMREG_MDICNFG);
   6060 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   6061 			break;
   6062 		default:
   6063 			return -1;
   6064 		}
   6065 	}
   6066 
   6067 	return phyid;
   6068 }
   6069 
   6070 
   6071 /*
   6072  * wm_gmii_mediainit:
   6073  *
   6074  *	Initialize media for use on 1000BASE-T devices.
   6075  */
   6076 static void
   6077 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   6078 {
   6079 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6080 	struct mii_data *mii = &sc->sc_mii;
   6081 	uint32_t reg;
   6082 
   6083 	/* We have MII. */
   6084 	sc->sc_flags |= WM_F_HAS_MII;
   6085 
   6086 	if (sc->sc_type == WM_T_80003)
   6087 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6088 	else
   6089 		sc->sc_tipg = TIPG_1000T_DFLT;
   6090 
   6091 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   6092 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6093 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   6094 	    || (sc->sc_type == WM_T_I211)) {
   6095 		reg = CSR_READ(sc, WMREG_PHPM);
   6096 		reg &= ~PHPM_GO_LINK_D;
   6097 		CSR_WRITE(sc, WMREG_PHPM, reg);
   6098 	}
   6099 
   6100 	/*
   6101 	 * Let the chip set speed/duplex on its own based on
   6102 	 * signals from the PHY.
   6103 	 * XXXbouyer - I'm not sure this is right for the 80003,
   6104 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   6105 	 */
   6106 	sc->sc_ctrl |= CTRL_SLU;
   6107 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6108 
   6109 	/* Initialize our media structures and probe the GMII. */
   6110 	mii->mii_ifp = ifp;
   6111 
   6112 	/*
   6113 	 * Determine the PHY access method.
   6114 	 *
    6115 	 *  For SGMII, use the SGMII-specific method.
   6116 	 *
   6117 	 *  For some devices, we can determine the PHY access method
   6118 	 * from sc_type.
   6119 	 *
    6120 	 *  For ICH8 variants, it's difficult to determine the PHY access
    6121 	 * method from sc_type, so use the PCI product ID for some devices.
    6122 	 * For other ICH8 variants, try igp's method first; if the PHY
    6123 	 * can't be detected that way, fall back to bm's method.
   6124 	 */
   6125 	switch (prodid) {
   6126 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   6127 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   6128 		/* 82577 */
   6129 		sc->sc_phytype = WMPHY_82577;
   6130 		mii->mii_readreg = wm_gmii_hv_readreg;
   6131 		mii->mii_writereg = wm_gmii_hv_writereg;
   6132 		break;
   6133 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   6134 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   6135 		/* 82578 */
   6136 		sc->sc_phytype = WMPHY_82578;
   6137 		mii->mii_readreg = wm_gmii_hv_readreg;
   6138 		mii->mii_writereg = wm_gmii_hv_writereg;
   6139 		break;
   6140 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   6141 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   6142 		/* 82579 */
   6143 		sc->sc_phytype = WMPHY_82579;
   6144 		mii->mii_readreg = wm_gmii_hv_readreg;
   6145 		mii->mii_writereg = wm_gmii_hv_writereg;
   6146 		break;
   6147 	case PCI_PRODUCT_INTEL_I217_LM:
   6148 	case PCI_PRODUCT_INTEL_I217_V:
   6149 	case PCI_PRODUCT_INTEL_I218_LM:
   6150 	case PCI_PRODUCT_INTEL_I218_V:
   6151 		/* I21[78] */
   6152 		mii->mii_readreg = wm_gmii_hv_readreg;
   6153 		mii->mii_writereg = wm_gmii_hv_writereg;
   6154 		break;
   6155 	case PCI_PRODUCT_INTEL_82801I_BM:
   6156 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   6157 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   6158 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   6159 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   6160 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   6161 		/* 82567 */
   6162 		sc->sc_phytype = WMPHY_BM;
   6163 		mii->mii_readreg = wm_gmii_bm_readreg;
   6164 		mii->mii_writereg = wm_gmii_bm_writereg;
   6165 		break;
   6166 	default:
   6167 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    6168 		    && !wm_sgmii_uses_mdio(sc)) {
   6169 			mii->mii_readreg = wm_sgmii_readreg;
   6170 			mii->mii_writereg = wm_sgmii_writereg;
   6171 		} else if (sc->sc_type >= WM_T_80003) {
   6172 			mii->mii_readreg = wm_gmii_i80003_readreg;
   6173 			mii->mii_writereg = wm_gmii_i80003_writereg;
   6174 		} else if (sc->sc_type >= WM_T_I210) {
   6175 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6176 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6177 		} else if (sc->sc_type >= WM_T_82580) {
   6178 			sc->sc_phytype = WMPHY_82580;
   6179 			mii->mii_readreg = wm_gmii_82580_readreg;
   6180 			mii->mii_writereg = wm_gmii_82580_writereg;
   6181 		} else if (sc->sc_type >= WM_T_82544) {
   6182 			mii->mii_readreg = wm_gmii_i82544_readreg;
   6183 			mii->mii_writereg = wm_gmii_i82544_writereg;
   6184 		} else {
   6185 			mii->mii_readreg = wm_gmii_i82543_readreg;
   6186 			mii->mii_writereg = wm_gmii_i82543_writereg;
   6187 		}
   6188 		break;
   6189 	}
   6190 	mii->mii_statchg = wm_gmii_statchg;
   6191 
   6192 	wm_gmii_reset(sc);
   6193 
   6194 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   6195 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   6196 	    wm_gmii_mediastatus);
   6197 
   6198 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   6199 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
   6200 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6201 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   6202 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   6203 			/* Attach only one port */
   6204 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   6205 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6206 		} else {
   6207 			int i, id;
   6208 			uint32_t ctrl_ext;
   6209 
   6210 			id = wm_get_phy_id_82575(sc);
   6211 			if (id != -1) {
   6212 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   6213 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   6214 			}
   6215 			if ((id == -1)
   6216 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6217 				/* Power on sgmii phy if it is disabled */
   6218 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   6219 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    6220 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   6221 				CSR_WRITE_FLUSH(sc);
   6222 				delay(300*1000); /* XXX too long */
   6223 
    6224 				/* Scan PHY addresses 1 through 7 */
   6225 				for (i = 1; i < 8; i++)
   6226 					mii_attach(sc->sc_dev, &sc->sc_mii,
   6227 					    0xffffffff, i, MII_OFFSET_ANY,
   6228 					    MIIF_DOPAUSE);
   6229 
   6230 				/* restore previous sfp cage power state */
   6231 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   6232 			}
   6233 		}
   6234 	} else {
   6235 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6236 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6237 	}
   6238 
   6239 	/*
   6240 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   6241 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   6242 	 */
   6243 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   6244 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   6245 		wm_set_mdio_slow_mode_hv(sc);
   6246 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6247 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6248 	}
   6249 
   6250 	/*
   6251 	 * (For ICH8 variants)
   6252 	 * If PHY detection failed, use BM's r/w function and retry.
   6253 	 */
   6254 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   6255 		/* if failed, retry with *_bm_* */
   6256 		mii->mii_readreg = wm_gmii_bm_readreg;
   6257 		mii->mii_writereg = wm_gmii_bm_writereg;
   6258 
   6259 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   6260 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   6261 	}
   6262 
   6263 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    6264 		/* No PHY was found */
   6265 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   6266 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   6267 		sc->sc_phytype = WMPHY_NONE;
   6268 	} else {
   6269 		/*
   6270 		 * PHY Found!
   6271 		 * Check PHY type.
   6272 		 */
   6273 		uint32_t model;
   6274 		struct mii_softc *child;
   6275 
   6276 		child = LIST_FIRST(&mii->mii_phys);
   6277 		if (device_is_a(child->mii_dev, "igphy")) {
   6278 			struct igphy_softc *isc = (struct igphy_softc *)child;
   6279 
   6280 			model = isc->sc_mii.mii_mpd_model;
   6281 			if (model == MII_MODEL_yyINTEL_I82566)
   6282 				sc->sc_phytype = WMPHY_IGP_3;
   6283 		}
   6284 
   6285 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   6286 	}
   6287 }
   6288 
   6289 /*
   6290  * wm_gmii_mediastatus:	[ifmedia interface function]
   6291  *
   6292  *	Get the current interface media status on a 1000BASE-T device.
   6293  */
   6294 static void
   6295 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   6296 {
   6297 	struct wm_softc *sc = ifp->if_softc;
   6298 
   6299 	ether_mediastatus(ifp, ifmr);
   6300 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   6301 	    | sc->sc_flowflags;
   6302 }
   6303 
   6304 /*
   6305  * wm_gmii_mediachange:	[ifmedia interface function]
   6306  *
   6307  *	Set hardware to newly-selected media on a 1000BASE-T device.
   6308  */
   6309 static int
   6310 wm_gmii_mediachange(struct ifnet *ifp)
   6311 {
   6312 	struct wm_softc *sc = ifp->if_softc;
   6313 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   6314 	int rc;
   6315 
   6316 	if ((ifp->if_flags & IFF_UP) == 0)
   6317 		return 0;
   6318 
   6319 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   6320 	sc->sc_ctrl |= CTRL_SLU;
   6321 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   6322 	    || (sc->sc_type > WM_T_82543)) {
   6323 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   6324 	} else {
   6325 		sc->sc_ctrl &= ~CTRL_ASDE;
   6326 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   6327 		if (ife->ifm_media & IFM_FDX)
   6328 			sc->sc_ctrl |= CTRL_FD;
   6329 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   6330 		case IFM_10_T:
   6331 			sc->sc_ctrl |= CTRL_SPEED_10;
   6332 			break;
   6333 		case IFM_100_TX:
   6334 			sc->sc_ctrl |= CTRL_SPEED_100;
   6335 			break;
   6336 		case IFM_1000_T:
   6337 			sc->sc_ctrl |= CTRL_SPEED_1000;
   6338 			break;
   6339 		default:
   6340 			panic("wm_gmii_mediachange: bad media 0x%x",
   6341 			    ife->ifm_media);
   6342 		}
   6343 	}
   6344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6345 	if (sc->sc_type <= WM_T_82543)
   6346 		wm_gmii_reset(sc);
   6347 
   6348 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   6349 		return 0;
   6350 	return rc;
   6351 }
   6352 
   6353 #define	MDI_IO		CTRL_SWDPIN(2)
   6354 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   6355 #define	MDI_CLK		CTRL_SWDPIN(3)
   6356 
   6357 static void
   6358 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   6359 {
   6360 	uint32_t i, v;
   6361 
   6362 	v = CSR_READ(sc, WMREG_CTRL);
   6363 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6364 	v |= MDI_DIR | CTRL_SWDPIO(3);
   6365 
   6366 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   6367 		if (data & i)
   6368 			v |= MDI_IO;
   6369 		else
   6370 			v &= ~MDI_IO;
   6371 		CSR_WRITE(sc, WMREG_CTRL, v);
   6372 		CSR_WRITE_FLUSH(sc);
   6373 		delay(10);
   6374 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6375 		CSR_WRITE_FLUSH(sc);
   6376 		delay(10);
   6377 		CSR_WRITE(sc, WMREG_CTRL, v);
   6378 		CSR_WRITE_FLUSH(sc);
   6379 		delay(10);
   6380 	}
   6381 }
   6382 
   6383 static uint32_t
   6384 wm_i82543_mii_recvbits(struct wm_softc *sc)
   6385 {
   6386 	uint32_t v, i, data = 0;
   6387 
   6388 	v = CSR_READ(sc, WMREG_CTRL);
   6389 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   6390 	v |= CTRL_SWDPIO(3);
   6391 
   6392 	CSR_WRITE(sc, WMREG_CTRL, v);
   6393 	CSR_WRITE_FLUSH(sc);
   6394 	delay(10);
   6395 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6396 	CSR_WRITE_FLUSH(sc);
   6397 	delay(10);
   6398 	CSR_WRITE(sc, WMREG_CTRL, v);
   6399 	CSR_WRITE_FLUSH(sc);
   6400 	delay(10);
   6401 
   6402 	for (i = 0; i < 16; i++) {
   6403 		data <<= 1;
   6404 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6405 		CSR_WRITE_FLUSH(sc);
   6406 		delay(10);
   6407 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   6408 			data |= 1;
   6409 		CSR_WRITE(sc, WMREG_CTRL, v);
   6410 		CSR_WRITE_FLUSH(sc);
   6411 		delay(10);
   6412 	}
   6413 
   6414 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   6415 	CSR_WRITE_FLUSH(sc);
   6416 	delay(10);
   6417 	CSR_WRITE(sc, WMREG_CTRL, v);
   6418 	CSR_WRITE_FLUSH(sc);
   6419 	delay(10);
   6420 
   6421 	return data;
   6422 }
   6423 
   6424 #undef MDI_IO
   6425 #undef MDI_DIR
   6426 #undef MDI_CLK
   6427 
   6428 /*
   6429  * wm_gmii_i82543_readreg:	[mii interface function]
   6430  *
   6431  *	Read a PHY register on the GMII (i82543 version).
   6432  */
   6433 static int
   6434 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   6435 {
   6436 	struct wm_softc *sc = device_private(self);
   6437 	int rv;
   6438 
   6439 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6440 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   6441 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   6442 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   6443 
   6444 	DPRINTF(WM_DEBUG_GMII,
   6445 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   6446 	    device_xname(sc->sc_dev), phy, reg, rv));
   6447 
   6448 	return rv;
   6449 }
   6450 
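         /*
          * The 14 bits shifted out above form the head of an IEEE 802.3
          * clause 22 read frame: start (01) and read opcode (10) in the top
          * four bits, then the 5-bit PHY address and 5-bit register number;
          * the 32 one bits sent first are the preamble.
          * wm_i82543_mii_recvbits() then clocks through the turnaround and
          * shifts in the 16 data bits.
          */
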
   6451 /*
   6452  * wm_gmii_i82543_writereg:	[mii interface function]
   6453  *
   6454  *	Write a PHY register on the GMII (i82543 version).
   6455  */
   6456 static void
   6457 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   6458 {
   6459 	struct wm_softc *sc = device_private(self);
   6460 
   6461 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   6462 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   6463 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   6464 	    (MII_COMMAND_START << 30), 32);
   6465 }
   6466 
   6467 /*
   6468  * wm_gmii_i82544_readreg:	[mii interface function]
   6469  *
   6470  *	Read a PHY register on the GMII.
   6471  */
   6472 static int
   6473 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   6474 {
   6475 	struct wm_softc *sc = device_private(self);
   6476 	uint32_t mdic = 0;
   6477 	int i, rv;
   6478 
   6479 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   6480 	    MDIC_REGADD(reg));
   6481 
   6482 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6483 		mdic = CSR_READ(sc, WMREG_MDIC);
   6484 		if (mdic & MDIC_READY)
   6485 			break;
   6486 		delay(50);
   6487 	}
   6488 
   6489 	if ((mdic & MDIC_READY) == 0) {
   6490 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   6491 		    device_xname(sc->sc_dev), phy, reg);
   6492 		rv = 0;
   6493 	} else if (mdic & MDIC_E) {
   6494 #if 0 /* This is normal if no PHY is present. */
   6495 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   6496 		    device_xname(sc->sc_dev), phy, reg);
   6497 #endif
   6498 		rv = 0;
   6499 	} else {
   6500 		rv = MDIC_DATA(mdic);
   6501 		if (rv == 0xffff)
   6502 			rv = 0;
   6503 	}
   6504 
   6505 	return rv;
   6506 }
   6507 
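         /*
          * Unlike the i82543 bit-bang path above, this function drives the
          * MDIC register: a single write starts the transaction, and the
          * loop then polls MDIC_READY for up to WM_GEN_POLL_TIMEOUT * 3
          * iterations of 50us each before declaring a timeout.  A result of
          * all ones (0xffff) is treated as "no device present" and mapped
          * to 0.
          */
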
   6508 /*
   6509  * wm_gmii_i82544_writereg:	[mii interface function]
   6510  *
   6511  *	Write a PHY register on the GMII.
   6512  */
   6513 static void
   6514 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   6515 {
   6516 	struct wm_softc *sc = device_private(self);
   6517 	uint32_t mdic = 0;
   6518 	int i;
   6519 
   6520 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   6521 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   6522 
   6523 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   6524 		mdic = CSR_READ(sc, WMREG_MDIC);
   6525 		if (mdic & MDIC_READY)
   6526 			break;
   6527 		delay(50);
   6528 	}
   6529 
   6530 	if ((mdic & MDIC_READY) == 0)
   6531 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   6532 		    device_xname(sc->sc_dev), phy, reg);
   6533 	else if (mdic & MDIC_E)
   6534 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   6535 		    device_xname(sc->sc_dev), phy, reg);
   6536 }
   6537 
   6538 /*
   6539  * wm_gmii_i80003_readreg:	[mii interface function]
   6540  *
   6541  *	Read a PHY register on the kumeran
    6542  *	Read a PHY register on the kumeran bus.
    6543  * This could be handled by the PHY layer if we didn't have to lock the
    6544  * resource ...
   6545 static int
   6546 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   6547 {
   6548 	struct wm_softc *sc = device_private(self);
   6549 	int sem;
   6550 	int rv;
   6551 
   6552 	if (phy != 1) /* only one PHY on kumeran bus */
   6553 		return 0;
   6554 
   6555 	sem = swfwphysem[sc->sc_funcid];
   6556 	if (wm_get_swfw_semaphore(sc, sem)) {
   6557 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6558 		    __func__);
   6559 		return 0;
   6560 	}
   6561 
   6562 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6563 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6564 		    reg >> GG82563_PAGE_SHIFT);
   6565 	} else {
   6566 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6567 		    reg >> GG82563_PAGE_SHIFT);
   6568 	}
    6569 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   6570 	delay(200);
   6571 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6572 	delay(200);
   6573 
   6574 	wm_put_swfw_semaphore(sc, sem);
   6575 	return rv;
   6576 }
   6577 
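         /*
          * The GG82563 multiplexes many registers behind a page-select
          * register: the page number travels in the upper bits of 'reg'
          * (reg >> GG82563_PAGE_SHIFT) and the low bits select the register
          * within the page.  Registers at or above GG82563_MIN_ALT_REG must
          * be paged through the alternate page-select register instead,
          * which is why the read and write paths each pick between the two.
          */
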
   6578 /*
   6579  * wm_gmii_i80003_writereg:	[mii interface function]
   6580  *
    6581  *	Write a PHY register on the kumeran bus.
    6582  * This could be handled by the PHY layer if we didn't have to lock the
    6583  * resource ...
   6584  */
   6585 static void
   6586 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   6587 {
   6588 	struct wm_softc *sc = device_private(self);
   6589 	int sem;
   6590 
   6591 	if (phy != 1) /* only one PHY on kumeran bus */
   6592 		return;
   6593 
   6594 	sem = swfwphysem[sc->sc_funcid];
   6595 	if (wm_get_swfw_semaphore(sc, sem)) {
   6596 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6597 		    __func__);
   6598 		return;
   6599 	}
   6600 
   6601 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   6602 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   6603 		    reg >> GG82563_PAGE_SHIFT);
   6604 	} else {
   6605 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   6606 		    reg >> GG82563_PAGE_SHIFT);
   6607 	}
    6608 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   6609 	delay(200);
   6610 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6611 	delay(200);
   6612 
   6613 	wm_put_swfw_semaphore(sc, sem);
   6614 }
   6615 
   6616 /*
   6617  * wm_gmii_bm_readreg:	[mii interface function]
   6618  *
    6619  *	Read a PHY register on the BM PHY.
    6620  * This could be handled by the PHY layer if we didn't have to lock the
    6621  * resource ...
   6622  */
   6623 static int
   6624 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   6625 {
   6626 	struct wm_softc *sc = device_private(self);
   6627 	int sem;
   6628 	int rv;
   6629 
   6630 	sem = swfwphysem[sc->sc_funcid];
   6631 	if (wm_get_swfw_semaphore(sc, sem)) {
   6632 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6633 		    __func__);
   6634 		return 0;
   6635 	}
   6636 
   6637 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6638 		if (phy == 1)
   6639 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6640 			    reg);
   6641 		else
   6642 			wm_gmii_i82544_writereg(self, phy,
   6643 			    GG82563_PHY_PAGE_SELECT,
   6644 			    reg >> GG82563_PAGE_SHIFT);
   6645 	}
   6646 
   6647 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   6648 	wm_put_swfw_semaphore(sc, sem);
   6649 	return rv;
   6650 }
   6651 
   6652 /*
   6653  * wm_gmii_bm_writereg:	[mii interface function]
   6654  *
    6655  *	Write a PHY register on the BM PHY.
    6656  * This could be handled by the PHY layer if we didn't have to lock the
    6657  * resource ...
   6658  */
   6659 static void
   6660 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   6661 {
   6662 	struct wm_softc *sc = device_private(self);
   6663 	int sem;
   6664 
   6665 	sem = swfwphysem[sc->sc_funcid];
   6666 	if (wm_get_swfw_semaphore(sc, sem)) {
   6667 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6668 		    __func__);
   6669 		return;
   6670 	}
   6671 
   6672 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   6673 		if (phy == 1)
   6674 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   6675 			    reg);
   6676 		else
   6677 			wm_gmii_i82544_writereg(self, phy,
   6678 			    GG82563_PHY_PAGE_SELECT,
   6679 			    reg >> GG82563_PAGE_SHIFT);
   6680 	}
   6681 
   6682 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   6683 	wm_put_swfw_semaphore(sc, sem);
   6684 }
   6685 
   6686 static void
   6687 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   6688 {
   6689 	struct wm_softc *sc = device_private(self);
   6690 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   6691 	uint16_t wuce;
   6692 
   6693 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   6694 	if (sc->sc_type == WM_T_PCH) {
    6695 		/* XXX The e1000 driver does nothing here... why? */
   6696 	}
   6697 
   6698 	/* Set page 769 */
   6699 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6700 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6701 
   6702 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   6703 
   6704 	wuce &= ~BM_WUC_HOST_WU_BIT;
   6705 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   6706 	    wuce | BM_WUC_ENABLE_BIT);
   6707 
   6708 	/* Select page 800 */
   6709 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6710 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   6711 
   6712 	/* Write page 800 */
   6713 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   6714 
   6715 	if (rd)
   6716 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   6717 	else
   6718 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   6719 
   6720 	/* Set page 769 */
   6721 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6722 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   6723 
   6724 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   6725 }
   6726 
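         /*
          * Summary of the page-800 (wakeup control) access sequence
          * performed above: select page 769, set BM_WUC_ENABLE_BIT (with
          * the host wakeup bit cleared) to unlock the WUC registers, select
          * page 800, write the target register number to the address opcode
          * register, move the data through the data opcode register, and
          * finally restore the original page-769 enable value.
          */
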
   6727 /*
   6728  * wm_gmii_hv_readreg:	[mii interface function]
   6729  *
    6730  *	Read a PHY register on the HV (82577/82578) PHY.
    6731  * This could be handled by the PHY layer if we didn't have to lock the
    6732  * resource ...
   6733  */
   6734 static int
   6735 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   6736 {
   6737 	struct wm_softc *sc = device_private(self);
   6738 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6739 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6740 	uint16_t val;
   6741 	int rv;
   6742 
   6743 	if (wm_get_swfwhw_semaphore(sc)) {
   6744 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6745 		    __func__);
   6746 		return 0;
   6747 	}
   6748 
   6749 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6750 	if (sc->sc_phytype == WMPHY_82577) {
   6751 		/* XXX must write */
   6752 	}
   6753 
   6754 	/* Page 800 works differently than the rest so it has its own func */
   6755 	if (page == BM_WUC_PAGE) {
   6756 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   6757 		return val;
   6758 	}
   6759 
   6760 	/*
    6761 	 * Pages lower than 768 work differently than the rest, so they
    6762 	 * have their own function
   6763 	 */
   6764 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6765 		printf("gmii_hv_readreg!!!\n");
   6766 		return 0;
   6767 	}
   6768 
   6769 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6770 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6771 		    page << BME1000_PAGE_SHIFT);
   6772 	}
   6773 
   6774 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   6775 	wm_put_swfwhw_semaphore(sc);
   6776 	return rv;
   6777 }
   6778 
   6779 /*
   6780  * wm_gmii_hv_writereg:	[mii interface function]
   6781  *
    6782  *	Write a PHY register on the HV (82577/82578) PHY.
    6783  * This could be handled by the PHY layer if we didn't have to lock the
    6784  * resource ...
   6785  */
   6786 static void
   6787 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   6788 {
   6789 	struct wm_softc *sc = device_private(self);
   6790 	uint16_t page = BM_PHY_REG_PAGE(reg);
   6791 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   6792 
   6793 	if (wm_get_swfwhw_semaphore(sc)) {
   6794 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6795 		    __func__);
   6796 		return;
   6797 	}
   6798 
   6799 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   6800 
   6801 	/* Page 800 works differently than the rest so it has its own func */
   6802 	if (page == BM_WUC_PAGE) {
   6803 		uint16_t tmp;
   6804 
   6805 		tmp = val;
   6806 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   6807 		return;
   6808 	}
   6809 
   6810 	/*
    6811 	 * Pages lower than 768 work differently than the rest, so they
    6812 	 * have their own function
   6813 	 */
   6814 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   6815 		printf("gmii_hv_writereg!!!\n");
   6816 		return;
   6817 	}
   6818 
   6819 	/*
   6820 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   6821 	 * Power Down (whenever bit 11 of the PHY control register is set)
   6822 	 */
   6823 
   6824 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   6825 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   6826 		    page << BME1000_PAGE_SHIFT);
   6827 	}
   6828 
   6829 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   6830 	wm_put_swfwhw_semaphore(sc);
   6831 }
   6832 
   6833 /*
   6834  * wm_gmii_82580_readreg:	[mii interface function]
   6835  *
   6836  *	Read a PHY register on the 82580 and I350.
   6837  * This could be handled by the PHY layer if we didn't have to lock the
    6838  * resource ...
   6839  */
   6840 static int
   6841 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   6842 {
   6843 	struct wm_softc *sc = device_private(self);
   6844 	int sem;
   6845 	int rv;
   6846 
   6847 	sem = swfwphysem[sc->sc_funcid];
   6848 	if (wm_get_swfw_semaphore(sc, sem)) {
   6849 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6850 		    __func__);
   6851 		return 0;
   6852 	}
   6853 
   6854 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   6855 
   6856 	wm_put_swfw_semaphore(sc, sem);
   6857 	return rv;
   6858 }
   6859 
   6860 /*
   6861  * wm_gmii_82580_writereg:	[mii interface function]
   6862  *
   6863  *	Write a PHY register on the 82580 and I350.
   6864  * This could be handled by the PHY layer if we didn't have to lock the
    6865  * resource ...
   6866  */
   6867 static void
   6868 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   6869 {
   6870 	struct wm_softc *sc = device_private(self);
   6871 	int sem;
   6872 
   6873 	sem = swfwphysem[sc->sc_funcid];
   6874 	if (wm_get_swfw_semaphore(sc, sem)) {
   6875 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   6876 		    __func__);
   6877 		return;
   6878 	}
   6879 
   6880 	wm_gmii_i82544_writereg(self, phy, reg, val);
   6881 
   6882 	wm_put_swfw_semaphore(sc, sem);
   6883 }
   6884 
   6885 /*
   6886  * wm_gmii_statchg:	[mii interface function]
   6887  *
   6888  *	Callback from MII layer when media changes.
   6889  */
   6890 static void
   6891 wm_gmii_statchg(struct ifnet *ifp)
   6892 {
   6893 	struct wm_softc *sc = ifp->if_softc;
   6894 	struct mii_data *mii = &sc->sc_mii;
   6895 
   6896 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   6897 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   6898 	sc->sc_fcrtl &= ~FCRTL_XONE;
   6899 
   6900 	/*
   6901 	 * Get flow control negotiation result.
   6902 	 */
   6903 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   6904 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   6905 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   6906 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   6907 	}
   6908 
   6909 	if (sc->sc_flowflags & IFM_FLOW) {
   6910 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   6911 			sc->sc_ctrl |= CTRL_TFCE;
   6912 			sc->sc_fcrtl |= FCRTL_XONE;
   6913 		}
   6914 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   6915 			sc->sc_ctrl |= CTRL_RFCE;
   6916 	}
   6917 
   6918 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6919 		DPRINTF(WM_DEBUG_LINK,
   6920 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   6921 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6922 	} else {
   6923 		DPRINTF(WM_DEBUG_LINK,
   6924 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   6925 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   6926 	}
   6927 
   6928 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6929 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6930 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   6931 						 : WMREG_FCRTL, sc->sc_fcrtl);
   6932 	if (sc->sc_type == WM_T_80003) {
   6933 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   6934 		case IFM_1000_T:
   6935 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6936 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   6937 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   6938 			break;
   6939 		default:
   6940 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   6941 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   6942 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   6943 			break;
   6944 		}
   6945 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6946 	}
   6947 }
   6948 
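         /*
          * The flow-control mapping applied above: a negotiated TXPAUSE
          * sets CTRL_TFCE and arms XON (FCRTL_XONE) so the chip will send
          * pause frames, and a negotiated RXPAUSE sets CTRL_RFCE so it
          * honors received ones.  Symmetric flow control (IFM_FLOW with
          * both pause flags), for example, ends up setting both CTRL bits.
          */
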
   6949 /*
   6950  * wm_kmrn_readreg:
   6951  *
   6952  *	Read a kumeran register
   6953  */
   6954 static int
   6955 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   6956 {
   6957 	int rv;
   6958 
    6959 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   6960 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   6961 			aprint_error_dev(sc->sc_dev,
   6962 			    "%s: failed to get semaphore\n", __func__);
   6963 			return 0;
   6964 		}
    6965 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   6966 		if (wm_get_swfwhw_semaphore(sc)) {
   6967 			aprint_error_dev(sc->sc_dev,
   6968 			    "%s: failed to get semaphore\n", __func__);
   6969 			return 0;
   6970 		}
   6971 	}
   6972 
   6973 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   6974 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   6975 	    KUMCTRLSTA_REN);
   6976 	CSR_WRITE_FLUSH(sc);
   6977 	delay(2);
   6978 
   6979 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   6980 
    6981 	if (sc->sc_flags & WM_F_LOCK_SWFW)
    6982 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    6983 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   6984 		wm_put_swfwhw_semaphore(sc);
   6985 
   6986 	return rv;
   6987 }
   6988 
   6989 /*
   6990  * wm_kmrn_writereg:
   6991  *
   6992  *	Write a kumeran register
   6993  */
   6994 static void
   6995 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   6996 {
   6997 
    6998 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   6999 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   7000 			aprint_error_dev(sc->sc_dev,
   7001 			    "%s: failed to get semaphore\n", __func__);
   7002 			return;
   7003 		}
    7004 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   7005 		if (wm_get_swfwhw_semaphore(sc)) {
   7006 			aprint_error_dev(sc->sc_dev,
   7007 			    "%s: failed to get semaphore\n", __func__);
   7008 			return;
   7009 		}
   7010 	}
   7011 
   7012 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   7013 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   7014 	    (val & KUMCTRLSTA_MASK));
   7015 
    7016 	if (sc->sc_flags & WM_F_LOCK_SWFW)
    7017 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
    7018 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   7019 		wm_put_swfwhw_semaphore(sc);
   7020 }
   7021 
   7022 /* SGMII related */
   7023 
   7024 /*
   7025  * wm_sgmii_uses_mdio
   7026  *
   7027  * Check whether the transaction is to the internal PHY or the external
   7028  * MDIO interface. Return true if it's MDIO.
   7029  */
   7030 static bool
   7031 wm_sgmii_uses_mdio(struct wm_softc *sc)
   7032 {
   7033 	uint32_t reg;
   7034 	bool ismdio = false;
   7035 
   7036 	switch (sc->sc_type) {
   7037 	case WM_T_82575:
   7038 	case WM_T_82576:
   7039 		reg = CSR_READ(sc, WMREG_MDIC);
   7040 		ismdio = ((reg & MDIC_DEST) != 0);
   7041 		break;
   7042 	case WM_T_82580:
   7043 	case WM_T_82580ER:
   7044 	case WM_T_I350:
   7045 	case WM_T_I354:
   7046 	case WM_T_I210:
   7047 	case WM_T_I211:
   7048 		reg = CSR_READ(sc, WMREG_MDICNFG);
   7049 		ismdio = ((reg & MDICNFG_DEST) != 0);
   7050 		break;
   7051 	default:
   7052 		break;
   7053 	}
   7054 
   7055 	return ismdio;
   7056 }
   7057 
   7058 /*
   7059  * wm_sgmii_readreg:	[mii interface function]
   7060  *
    7061  *	Read a PHY register on the SGMII.
    7062  * This could be handled by the PHY layer if we didn't have to lock the
    7063  * resource ...
   7064  */
   7065 static int
   7066 wm_sgmii_readreg(device_t self, int phy, int reg)
   7067 {
   7068 	struct wm_softc *sc = device_private(self);
   7069 	uint32_t i2ccmd;
   7070 	int i, rv;
   7071 
   7072 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7073 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7074 		    __func__);
   7075 		return 0;
   7076 	}
   7077 
   7078 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7079 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7080 	    | I2CCMD_OPCODE_READ;
   7081 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7082 
   7083 	/* Poll the ready bit */
   7084 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7085 		delay(50);
   7086 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7087 		if (i2ccmd & I2CCMD_READY)
   7088 			break;
   7089 	}
   7090 	if ((i2ccmd & I2CCMD_READY) == 0)
   7091 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   7092 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7093 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7094 
   7095 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   7096 
   7097 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7098 	return rv;
   7099 }
   7100 
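         /*
          * The I2CCMD data field comes back in I2C (big-endian) byte order,
          * which is why the read path above swaps the two halves of the low
          * 16 bits before returning a host-order register value.  The poll
          * loop allows up to I2CCMD_PHY_TIMEOUT iterations of 50us each.
          */
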
   7101 /*
   7102  * wm_sgmii_writereg:	[mii interface function]
   7103  *
   7104  *	Write a PHY register on the SGMII.
   7105  * This could be handled by the PHY layer if we didn't have to lock the
    7106  * resource ...
   7107  */
   7108 static void
   7109 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   7110 {
   7111 	struct wm_softc *sc = device_private(self);
   7112 	uint32_t i2ccmd;
   7113 	int i;
   7114 
   7115 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   7116 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7117 		    __func__);
   7118 		return;
   7119 	}
   7120 
   7121 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   7122 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   7123 	    | I2CCMD_OPCODE_WRITE;
   7124 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   7125 
   7126 	/* Poll the ready bit */
   7127 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   7128 		delay(50);
   7129 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   7130 		if (i2ccmd & I2CCMD_READY)
   7131 			break;
   7132 	}
   7133 	if ((i2ccmd & I2CCMD_READY) == 0)
   7134 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   7135 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   7136 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   7137 
    7138 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7139 }
   7140 
   7141 /* TBI related */
   7142 
   7143 /* XXX Currently TBI only */
   7144 static int
   7145 wm_check_for_link(struct wm_softc *sc)
   7146 {
   7147 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7148 	uint32_t rxcw;
   7149 	uint32_t ctrl;
   7150 	uint32_t status;
   7151 	uint32_t sig;
   7152 
   7153 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7154 		sc->sc_tbi_linkup = 1;
   7155 		return 0;
   7156 	}
   7157 
   7158 	rxcw = CSR_READ(sc, WMREG_RXCW);
   7159 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7160 	status = CSR_READ(sc, WMREG_STATUS);
   7161 
   7162 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   7163 
   7164 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   7165 		device_xname(sc->sc_dev), __func__,
   7166 		((ctrl & CTRL_SWDPIN(1)) == sig),
   7167 		((status & STATUS_LU) != 0),
   7168 		((rxcw & RXCW_C) != 0)
   7169 		    ));
   7170 
   7171 	/*
   7172 	 * SWDPIN   LU RXCW
   7173 	 *      0    0    0
   7174 	 *      0    0    1	(should not happen)
   7175 	 *      0    1    0	(should not happen)
   7176 	 *      0    1    1	(should not happen)
   7177 	 *      1    0    0	Disable autonego and force linkup
   7178 	 *      1    0    1	got /C/ but not linkup yet
   7179 	 *      1    1    0	(linkup)
   7180 	 *      1    1    1	If IFM_AUTO, back to autonego
   7181 	 *
   7182 	 */
   7183 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7184 	    && ((status & STATUS_LU) == 0)
   7185 	    && ((rxcw & RXCW_C) == 0)) {
   7186 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   7187 			__func__));
   7188 		sc->sc_tbi_linkup = 0;
   7189 		/* Disable auto-negotiation in the TXCW register */
   7190 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   7191 
   7192 		/*
   7193 		 * Force link-up and also force full-duplex.
   7194 		 *
    7195 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
    7196 		 * so we should update sc->sc_ctrl accordingly
   7197 		 */
   7198 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   7199 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7200 	} else if (((status & STATUS_LU) != 0)
   7201 	    && ((rxcw & RXCW_C) != 0)
   7202 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   7203 		sc->sc_tbi_linkup = 1;
   7204 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   7205 			__func__));
   7206 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7207 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   7208 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   7209 	    && ((rxcw & RXCW_C) != 0)) {
   7210 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   7211 	} else {
   7212 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   7213 			status));
   7214 	}
   7215 
   7216 	return 0;
   7217 }
   7218 
   7219 /*
   7220  * wm_tbi_mediainit:
   7221  *
   7222  *	Initialize media for use on 1000BASE-X devices.
   7223  */
   7224 static void
   7225 wm_tbi_mediainit(struct wm_softc *sc)
   7226 {
   7227 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7228 	const char *sep = "";
   7229 
   7230 	if (sc->sc_type < WM_T_82543)
   7231 		sc->sc_tipg = TIPG_WM_DFLT;
   7232 	else
   7233 		sc->sc_tipg = TIPG_LG_DFLT;
   7234 
   7235 	sc->sc_tbi_anegticks = 5;
   7236 
   7237 	/* Initialize our media structures */
   7238 	sc->sc_mii.mii_ifp = ifp;
   7239 
   7240 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7241 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   7242 	    wm_tbi_mediastatus);
   7243 
   7244 	/*
   7245 	 * SWD Pins:
   7246 	 *
   7247 	 *	0 = Link LED (output)
   7248 	 *	1 = Loss Of Signal (input)
   7249 	 */
   7250 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   7251 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   7252 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7253 		sc->sc_ctrl &= ~CTRL_LRST;
   7254 
   7255 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7256 
   7257 #define	ADD(ss, mm, dd)							\
   7258 do {									\
   7259 	aprint_normal("%s%s", sep, ss);					\
   7260 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   7261 	sep = ", ";							\
   7262 } while (/*CONSTCOND*/0)
   7263 
   7264 	aprint_normal_dev(sc->sc_dev, "");
   7265 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   7266 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   7267 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   7268 	aprint_normal("\n");
   7269 
   7270 #undef ADD
   7271 
   7272 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   7273 }
   7274 
   7275 /*
   7276  * wm_tbi_mediastatus:	[ifmedia interface function]
   7277  *
   7278  *	Get the current interface media status on a 1000BASE-X device.
   7279  */
   7280 static void
   7281 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   7282 {
   7283 	struct wm_softc *sc = ifp->if_softc;
   7284 	uint32_t ctrl, status;
   7285 
   7286 	ifmr->ifm_status = IFM_AVALID;
   7287 	ifmr->ifm_active = IFM_ETHER;
   7288 
   7289 	status = CSR_READ(sc, WMREG_STATUS);
   7290 	if ((status & STATUS_LU) == 0) {
   7291 		ifmr->ifm_active |= IFM_NONE;
   7292 		return;
   7293 	}
   7294 
   7295 	ifmr->ifm_status |= IFM_ACTIVE;
   7296 	ifmr->ifm_active |= IFM_1000_SX;
   7297 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   7298 		ifmr->ifm_active |= IFM_FDX;
   7299 	else
   7300 		ifmr->ifm_active |= IFM_HDX;
   7301 	ctrl = CSR_READ(sc, WMREG_CTRL);
   7302 	if (ctrl & CTRL_RFCE)
   7303 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   7304 	if (ctrl & CTRL_TFCE)
   7305 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   7306 }
   7307 
   7308 /*
   7309  * wm_tbi_mediachange:	[ifmedia interface function]
   7310  *
   7311  *	Set hardware to newly-selected media on a 1000BASE-X device.
   7312  */
   7313 static int
   7314 wm_tbi_mediachange(struct ifnet *ifp)
   7315 {
   7316 	struct wm_softc *sc = ifp->if_softc;
   7317 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7318 	uint32_t status;
   7319 	int i;
   7320 
   7321 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
   7322 		return 0;
   7323 
   7324 	sc->sc_txcw = 0;
   7325 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
   7326 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   7327 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   7328 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7329 		sc->sc_txcw |= TXCW_ANE;
   7330 	} else {
   7331 		/*
   7332 		 * If autonegotiation is turned off, force link up and turn on
   7333 		 * full duplex
   7334 		 */
   7335 		sc->sc_txcw &= ~TXCW_ANE;
   7336 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
   7337 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   7338 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7339 		CSR_WRITE_FLUSH(sc);
   7340 		delay(1000);
   7341 	}
   7342 
   7343 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   7344 		    device_xname(sc->sc_dev),sc->sc_txcw));
   7345 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7346 	CSR_WRITE_FLUSH(sc);
   7347 	delay(10000);
   7348 
   7349 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   7350 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   7351 
   7352 	/*
    7353 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    7354 	 * if the optics detect a signal, and clear if they don't.
   7355 	 */
   7356 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   7357 		/* Have signal; wait for the link to come up. */
   7358 
   7359 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7360 			/*
   7361 			 * Reset the link, and let autonegotiation do its thing
   7362 			 */
   7363 			sc->sc_ctrl |= CTRL_LRST;
   7364 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7365 			CSR_WRITE_FLUSH(sc);
   7366 			delay(1000);
   7367 			sc->sc_ctrl &= ~CTRL_LRST;
   7368 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7369 			CSR_WRITE_FLUSH(sc);
   7370 			delay(1000);
   7371 		}
   7372 
   7373 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   7374 			delay(10000);
   7375 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   7376 				break;
   7377 		}
   7378 
   7379 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   7380 			    device_xname(sc->sc_dev),i));
   7381 
   7382 		status = CSR_READ(sc, WMREG_STATUS);
   7383 		DPRINTF(WM_DEBUG_LINK,
   7384 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   7385 			device_xname(sc->sc_dev),status, STATUS_LU));
   7386 		if (status & STATUS_LU) {
   7387 			/* Link is up. */
   7388 			DPRINTF(WM_DEBUG_LINK,
   7389 			    ("%s: LINK: set media -> link up %s\n",
   7390 			    device_xname(sc->sc_dev),
   7391 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7392 
   7393 			/*
   7394 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7395 			 * so we should update sc->sc_ctrl
   7396 			 */
   7397 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7398 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7399 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7400 			if (status & STATUS_FD)
   7401 				sc->sc_tctl |=
   7402 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7403 			else
   7404 				sc->sc_tctl |=
   7405 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7406 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   7407 				sc->sc_fcrtl |= FCRTL_XONE;
   7408 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7409 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7410 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7411 				      sc->sc_fcrtl);
   7412 			sc->sc_tbi_linkup = 1;
   7413 		} else {
   7414 			if (i == WM_LINKUP_TIMEOUT)
   7415 				wm_check_for_link(sc);
   7416 			/* Link is down. */
   7417 			DPRINTF(WM_DEBUG_LINK,
   7418 			    ("%s: LINK: set media -> link down\n",
   7419 			    device_xname(sc->sc_dev)));
   7420 			sc->sc_tbi_linkup = 0;
   7421 		}
   7422 	} else {
   7423 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   7424 		    device_xname(sc->sc_dev)));
   7425 		sc->sc_tbi_linkup = 0;
   7426 	}
   7427 
   7428 	wm_tbi_set_linkled(sc);
   7429 
   7430 	return 0;
   7431 }
   7432 
   7433 /*
   7434  * wm_tbi_set_linkled:
   7435  *
   7436  *	Update the link LED on 1000BASE-X devices.
   7437  */
   7438 static void
   7439 wm_tbi_set_linkled(struct wm_softc *sc)
   7440 {
   7441 
   7442 	if (sc->sc_tbi_linkup)
   7443 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7444 	else
   7445 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7446 
   7447 	/* 82540 or newer devices are active low */
   7448 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7449 
   7450 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7451 }
   7452 
   7453 /*
   7454  * wm_tbi_check_link:
   7455  *
   7456  *	Check the link on 1000BASE-X devices.
   7457  */
   7458 static void
   7459 wm_tbi_check_link(struct wm_softc *sc)
   7460 {
   7461 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7462 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7463 	uint32_t status;
   7464 
   7465 	KASSERT(WM_TX_LOCKED(sc));
   7466 
   7467 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
   7468 		sc->sc_tbi_linkup = 1;
   7469 		return;
   7470 	}
   7471 
   7472 	status = CSR_READ(sc, WMREG_STATUS);
   7473 
   7474 	/* XXX is this needed? */
   7475 	(void)CSR_READ(sc, WMREG_RXCW);
   7476 	(void)CSR_READ(sc, WMREG_CTRL);
   7477 
   7478 	/* set link status */
   7479 	if ((status & STATUS_LU) == 0) {
   7480 		DPRINTF(WM_DEBUG_LINK,
   7481 		    ("%s: LINK: checklink -> down\n",
   7482 			device_xname(sc->sc_dev)));
   7483 		sc->sc_tbi_linkup = 0;
   7484 	} else if (sc->sc_tbi_linkup == 0) {
   7485 		DPRINTF(WM_DEBUG_LINK,
   7486 		    ("%s: LINK: checklink -> up %s\n",
   7487 			device_xname(sc->sc_dev),
   7488 			(status & STATUS_FD) ? "FDX" : "HDX"));
   7489 		sc->sc_tbi_linkup = 1;
   7490 	}
   7491 
   7492 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
   7493 	    && ((status & STATUS_LU) == 0)) {
   7494 		sc->sc_tbi_linkup = 0;
   7495 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
   7496 			/* RXCFG storm! */
   7497 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
   7498 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
   7499 			wm_init_locked(ifp);
   7500 			WM_TX_UNLOCK(sc);
   7501 			ifp->if_start(ifp);
   7502 			WM_TX_LOCK(sc);
   7503 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7504 			/* If the timer expired, retry autonegotiation */
   7505 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
   7506 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   7507 				sc->sc_tbi_ticks = 0;
   7508 				/*
   7509 				 * Reset the link, and let autonegotiation do
   7510 				 * its thing
   7511 				 */
   7512 				sc->sc_ctrl |= CTRL_LRST;
   7513 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7514 				CSR_WRITE_FLUSH(sc);
   7515 				delay(1000);
   7516 				sc->sc_ctrl &= ~CTRL_LRST;
   7517 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7518 				CSR_WRITE_FLUSH(sc);
   7519 				delay(1000);
   7520 				CSR_WRITE(sc, WMREG_TXCW,
   7521 				    sc->sc_txcw & ~TXCW_ANE);
   7522 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   7523 			}
   7524 		}
   7525 	}
   7526 
   7527 	wm_tbi_set_linkled(sc);
   7528 }
   7529 
   7530 /*
   7531  * NVM related.
   7532  * Microwire, SPI (w/wo EERD) and Flash.
   7533  */
   7534 
   7535 /* Both spi and uwire */
   7536 
   7537 /*
   7538  * wm_eeprom_sendbits:
   7539  *
   7540  *	Send a series of bits to the EEPROM.
   7541  */
   7542 static void
   7543 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   7544 {
   7545 	uint32_t reg;
   7546 	int x;
   7547 
   7548 	reg = CSR_READ(sc, WMREG_EECD);
   7549 
   7550 	for (x = nbits; x > 0; x--) {
   7551 		if (bits & (1U << (x - 1)))
   7552 			reg |= EECD_DI;
   7553 		else
   7554 			reg &= ~EECD_DI;
   7555 		CSR_WRITE(sc, WMREG_EECD, reg);
   7556 		CSR_WRITE_FLUSH(sc);
   7557 		delay(2);
   7558 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7559 		CSR_WRITE_FLUSH(sc);
   7560 		delay(2);
   7561 		CSR_WRITE(sc, WMREG_EECD, reg);
   7562 		CSR_WRITE_FLUSH(sc);
   7563 		delay(2);
   7564 	}
   7565 }
   7566 
   7567 /*
   7568  * wm_eeprom_recvbits:
   7569  *
   7570  *	Receive a series of bits from the EEPROM.
   7571  */
   7572 static void
   7573 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   7574 {
   7575 	uint32_t reg, val;
   7576 	int x;
   7577 
   7578 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   7579 
   7580 	val = 0;
   7581 	for (x = nbits; x > 0; x--) {
   7582 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   7583 		CSR_WRITE_FLUSH(sc);
   7584 		delay(2);
   7585 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   7586 			val |= (1U << (x - 1));
   7587 		CSR_WRITE(sc, WMREG_EECD, reg);
   7588 		CSR_WRITE_FLUSH(sc);
   7589 		delay(2);
   7590 	}
   7591 	*valp = val;
   7592 }
   7593 
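         /*
          * Both the Microwire and SPI code paths below are built on the two
          * helpers above: EECD_DI is driven (and EECD_DO sampled) while
          * EECD_SK is pulsed high then low around every bit, MSB first,
          * with 2us settle delays, yielding a serial clock in the low
          * hundreds of kHz, which both EEPROM families tolerate easily.
          */
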
   7594 /* Microwire */
   7595 
   7596 /*
   7597  * wm_nvm_read_uwire:
   7598  *
   7599  *	Read a word from the EEPROM using the MicroWire protocol.
   7600  */
   7601 static int
   7602 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7603 {
   7604 	uint32_t reg, val;
   7605 	int i;
   7606 
   7607 	for (i = 0; i < wordcnt; i++) {
   7608 		/* Clear SK and DI. */
   7609 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   7610 		CSR_WRITE(sc, WMREG_EECD, reg);
   7611 
   7612 		/*
   7613 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   7614 		 * and Xen.
   7615 		 *
   7616 		 * We use this workaround only for 82540 because qemu's
   7617 		 * e1000 act as 82540.
		 * e1000 acts as an 82540.
   7619 		if (sc->sc_type == WM_T_82540) {
   7620 			reg |= EECD_SK;
   7621 			CSR_WRITE(sc, WMREG_EECD, reg);
   7622 			reg &= ~EECD_SK;
   7623 			CSR_WRITE(sc, WMREG_EECD, reg);
   7624 			CSR_WRITE_FLUSH(sc);
   7625 			delay(2);
   7626 		}
   7627 		/* XXX: end of workaround */
   7628 
   7629 		/* Set CHIP SELECT. */
   7630 		reg |= EECD_CS;
   7631 		CSR_WRITE(sc, WMREG_EECD, reg);
   7632 		CSR_WRITE_FLUSH(sc);
   7633 		delay(2);
   7634 
   7635 		/* Shift in the READ command. */
   7636 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   7637 
   7638 		/* Shift in address. */
   7639 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   7640 
   7641 		/* Shift out the data. */
   7642 		wm_eeprom_recvbits(sc, &val, 16);
   7643 		data[i] = val & 0xffff;
   7644 
   7645 		/* Clear CHIP SELECT. */
   7646 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   7647 		CSR_WRITE(sc, WMREG_EECD, reg);
   7648 		CSR_WRITE_FLUSH(sc);
   7649 		delay(2);
   7650 	}
   7651 
   7652 	return 0;
   7653 }
   7654 
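         /*
          * A Microwire read as performed above is, per word: raise chip
          * select, clock out the 3-bit READ opcode (UWIRE_OPC_READ) and an
          * sc_ee_addrbits-bit address, clock in 16 data bits, then drop
          * chip select again before moving on to the next word.
          */
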
   7655 /* SPI */
   7656 
   7657 /* Set SPI related information */
   7658 static void
   7659 wm_set_spiaddrbits(struct wm_softc *sc)
   7660 {
   7661 	uint32_t reg;
   7662 
   7663 	sc->sc_flags |= WM_F_EEPROM_SPI;
   7664 	reg = CSR_READ(sc, WMREG_EECD);
   7665 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   7666 }
   7667 
   7668 /*
   7669  * wm_nvm_ready_spi:
   7670  *
   7671  *	Wait for a SPI EEPROM to be ready for commands.
   7672  */
   7673 static int
   7674 wm_nvm_ready_spi(struct wm_softc *sc)
   7675 {
   7676 	uint32_t val;
   7677 	int usec;
   7678 
   7679 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   7680 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   7681 		wm_eeprom_recvbits(sc, &val, 8);
   7682 		if ((val & SPI_SR_RDY) == 0)
   7683 			break;
   7684 	}
   7685 	if (usec >= SPI_MAX_RETRIES) {
   7686 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   7687 		return 1;
   7688 	}
   7689 	return 0;
   7690 }
   7691 
   7692 /*
   7693  * wm_nvm_read_spi:
   7694  *
    7695  *	Read a word from the EEPROM using the SPI protocol.
   7696  */
   7697 static int
   7698 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   7699 {
   7700 	uint32_t reg, val;
   7701 	int i;
   7702 	uint8_t opc;
   7703 
   7704 	/* Clear SK and CS. */
   7705 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   7706 	CSR_WRITE(sc, WMREG_EECD, reg);
   7707 	CSR_WRITE_FLUSH(sc);
   7708 	delay(2);
   7709 
   7710 	if (wm_nvm_ready_spi(sc))
   7711 		return 1;
   7712 
   7713 	/* Toggle CS to flush commands. */
   7714 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   7715 	CSR_WRITE_FLUSH(sc);
   7716 	delay(2);
   7717 	CSR_WRITE(sc, WMREG_EECD, reg);
   7718 	CSR_WRITE_FLUSH(sc);
   7719 	delay(2);
   7720 
   7721 	opc = SPI_OPC_READ;
   7722 	if (sc->sc_ee_addrbits == 8 && word >= 128)
   7723 		opc |= SPI_OPC_A8;
   7724 
   7725 	wm_eeprom_sendbits(sc, opc, 8);
   7726 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
   7727 
   7728 	for (i = 0; i < wordcnt; i++) {
   7729 		wm_eeprom_recvbits(sc, &val, 16);
   7730 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   7731 	}
   7732 
   7733 	/* Raise CS and clear SK. */
   7734 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   7735 	CSR_WRITE(sc, WMREG_EECD, reg);
   7736 	CSR_WRITE_FLUSH(sc);
   7737 	delay(2);
   7738 
   7739 	return 0;
   7740 }
   7741 
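         /*
          * Notes on the SPI path above: small parts that decode only 8
          * address bits expose words 128 and up via the SPI_OPC_A8 opcode
          * modifier, the word address is shifted left once because SPI
          * EEPROMs are byte addressed, and each 16-bit word arrives
          * big-endian and is byte-swapped before being stored.  Unlike
          * Microwire, one READ command streams all 'wordcnt' words.
          */
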
   7742 /* Using with EERD */
   7743 
   7744 static int
   7745 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   7746 {
   7747 	uint32_t attempts = 100000;
   7748 	uint32_t i, reg = 0;
   7749 	int32_t done = -1;
   7750 
   7751 	for (i = 0; i < attempts; i++) {
   7752 		reg = CSR_READ(sc, rw);
   7753 
   7754 		if (reg & EERD_DONE) {
   7755 			done = 0;
   7756 			break;
   7757 		}
   7758 		delay(5);
   7759 	}
   7760 
   7761 	return done;
   7762 }
   7763 
   7764 static int
   7765 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   7766     uint16_t *data)
   7767 {
   7768 	int i, eerd = 0;
   7769 	int error = 0;
   7770 
   7771 	for (i = 0; i < wordcnt; i++) {
   7772 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   7773 
   7774 		CSR_WRITE(sc, WMREG_EERD, eerd);
   7775 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   7776 		if (error != 0)
   7777 			break;
   7778 
   7779 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   7780 	}
   7781 
   7782 	return error;
   7783 }
   7784 
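         /*
          * The EERD path above is register-based rather than bit-banged:
          * each word read writes the word offset plus EERD_START, polls up
          * to 100000 iterations of 5us for EERD_DONE in
          * wm_poll_eerd_eewr_done(), and then pulls the result out of the
          * data field of the same register.
          */
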
   7785 /* Flash */
   7786 
   7787 static int
   7788 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   7789 {
   7790 	uint32_t eecd;
   7791 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   7792 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   7793 	uint8_t sig_byte = 0;
   7794 
   7795 	switch (sc->sc_type) {
   7796 	case WM_T_ICH8:
   7797 	case WM_T_ICH9:
   7798 		eecd = CSR_READ(sc, WMREG_EECD);
   7799 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   7800 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   7801 			return 0;
   7802 		}
   7803 		/* FALLTHROUGH */
   7804 	default:
   7805 		/* Default to 0 */
   7806 		*bank = 0;
   7807 
   7808 		/* Check bank 0 */
   7809 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   7810 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7811 			*bank = 0;
   7812 			return 0;
   7813 		}
   7814 
   7815 		/* Check bank 1 */
   7816 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   7817 		    &sig_byte);
   7818 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   7819 			*bank = 1;
   7820 			return 0;
   7821 		}
   7822 	}
   7823 
   7824 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   7825 		device_xname(sc->sc_dev)));
   7826 	return -1;
   7827 }
   7828 
   7829 /******************************************************************************
   7830  * This function does initial flash setup so that a new read/write/erase cycle
   7831  * can be started.
   7832  *
   7833  * sc - The pointer to the hw structure
   7834  ****************************************************************************/
   7835 static int32_t
   7836 wm_ich8_cycle_init(struct wm_softc *sc)
   7837 {
   7838 	uint16_t hsfsts;
   7839 	int32_t error = 1;
   7840 	int32_t i     = 0;
   7841 
   7842 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7843 
    7844 	/* Check the Flash Descriptor Valid bit in Hw status */
   7845 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   7846 		return error;
   7847 	}
   7848 
    7849 	/* Clear FCERR and DAEL in Hw status by writing 1s */
   7851 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   7852 
   7853 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7854 
    7855 	/*
    7856 	 * Either we should have a hardware SPI cycle-in-progress bit to check
    7857 	 * against in order to start a new cycle, or the FDONE bit should be
    7858 	 * changed in the hardware so that it is 1 after hardware reset, which
    7859 	 * can then be used to tell whether a cycle is in progress or has been
    7860 	 * completed.  We should also have some software semaphore mechanism to
    7861 	 * guard FDONE or the cycle-in-progress bit so that accesses to those
    7862 	 * bits by two threads are serialized, or some way to keep two threads
    7863 	 * from starting a cycle at the same time.
    7864 	 */
   7865 
   7866 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7867 		/*
   7868 		 * There is no cycle running at present, so we can start a
   7869 		 * cycle
   7870 		 */
   7871 
   7872 		/* Begin by setting Flash Cycle Done. */
   7873 		hsfsts |= HSFSTS_DONE;
   7874 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7875 		error = 0;
   7876 	} else {
   7877 		/*
    7878 		 * Otherwise, poll for some time so the current cycle has a
    7879 		 * chance to end before giving up.
   7880 		 */
   7881 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   7882 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7883 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   7884 				error = 0;
   7885 				break;
   7886 			}
   7887 			delay(1);
   7888 		}
   7889 		if (error == 0) {
   7890 			/*
    7891 			 * The previous cycle finished in time; now set
    7892 			 * the Flash Cycle Done bit.
   7893 			 */
   7894 			hsfsts |= HSFSTS_DONE;
   7895 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   7896 		}
   7897 	}
   7898 	return error;
   7899 }
   7900 
   7901 /******************************************************************************
   7902  * This function starts a flash cycle and waits for its completion
   7903  *
   7904  * sc - The pointer to the hw structure
   7905  ****************************************************************************/
   7906 static int32_t
   7907 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   7908 {
   7909 	uint16_t hsflctl;
   7910 	uint16_t hsfsts;
   7911 	int32_t error = 1;
   7912 	uint32_t i = 0;
   7913 
   7914 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   7915 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   7916 	hsflctl |= HSFCTL_GO;
   7917 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7918 
   7919 	/* Wait till FDONE bit is set to 1 */
   7920 	do {
   7921 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   7922 		if (hsfsts & HSFSTS_DONE)
   7923 			break;
   7924 		delay(1);
   7925 		i++;
   7926 	} while (i < timeout);
    7927 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   7928 		error = 0;
   7929 
   7930 	return error;
   7931 }
   7932 
   7933 /******************************************************************************
   7934  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   7935  *
   7936  * sc - The pointer to the hw structure
   7937  * index - The index of the byte or word to read.
   7938  * size - Size of data to read, 1=byte 2=word
   7939  * data - Pointer to the word to store the value read.
   7940  *****************************************************************************/
   7941 static int32_t
   7942 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   7943     uint32_t size, uint16_t *data)
   7944 {
   7945 	uint16_t hsfsts;
   7946 	uint16_t hsflctl;
   7947 	uint32_t flash_linear_address;
   7948 	uint32_t flash_data = 0;
   7949 	int32_t error = 1;
   7950 	int32_t count = 0;
   7951 
    7952 	if (size < 1 || size > 2 || data == NULL ||
    7953 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   7954 		return error;
   7955 
   7956 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   7957 	    sc->sc_ich8_flash_base;
   7958 
   7959 	do {
   7960 		delay(1);
   7961 		/* Steps */
   7962 		error = wm_ich8_cycle_init(sc);
   7963 		if (error)
   7964 			break;
   7965 
   7966 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    7967 		/* A byte count of 0b/1b requests 1 or 2 bytes, respectively. */
   7968 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   7969 		    & HSFCTL_BCOUNT_MASK;
   7970 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   7971 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   7972 
   7973 		/*
   7974 		 * Write the last 24 bits of index into Flash Linear address
   7975 		 * field in Flash Address
   7976 		 */
   7977 		/* TODO: TBD maybe check the index against the size of flash */
   7978 
   7979 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   7980 
   7981 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   7982 
   7983 		/*
    7984 		 * If FCERR is set, clear it and retry the whole sequence
    7985 		 * a few more times; otherwise read the result (least
    7986 		 * significant byte first) out of Flash Data0.
   7988 		 */
   7989 		if (error == 0) {
   7990 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   7991 			if (size == 1)
   7992 				*data = (uint8_t)(flash_data & 0x000000FF);
   7993 			else if (size == 2)
   7994 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   7995 			break;
   7996 		} else {
   7997 			/*
   7998 			 * If we've gotten here, then things are probably
   7999 			 * completely hosed, but if the error condition is
   8000 			 * detected, it won't hurt to give it another try...
   8001 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   8002 			 */
   8003 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   8004 			if (hsfsts & HSFSTS_ERR) {
   8005 				/* Repeat for some time before giving up. */
   8006 				continue;
   8007 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   8008 				break;
   8009 		}
   8010 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   8011 
   8012 	return error;
   8013 }
   8014 
   8015 /******************************************************************************
   8016  * Reads a single byte from the NVM using the ICH8 flash access registers.
   8017  *
   8018  * sc - pointer to wm_hw structure
   8019  * index - The index of the byte to read.
   8020  * data - Pointer to a byte to store the value read.
   8021  *****************************************************************************/
   8022 static int32_t
   8023 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   8024 {
   8025 	int32_t status;
   8026 	uint16_t word = 0;
   8027 
   8028 	status = wm_read_ich8_data(sc, index, 1, &word);
   8029 	if (status == 0)
   8030 		*data = (uint8_t)word;
   8031 	else
   8032 		*data = 0;
   8033 
   8034 	return status;
   8035 }
   8036 
   8037 /******************************************************************************
   8038  * Reads a word from the NVM using the ICH8 flash access registers.
   8039  *
   8040  * sc - pointer to wm_hw structure
   8041  * index - The starting byte index of the word to read.
   8042  * data - Pointer to a word to store the value read.
   8043  *****************************************************************************/
   8044 static int32_t
   8045 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   8046 {
   8047 	int32_t status;
   8048 
   8049 	status = wm_read_ich8_data(sc, index, 2, data);
   8050 	return status;
   8051 }
   8052 
   8053 /******************************************************************************
   8054  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   8055  * register.
   8056  *
   8057  * sc - Struct containing variables accessed by shared code
   8058  * offset - offset of word in the EEPROM to read
   8059  * data - word read from the EEPROM
   8060  * words - number of words to read
   8061  *****************************************************************************/
   8062 static int
   8063 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   8064 {
   8065 	int32_t  error = 0;
   8066 	uint32_t flash_bank = 0;
   8067 	uint32_t act_offset = 0;
   8068 	uint32_t bank_offset = 0;
   8069 	uint16_t word = 0;
   8070 	uint16_t i = 0;
   8071 
   8072 	/*
   8073 	 * We need to know which is the valid flash bank.  In the event
   8074 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   8075 	 * managing flash_bank.  So it cannot be trusted and needs
   8076 	 * to be updated with each read.
   8077 	 */
   8078 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   8079 	if (error) {
   8080 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
   8081 		    __func__);
   8082 		flash_bank = 0;
   8083 	}
   8084 
   8085 	/*
   8086 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   8087 	 * size
   8088 	 */
   8089 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   8090 
   8091 	error = wm_get_swfwhw_semaphore(sc);
   8092 	if (error) {
   8093 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8094 		    __func__);
   8095 		return error;
   8096 	}
   8097 
   8098 	for (i = 0; i < words; i++) {
   8099 		/* The NVM part needs a byte offset, hence * 2 */
   8100 		act_offset = bank_offset + ((offset + i) * 2);
   8101 		error = wm_read_ich8_word(sc, act_offset, &word);
   8102 		if (error) {
   8103 			aprint_error_dev(sc->sc_dev,
   8104 			    "%s: failed to read NVM\n", __func__);
   8105 			break;
   8106 		}
   8107 		data[i] = word;
   8108 	}
   8109 
   8110 	wm_put_swfwhw_semaphore(sc);
   8111 	return error;
   8112 }
   8113 
    8114 /* Lock, detect NVM type, validate checksum and read */
   8115 
   8116 /*
   8117  * wm_nvm_acquire:
   8118  *
   8119  *	Perform the EEPROM handshake required on some chips.
   8120  */
   8121 static int
   8122 wm_nvm_acquire(struct wm_softc *sc)
   8123 {
   8124 	uint32_t reg;
   8125 	int x;
   8126 	int ret = 0;
   8127 
    8128 	/* Always succeeds: flash-type NVM needs no handshake here */
   8129 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8130 		return 0;
   8131 
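         	/*
         	 * The WM_F_LOCK_* flags are set per chip type at attach time;
         	 * at most one of the three branches below is taken for a
         	 * given chip.
         	 */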
   8132 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8133 		ret = wm_get_swfwhw_semaphore(sc);
   8134 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8135 		/* This will also do wm_get_swsm_semaphore() if needed */
   8136 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   8137 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8138 		ret = wm_get_swsm_semaphore(sc);
   8139 	}
   8140 
   8141 	if (ret) {
   8142 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8143 			__func__);
   8144 		return 1;
   8145 	}
   8146 
   8147 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8148 		reg = CSR_READ(sc, WMREG_EECD);
   8149 
   8150 		/* Request EEPROM access. */
   8151 		reg |= EECD_EE_REQ;
   8152 		CSR_WRITE(sc, WMREG_EECD, reg);
   8153 
    8154 		/* ... and wait for it to be granted. */
   8155 		for (x = 0; x < 1000; x++) {
   8156 			reg = CSR_READ(sc, WMREG_EECD);
   8157 			if (reg & EECD_EE_GNT)
   8158 				break;
   8159 			delay(5);
   8160 		}
   8161 		if ((reg & EECD_EE_GNT) == 0) {
   8162 			aprint_error_dev(sc->sc_dev,
   8163 			    "could not acquire EEPROM GNT\n");
   8164 			reg &= ~EECD_EE_REQ;
   8165 			CSR_WRITE(sc, WMREG_EECD, reg);
   8166 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8167 				wm_put_swfwhw_semaphore(sc);
   8168 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   8169 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8170 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8171 				wm_put_swsm_semaphore(sc);
   8172 			return 1;
   8173 		}
   8174 	}
   8175 
   8176 	return 0;
   8177 }
   8178 
   8179 /*
   8180  * wm_nvm_release:
   8181  *
   8182  *	Release the EEPROM mutex.
   8183  */
   8184 static void
   8185 wm_nvm_release(struct wm_softc *sc)
   8186 {
   8187 	uint32_t reg;
   8188 
    8189 	/* Nothing to release for flash-type NVM */
   8190 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   8191 		return;
   8192 
   8193 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   8194 		reg = CSR_READ(sc, WMREG_EECD);
   8195 		reg &= ~EECD_EE_REQ;
   8196 		CSR_WRITE(sc, WMREG_EECD, reg);
   8197 	}
   8198 
   8199 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8200 		wm_put_swfwhw_semaphore(sc);
   8201 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8202 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   8203 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   8204 		wm_put_swsm_semaphore(sc);
   8205 }
   8206 
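         /*
          * wm_nvm_is_onboard_eeprom:
          *
          *	Return 1 if the NVM is an on-board EEPROM; return 0 when an
          *	82573/82574/82583 reports a flash-type NVM in EECD.
          */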
   8207 static int
   8208 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   8209 {
   8210 	uint32_t eecd = 0;
   8211 
   8212 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   8213 	    || sc->sc_type == WM_T_82583) {
   8214 		eecd = CSR_READ(sc, WMREG_EECD);
   8215 
   8216 		/* Isolate bits 15 & 16 */
   8217 		eecd = ((eecd >> 15) & 0x03);
   8218 
   8219 		/* If both bits are set, device is Flash type */
   8220 		if (eecd == 0x03)
   8221 			return 0;
   8222 	}
   8223 	return 1;
   8224 }
   8225 
   8226 #define NVM_CHECKSUM			0xBABA
   8227 #define EEPROM_SIZE			0x0040
   8228 #define NVM_COMPAT			0x0003
   8229 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
   8230 #define NVM_FUTURE_INIT_WORD1			0x0019
   8231 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
   8232 
   8233 /*
   8234  * wm_nvm_validate_checksum
   8235  *
   8236  * The checksum is defined as the sum of the first 64 (16 bit) words.
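          * That is, all 64 words (including the checksum word itself) must
          * sum to NVM_CHECKSUM (0xBABA) modulo 2^16; the checksum word is
          * therefore written as 0xBABA minus the 16-bit sum of the other
          * 63 words.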
   8237  */
   8238 static int
   8239 wm_nvm_validate_checksum(struct wm_softc *sc)
   8240 {
   8241 	uint16_t checksum;
   8242 	uint16_t eeprom_data;
   8243 #ifdef WM_DEBUG
   8244 	uint16_t csum_wordaddr, valid_checksum;
   8245 #endif
   8246 	int i;
   8247 
   8248 	checksum = 0;
   8249 
   8250 	/* Don't check for I211 */
   8251 	if (sc->sc_type == WM_T_I211)
   8252 		return 0;
   8253 
   8254 #ifdef WM_DEBUG
   8255 	if (sc->sc_type == WM_T_PCH_LPT) {
   8256 		csum_wordaddr = NVM_COMPAT;
   8257 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   8258 	} else {
   8259 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
   8260 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   8261 	}
   8262 
   8263 	/* Dump EEPROM image for debug */
   8264 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8265 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8266 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   8267 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   8268 		if ((eeprom_data & valid_checksum) == 0) {
   8269 			DPRINTF(WM_DEBUG_NVM,
    8270 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   8271 				device_xname(sc->sc_dev), eeprom_data,
   8272 				    valid_checksum));
   8273 		}
   8274 	}
   8275 
   8276 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   8277 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   8278 		for (i = 0; i < EEPROM_SIZE; i++) {
   8279 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8280 				printf("XX ");
   8281 			else
   8282 				printf("%04x ", eeprom_data);
   8283 			if (i % 8 == 7)
   8284 				printf("\n");
   8285 		}
   8286 	}
   8287 
   8288 #endif /* WM_DEBUG */
   8289 
   8290 	for (i = 0; i < EEPROM_SIZE; i++) {
   8291 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   8292 			return 1;
   8293 		checksum += eeprom_data;
   8294 	}
   8295 
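         	/*
         	 * Note that a mismatch is only logged below: we still return 0,
         	 * presumably because some boards ship with an NVM image whose
         	 * words do not sum to NVM_CHECKSUM.
         	 */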
   8296 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   8297 #ifdef WM_DEBUG
   8298 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   8299 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   8300 #endif
   8301 	}
   8302 
   8303 	return 0;
   8304 }
   8305 
   8306 /*
   8307  * wm_nvm_read:
   8308  *
   8309  *	Read data from the serial EEPROM.
   8310  */
   8311 static int
   8312 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   8313 {
   8314 	int rv;
   8315 
   8316 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   8317 		return 1;
   8318 
   8319 	if (wm_nvm_acquire(sc))
   8320 		return 1;
   8321 
   8322 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8323 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8324 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   8325 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   8326 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   8327 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   8328 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   8329 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   8330 	else
   8331 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   8332 
   8333 	wm_nvm_release(sc);
   8334 	return rv;
   8335 }
   8336 
   8337 /*
   8338  * Hardware semaphores.
    8339  * Very complex.
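          * Three mechanisms are used, selected by the WM_F_LOCK_* flags:
          * the SWSM register (SMBI/SWESMBI bits), the SW_FW_SYNC register
          * (per-resource software/firmware bits) and the software flag in
          * EXTCNFCTR.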
   8340  */
   8341 
   8342 static int
   8343 wm_get_swsm_semaphore(struct wm_softc *sc)
   8344 {
   8345 	int32_t timeout;
   8346 	uint32_t swsm;
   8347 
   8348 	/* Get the SW semaphore. */
   8349 	timeout = 1000 + 1; /* XXX */
   8350 	while (timeout) {
   8351 		swsm = CSR_READ(sc, WMREG_SWSM);
   8352 
   8353 		if ((swsm & SWSM_SMBI) == 0)
   8354 			break;
   8355 
   8356 		delay(50);
   8357 		timeout--;
   8358 	}
   8359 
   8360 	if (timeout == 0) {
   8361 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   8362 		return 1;
   8363 	}
   8364 
   8365 	/* Get the FW semaphore. */
   8366 	timeout = 1000 + 1; /* XXX */
   8367 	while (timeout) {
   8368 		swsm = CSR_READ(sc, WMREG_SWSM);
   8369 		swsm |= SWSM_SWESMBI;
   8370 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   8371 		/* If we managed to set the bit we got the semaphore. */
   8372 		swsm = CSR_READ(sc, WMREG_SWSM);
   8373 		if (swsm & SWSM_SWESMBI)
   8374 			break;
   8375 
   8376 		delay(50);
   8377 		timeout--;
   8378 	}
   8379 
   8380 	if (timeout == 0) {
   8381 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   8382 		/* Release semaphores */
   8383 		wm_put_swsm_semaphore(sc);
   8384 		return 1;
   8385 	}
   8386 	return 0;
   8387 }
   8388 
   8389 static void
   8390 wm_put_swsm_semaphore(struct wm_softc *sc)
   8391 {
   8392 	uint32_t swsm;
   8393 
   8394 	swsm = CSR_READ(sc, WMREG_SWSM);
   8395 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   8396 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   8397 }
   8398 
   8399 static int
   8400 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8401 {
   8402 	uint32_t swfw_sync;
   8403 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   8404 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    8405 	int timeout;
   8406 
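         	/*
         	 * SW_FW_SYNC holds software-owned bits (mask << SWFW_SOFT_SHIFT)
         	 * and firmware-owned bits (mask << SWFW_FIRM_SHIFT) for each
         	 * resource; the resource is free only when neither side holds
         	 * its bit.
         	 */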
   8407 	for (timeout = 0; timeout < 200; timeout++) {
   8408 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8409 			if (wm_get_swsm_semaphore(sc)) {
   8410 				aprint_error_dev(sc->sc_dev,
   8411 				    "%s: failed to get semaphore\n",
   8412 				    __func__);
   8413 				return 1;
   8414 			}
   8415 		}
   8416 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8417 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   8418 			swfw_sync |= swmask;
   8419 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8420 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   8421 				wm_put_swsm_semaphore(sc);
   8422 			return 0;
   8423 		}
   8424 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   8425 			wm_put_swsm_semaphore(sc);
   8426 		delay(5000);
   8427 	}
   8428 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   8429 	    device_xname(sc->sc_dev), mask, swfw_sync);
   8430 	return 1;
   8431 }
   8432 
   8433 static void
   8434 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   8435 {
   8436 	uint32_t swfw_sync;
   8437 
   8438 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   8439 		while (wm_get_swsm_semaphore(sc) != 0)
   8440 			continue;
   8441 	}
   8442 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   8443 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   8444 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   8445 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   8446 		wm_put_swsm_semaphore(sc);
   8447 }
   8448 
   8449 static int
   8450 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   8451 {
   8452 	uint32_t ext_ctrl;
    8453 	int timeout;
   8454 
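         	/*
         	 * Set the software flag and read it back: the write only
         	 * sticks when firmware is not holding the flag, so seeing it
         	 * set again means we own the semaphore.
         	 */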
   8455 	for (timeout = 0; timeout < 200; timeout++) {
   8456 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8457 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   8458 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8459 
   8460 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8461 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   8462 			return 0;
   8463 		delay(5000);
   8464 	}
   8465 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   8466 	    device_xname(sc->sc_dev), ext_ctrl);
   8467 	return 1;
   8468 }
   8469 
   8470 static void
   8471 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   8472 {
   8473 	uint32_t ext_ctrl;
   8474 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   8475 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   8476 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   8477 }
   8478 
   8479 static int
   8480 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   8481 {
   8482 	int i = 0;
   8483 	uint32_t reg;
   8484 
   8485 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8486 	do {
   8487 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   8488 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   8489 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8490 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   8491 			break;
   8492 		delay(2*1000);
   8493 		i++;
   8494 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   8495 
   8496 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   8497 		wm_put_hw_semaphore_82573(sc);
   8498 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   8499 		    device_xname(sc->sc_dev));
   8500 		return -1;
   8501 	}
   8502 
   8503 	return 0;
   8504 }
   8505 
   8506 static void
   8507 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   8508 {
   8509 	uint32_t reg;
   8510 
   8511 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8512 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   8513 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8514 }
   8515 
   8516 /*
   8517  * Management mode and power management related subroutines.
   8518  * BMC, AMT, suspend/resume and EEE.
   8519  */
   8520 
   8521 static int
   8522 wm_check_mng_mode(struct wm_softc *sc)
   8523 {
   8524 	int rv;
   8525 
   8526 	switch (sc->sc_type) {
   8527 	case WM_T_ICH8:
   8528 	case WM_T_ICH9:
   8529 	case WM_T_ICH10:
   8530 	case WM_T_PCH:
   8531 	case WM_T_PCH2:
   8532 	case WM_T_PCH_LPT:
   8533 		rv = wm_check_mng_mode_ich8lan(sc);
   8534 		break;
   8535 	case WM_T_82574:
   8536 	case WM_T_82583:
   8537 		rv = wm_check_mng_mode_82574(sc);
   8538 		break;
   8539 	case WM_T_82571:
   8540 	case WM_T_82572:
   8541 	case WM_T_82573:
   8542 	case WM_T_80003:
   8543 		rv = wm_check_mng_mode_generic(sc);
   8544 		break;
   8545 	default:
    8546 		/* nothing to do */
   8547 		rv = 0;
   8548 		break;
   8549 	}
   8550 
   8551 	return rv;
   8552 }
   8553 
   8554 static int
   8555 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   8556 {
   8557 	uint32_t fwsm;
   8558 
   8559 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8560 
   8561 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   8562 		return 1;
   8563 
   8564 	return 0;
   8565 }
   8566 
   8567 static int
   8568 wm_check_mng_mode_82574(struct wm_softc *sc)
   8569 {
   8570 	uint16_t data;
   8571 
   8572 	wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8573 
   8574 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
   8575 		return 1;
   8576 
   8577 	return 0;
   8578 }
   8579 
   8580 static int
   8581 wm_check_mng_mode_generic(struct wm_softc *sc)
   8582 {
   8583 	uint32_t fwsm;
   8584 
   8585 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8586 
   8587 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   8588 		return 1;
   8589 
   8590 	return 0;
   8591 }
   8592 
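         /*
          * wm_enable_mng_pass_thru:
          *
          *	Return 1 if the management firmware wants management packets
          *	passed through to the host (TCO receive enabled and the
          *	firmware in a pass-through mode), 0 otherwise.
          */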
   8593 static int
   8594 wm_enable_mng_pass_thru(struct wm_softc *sc)
   8595 {
   8596 	uint32_t manc, fwsm, factps;
   8597 
   8598 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   8599 		return 0;
   8600 
   8601 	manc = CSR_READ(sc, WMREG_MANC);
   8602 
   8603 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   8604 		device_xname(sc->sc_dev), manc));
   8605 	if ((manc & MANC_RECV_TCO_EN) == 0)
   8606 		return 0;
   8607 
   8608 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   8609 		fwsm = CSR_READ(sc, WMREG_FWSM);
   8610 		factps = CSR_READ(sc, WMREG_FACTPS);
   8611 		if (((factps & FACTPS_MNGCG) == 0)
   8612 		    && ((fwsm & FWSM_MODE_MASK)
   8613 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   8614 			return 1;
    8615 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   8616 		uint16_t data;
   8617 
   8618 		factps = CSR_READ(sc, WMREG_FACTPS);
   8619 		wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
   8620 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   8621 			device_xname(sc->sc_dev), factps, data));
   8622 		if (((factps & FACTPS_MNGCG) == 0)
   8623 		    && ((data & EEPROM_CFG2_MNGM_MASK)
   8624 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
   8625 			return 1;
   8626 	} else if (((manc & MANC_SMBUS_EN) != 0)
   8627 	    && ((manc & MANC_ASF_EN) == 0))
   8628 		return 1;
   8629 
   8630 	return 0;
   8631 }
   8632 
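         /*
          * wm_check_reset_block:
          *
          *	Return 0 if a PHY reset is currently permitted, -1 if it is
          *	blocked (e.g. by manageability firmware).
          */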
   8633 static int
   8634 wm_check_reset_block(struct wm_softc *sc)
   8635 {
   8636 	uint32_t reg;
   8637 
   8638 	switch (sc->sc_type) {
   8639 	case WM_T_ICH8:
   8640 	case WM_T_ICH9:
   8641 	case WM_T_ICH10:
   8642 	case WM_T_PCH:
   8643 	case WM_T_PCH2:
   8644 	case WM_T_PCH_LPT:
   8645 		reg = CSR_READ(sc, WMREG_FWSM);
   8646 		if ((reg & FWSM_RSPCIPHY) != 0)
   8647 			return 0;
   8648 		else
   8649 			return -1;
   8650 		break;
   8651 	case WM_T_82571:
   8652 	case WM_T_82572:
   8653 	case WM_T_82573:
   8654 	case WM_T_82574:
   8655 	case WM_T_82583:
   8656 	case WM_T_80003:
   8657 		reg = CSR_READ(sc, WMREG_MANC);
   8658 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   8659 			return -1;
   8660 		else
   8661 			return 0;
   8662 		break;
   8663 	default:
   8664 		/* no problem */
   8665 		break;
   8666 	}
   8667 
   8668 	return 0;
   8669 }
   8670 
   8671 static void
   8672 wm_get_hw_control(struct wm_softc *sc)
   8673 {
   8674 	uint32_t reg;
   8675 
   8676 	switch (sc->sc_type) {
   8677 	case WM_T_82573:
   8678 		reg = CSR_READ(sc, WMREG_SWSM);
   8679 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   8680 		break;
   8681 	case WM_T_82571:
   8682 	case WM_T_82572:
   8683 	case WM_T_82574:
   8684 	case WM_T_82583:
   8685 	case WM_T_80003:
   8686 	case WM_T_ICH8:
   8687 	case WM_T_ICH9:
   8688 	case WM_T_ICH10:
   8689 	case WM_T_PCH:
   8690 	case WM_T_PCH2:
   8691 	case WM_T_PCH_LPT:
   8692 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8693 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   8694 		break;
   8695 	default:
   8696 		break;
   8697 	}
   8698 }
   8699 
   8700 static void
   8701 wm_release_hw_control(struct wm_softc *sc)
   8702 {
   8703 	uint32_t reg;
   8704 
   8705 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   8706 		return;
   8707 
   8708 	if (sc->sc_type == WM_T_82573) {
   8709 		reg = CSR_READ(sc, WMREG_SWSM);
   8710 		reg &= ~SWSM_DRV_LOAD;
    8711 		CSR_WRITE(sc, WMREG_SWSM, reg);
   8712 	} else {
   8713 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8714 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   8715 	}
   8716 }
   8717 
   8718 static void
   8719 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   8720 {
   8721 	uint32_t reg;
   8722 
   8723 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   8724 
   8725 	if (on != 0)
   8726 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   8727 	else
   8728 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   8729 
   8730 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   8731 }
   8732 
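         /*
          * wm_smbustopci:
          *
          *	When no firmware is managing the PHY (FWSM_FW_VALID clear) and
          *	resets are not blocked, toggle the LANPHYPC override to force
          *	the PHY from SMBus mode back to PCIe (MDIO) mode.
          */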
   8733 static void
   8734 wm_smbustopci(struct wm_softc *sc)
   8735 {
   8736 	uint32_t fwsm;
   8737 
   8738 	fwsm = CSR_READ(sc, WMREG_FWSM);
   8739 	if (((fwsm & FWSM_FW_VALID) == 0)
   8740 	    && ((wm_check_reset_block(sc) == 0))) {
   8741 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   8742 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   8743 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8744 		CSR_WRITE_FLUSH(sc);
   8745 		delay(10);
   8746 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   8747 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8748 		CSR_WRITE_FLUSH(sc);
   8749 		delay(50*1000);
   8750 
   8751 		/*
   8752 		 * Gate automatic PHY configuration by hardware on non-managed
   8753 		 * 82579
   8754 		 */
   8755 		if (sc->sc_type == WM_T_PCH2)
   8756 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   8757 	}
   8758 }
   8759 
   8760 static void
   8761 wm_init_manageability(struct wm_softc *sc)
   8762 {
   8763 
   8764 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8765 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   8766 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8767 
   8768 		/* Disable hardware interception of ARP */
   8769 		manc &= ~MANC_ARP_EN;
   8770 
   8771 		/* Enable receiving management packets to the host */
   8772 		if (sc->sc_type >= WM_T_82571) {
   8773 			manc |= MANC_EN_MNG2HOST;
    8774 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
    8775 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   8777 		}
   8778 
   8779 		CSR_WRITE(sc, WMREG_MANC, manc);
   8780 	}
   8781 }
   8782 
   8783 static void
   8784 wm_release_manageability(struct wm_softc *sc)
   8785 {
   8786 
   8787 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   8788 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   8789 
   8790 		manc |= MANC_ARP_EN;
   8791 		if (sc->sc_type >= WM_T_82571)
   8792 			manc &= ~MANC_EN_MNG2HOST;
   8793 
   8794 		CSR_WRITE(sc, WMREG_MANC, manc);
   8795 	}
   8796 }
   8797 
   8798 static void
   8799 wm_get_wakeup(struct wm_softc *sc)
   8800 {
   8801 
   8802 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   8803 	switch (sc->sc_type) {
   8804 	case WM_T_82573:
   8805 	case WM_T_82583:
   8806 		sc->sc_flags |= WM_F_HAS_AMT;
   8807 		/* FALLTHROUGH */
   8808 	case WM_T_80003:
   8809 	case WM_T_82541:
   8810 	case WM_T_82547:
   8811 	case WM_T_82571:
   8812 	case WM_T_82572:
   8813 	case WM_T_82574:
   8814 	case WM_T_82575:
   8815 	case WM_T_82576:
   8816 	case WM_T_82580:
   8817 	case WM_T_82580ER:
   8818 	case WM_T_I350:
   8819 	case WM_T_I354:
   8820 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   8821 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   8822 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8823 		break;
   8824 	case WM_T_ICH8:
   8825 	case WM_T_ICH9:
   8826 	case WM_T_ICH10:
   8827 	case WM_T_PCH:
   8828 	case WM_T_PCH2:
   8829 	case WM_T_PCH_LPT:
   8830 		sc->sc_flags |= WM_F_HAS_AMT;
   8831 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   8832 		break;
   8833 	default:
   8834 		break;
   8835 	}
   8836 
   8837 	/* 1: HAS_MANAGE */
   8838 	if (wm_enable_mng_pass_thru(sc) != 0)
   8839 		sc->sc_flags |= WM_F_HAS_MANAGE;
   8840 
   8841 #ifdef WM_DEBUG
   8842 	printf("\n");
   8843 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   8844 		printf("HAS_AMT,");
   8845 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   8846 		printf("ARC_SUBSYS_VALID,");
   8847 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   8848 		printf("ASF_FIRMWARE_PRES,");
   8849 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   8850 		printf("HAS_MANAGE,");
   8851 	printf("\n");
   8852 #endif
   8853 	/*
    8854 	 * Note that the WOL flags are set after the EEPROM settings have
    8855 	 * been reset.
   8856 	 */
   8857 }
   8858 
   8859 #ifdef WM_WOL
   8860 /* WOL in the newer chipset interfaces (pchlan) */
   8861 static void
   8862 wm_enable_phy_wakeup(struct wm_softc *sc)
   8863 {
   8864 #if 0
   8865 	uint16_t preg;
   8866 
   8867 	/* Copy MAC RARs to PHY RARs */
   8868 
   8869 	/* Copy MAC MTA to PHY MTA */
   8870 
   8871 	/* Configure PHY Rx Control register */
   8872 
   8873 	/* Enable PHY wakeup in MAC register */
   8874 
   8875 	/* Configure and enable PHY wakeup in PHY registers */
   8876 
   8877 	/* Activate PHY wakeup */
   8878 
   8879 	/* XXX */
   8880 #endif
   8881 }
   8882 
   8883 /* Power down workaround on D3 */
   8884 static void
   8885 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   8886 {
   8887 	uint32_t reg;
   8888 	int i;
   8889 
   8890 	for (i = 0; i < 2; i++) {
   8891 		/* Disable link */
   8892 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8893 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   8894 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8895 
   8896 		/*
   8897 		 * Call gig speed drop workaround on Gig disable before
   8898 		 * accessing any PHY registers
   8899 		 */
   8900 		if (sc->sc_type == WM_T_ICH8)
   8901 			wm_gig_downshift_workaround_ich8lan(sc);
   8902 
   8903 		/* Write VR power-down enable */
   8904 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8905 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8906 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   8907 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   8908 
   8909 		/* Read it back and test */
   8910 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   8911 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   8912 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   8913 			break;
   8914 
   8915 		/* Issue PHY reset and repeat at most one more time */
   8916 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8917 	}
   8918 }
   8919 
   8920 static void
   8921 wm_enable_wakeup(struct wm_softc *sc)
   8922 {
   8923 	uint32_t reg, pmreg;
   8924 	pcireg_t pmode;
   8925 
   8926 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   8927 		&pmreg, NULL) == 0)
   8928 		return;
   8929 
   8930 	/* Advertise the wakeup capability */
   8931 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   8932 	    | CTRL_SWDPIN(3));
   8933 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   8934 
   8935 	/* ICH workaround */
   8936 	switch (sc->sc_type) {
   8937 	case WM_T_ICH8:
   8938 	case WM_T_ICH9:
   8939 	case WM_T_ICH10:
   8940 	case WM_T_PCH:
   8941 	case WM_T_PCH2:
   8942 	case WM_T_PCH_LPT:
   8943 		/* Disable gig during WOL */
   8944 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   8945 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   8946 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   8947 		if (sc->sc_type == WM_T_PCH)
   8948 			wm_gmii_reset(sc);
   8949 
   8950 		/* Power down workaround */
   8951 		if (sc->sc_phytype == WMPHY_82577) {
   8952 			struct mii_softc *child;
   8953 
   8954 			/* Assume that the PHY is copper */
   8955 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   8956 			if (child->mii_mpd_rev <= 2)
   8957 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   8958 				    (768 << 5) | 25, 0x0444); /* magic num */
   8959 		}
   8960 		break;
   8961 	default:
   8962 		break;
   8963 	}
   8964 
   8965 	/* Keep the laser running on fiber adapters */
   8966 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
    8967 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
   8968 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8969 		reg |= CTRL_EXT_SWDPIN(3);
   8970 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8971 	}
   8972 
   8973 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   8974 #if 0	/* for the multicast packet */
   8975 	reg |= WUFC_MC;
   8976 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   8977 #endif
   8978 
   8979 	if (sc->sc_type == WM_T_PCH) {
   8980 		wm_enable_phy_wakeup(sc);
   8981 	} else {
   8982 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   8983 		CSR_WRITE(sc, WMREG_WUFC, reg);
   8984 	}
   8985 
   8986 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   8987 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   8988 		|| (sc->sc_type == WM_T_PCH2))
   8989 		    && (sc->sc_phytype == WMPHY_IGP_3))
   8990 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   8991 
   8992 	/* Request PME */
   8993 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   8994 #if 0
   8995 	/* Disable WOL */
   8996 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   8997 #else
   8998 	/* For WOL */
   8999 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   9000 #endif
   9001 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   9002 }
   9003 #endif /* WM_WOL */
   9004 
   9005 /* EEE */
   9006 
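         /*
          * wm_set_eee_i350:
          *
          *	Enable or disable Energy Efficient Ethernet (IEEE 802.3az)
          *	advertisement and low power idle (LPI) signalling according
          *	to the WM_F_EEE flag.
          */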
   9007 static void
   9008 wm_set_eee_i350(struct wm_softc *sc)
   9009 {
   9010 	uint32_t ipcnfg, eeer;
   9011 
   9012 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   9013 	eeer = CSR_READ(sc, WMREG_EEER);
   9014 
   9015 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   9016 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9017 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9018 		    | EEER_LPI_FC);
   9019 	} else {
   9020 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   9021 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   9022 		    | EEER_LPI_FC);
   9023 	}
   9024 
   9025 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   9026 	CSR_WRITE(sc, WMREG_EEER, eeer);
   9027 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   9028 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   9029 }
   9030 
   9031 /*
   9032  * Workarounds (mainly PHY related).
   9033  * Basically, PHY's workarounds are in the PHY drivers.
   9034  */
   9035 
   9036 /* Work-around for 82566 Kumeran PCS lock loss */
   9037 static void
   9038 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   9039 {
   9040 	int miistatus, active, i;
   9041 	int reg;
   9042 
   9043 	miistatus = sc->sc_mii.mii_media_status;
   9044 
   9045 	/* If the link is not up, do nothing */
    9046 	if ((miistatus & IFM_ACTIVE) == 0)
   9047 		return;
   9048 
   9049 	active = sc->sc_mii.mii_media_active;
   9050 
    9051 	/* Nothing to do if the link speed is other than 1Gbps */
   9052 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   9053 		return;
   9054 
   9055 	for (i = 0; i < 10; i++) {
   9056 		/* read twice */
   9057 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   9058 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    9059 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   9060 			goto out;	/* GOOD! */
   9061 
   9062 		/* Reset the PHY */
   9063 		wm_gmii_reset(sc);
   9064 		delay(5*1000);
   9065 	}
   9066 
   9067 	/* Disable GigE link negotiation */
   9068 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   9069 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   9070 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   9071 
   9072 	/*
   9073 	 * Call gig speed drop workaround on Gig disable before accessing
   9074 	 * any PHY registers.
   9075 	 */
   9076 	wm_gig_downshift_workaround_ich8lan(sc);
   9077 
   9078 out:
   9079 	return;
   9080 }
   9081 
   9082 /* WOL from S5 stops working */
   9083 static void
   9084 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   9085 {
   9086 	uint16_t kmrn_reg;
   9087 
   9088 	/* Only for igp3 */
   9089 	if (sc->sc_phytype == WMPHY_IGP_3) {
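         		/*
         		 * Set and then clear the Kumeran near-end loopback bit;
         		 * this brief toggle is apparently enough to keep WOL
         		 * from S5 working (see the function comment above).
         		 */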
   9090 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   9091 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   9092 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9093 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   9094 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   9095 	}
   9096 }
   9097 
   9098 /*
   9099  * Workaround for pch's PHYs
   9100  * XXX should be moved to new PHY driver?
   9101  */
   9102 static void
   9103 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   9104 {
   9105 	if (sc->sc_phytype == WMPHY_82577)
   9106 		wm_set_mdio_slow_mode_hv(sc);
   9107 
   9108 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   9109 
   9110 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   9111 
   9112 	/* 82578 */
   9113 	if (sc->sc_phytype == WMPHY_82578) {
   9114 		/* PCH rev. < 3 */
   9115 		if (sc->sc_rev < 3) {
   9116 			/* XXX 6 bit shift? Why? Is it page2? */
   9117 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   9118 			    0x66c0);
   9119 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   9120 			    0xffff);
   9121 		}
   9122 
   9123 		/* XXX phy rev. < 2 */
   9124 	}
   9125 
   9126 	/* Select page 0 */
   9127 
   9128 	/* XXX acquire semaphore */
   9129 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   9130 	/* XXX release semaphore */
   9131 
   9132 	/*
   9133 	 * Configure the K1 Si workaround during phy reset assuming there is
   9134 	 * link so that it disables K1 if link is in 1Gbps.
   9135 	 */
   9136 	wm_k1_gig_workaround_hv(sc, 1);
   9137 }
   9138 
   9139 static void
   9140 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   9141 {
   9142 
   9143 	wm_set_mdio_slow_mode_hv(sc);
   9144 }
   9145 
   9146 static void
   9147 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   9148 {
   9149 	int k1_enable = sc->sc_nvm_k1_enabled;
   9150 
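         	/*
         	 * K1 is a power-saving state of the Kumeran interface.  When
         	 * the caller reports link up, force K1 off; otherwise fall
         	 * back to the NVM default (sc_nvm_k1_enabled).
         	 */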
   9151 	/* XXX acquire semaphore */
   9152 
   9153 	if (link) {
   9154 		k1_enable = 0;
   9155 
   9156 		/* Link stall fix for link up */
   9157 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   9158 	} else {
   9159 		/* Link stall fix for link down */
   9160 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   9161 	}
   9162 
   9163 	wm_configure_k1_ich8lan(sc, k1_enable);
   9164 
   9165 	/* XXX release semaphore */
   9166 }
   9167 
   9168 static void
   9169 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   9170 {
   9171 	uint32_t reg;
   9172 
   9173 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   9174 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   9175 	    reg | HV_KMRN_MDIO_SLOW);
   9176 }
   9177 
   9178 static void
   9179 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   9180 {
   9181 	uint32_t ctrl, ctrl_ext, tmp;
   9182 	uint16_t kmrn_reg;
   9183 
   9184 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   9185 
   9186 	if (k1_enable)
   9187 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   9188 	else
   9189 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   9190 
   9191 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   9192 
   9193 	delay(20);
   9194 
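         	/*
         	 * Briefly force the MAC speed configuration (SPD_BYPS makes
         	 * the forced values take effect immediately), then restore the
         	 * original CTRL/CTRL_EXT values; this appears to be needed for
         	 * the K1 change to take effect.
         	 */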
   9195 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9196 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9197 
   9198 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   9199 	tmp |= CTRL_FRCSPD;
   9200 
   9201 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   9202 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   9203 	CSR_WRITE_FLUSH(sc);
   9204 	delay(20);
   9205 
   9206 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   9207 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9208 	CSR_WRITE_FLUSH(sc);
   9209 	delay(20);
   9210 }
   9211 
    9212 /* Special case - the 82575 needs manual init after reset ... */
   9213 static void
   9214 wm_reset_init_script_82575(struct wm_softc *sc)
   9215 {
   9216 	/*
    9217 	 * Remark: this is untested code - we have no board without EEPROM.
    9218 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   9219 	 */
   9220 
   9221 	/* SerDes configuration via SERDESCTRL */
   9222 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   9223 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   9224 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   9225 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   9226 
   9227 	/* CCM configuration via CCMCTL register */
   9228 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   9229 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   9230 
   9231 	/* PCIe lanes configuration */
   9232 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   9233 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   9234 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   9235 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   9236 
   9237 	/* PCIe PLL Configuration */
   9238 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   9239 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   9240 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   9241 }
   9242