/*	$NetBSD: if_wm.c,v 1.399 2016/05/18 06:59:59 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- Tx multiqueue support
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function support
 *	- Set the LEDs correctly (based on the EEPROM contents)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.399 2016/05/18 06:59:59 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
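
/*
 * Usage note (illustrative): the second DPRINTF() argument is the
 * entire printf() argument list, so call sites use doubled parentheses:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */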

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts that this driver can use.
 */
#define WM_MAX_NTXINTR		16
#define WM_MAX_NRXINTR		16
#define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
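
/*
 * Worked out: at most 16 Tx + 16 Rx + 1 link interrupt = 33 MSI-X
 * vectors; legacy INTx and MSI configurations use a single vector
 * (sc_ihs[0]).
 */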

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

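
/*
 * Illustrative note: txq_ndesc and txq_num must be powers of two so
 * that WM_NEXTTX()/WM_NEXTTXS() can wrap a ring index with a cheap
 * mask instead of a modulo, e.g. with a 4096-entry ring:
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */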
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
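
/*
 * Illustrative arithmetic: a ~9000 byte jumbo frame spread over 2k
 * (MCLBYTES) clusters needs ceil(9000 / 2048) = 5 Rx buffers, so a
 * 256-descriptor ring holds floor(256 / 5) = 51 of them, i.e. the
 * "room for 50 jumbo packets" noted above.
 */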

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* transmit queue index */
	int txq_intr_idx;		/* MSI-X table index */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* Tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};
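
/*
 * Illustrative note: txq_next/txq_free track hardware descriptors in
 * the ring, while txq_snext/txq_sfree/txq_sdirty track software jobs
 * (struct wm_txsoft); a single job may span several hardware
 * descriptors, recorded in txs_ndesc.
 */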

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* receive queue index */
	int rxq_intr_idx;		/* MSI-X table index */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* MSI-X table index */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC control frames */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
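
#if 0 /* Illustrative usage sketch, not part of the driver. */
static void
wm_example_locked_op(struct wm_txqueue *txq)
{

	/* The macros above degrade to no-ops if the queue has no mutex. */
	WM_TX_LOCK(txq);
	KASSERT(WM_TX_LOCKED(txq));
	/* ... modify txq state here ... */
	WM_TX_UNLOCK(txq);
}
#endif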

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
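
/*
 * Illustrative note: rxq_tailp always points at the pointer cell that
 * terminates the chain (either rxq_head or some mbuf's m_next), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain and
 * WM_RXCHAIN_RESET() re-aims it at rxq_head for the next packet.
 */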

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + (sc)->sc_flashreg_offset, (data))
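
/*
 * Illustrative note: CSR_WRITE_FLUSH() reads STATUS purely for the
 * side effect of pushing posted PCI writes out to the device, as in:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */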

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
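
/*
 * Illustrative example: with a 64-bit bus_addr_t, a descriptor address
 * of 0x123456000 splits into WM_CDTXADDR_LO() == 0x23456000 and
 * WM_CDTXADDR_HI() == 0x1; on 32-bit platforms the high half is 0.
 */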

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading via the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
    795 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    796 	  "Intel i82542 1000BASE-X Ethernet",
    797 	  WM_T_82542_2_1,	WMP_F_FIBER },
    798 
    799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    800 	  "Intel i82543GC 1000BASE-X Ethernet",
    801 	  WM_T_82543,		WMP_F_FIBER },
    802 
    803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    804 	  "Intel i82543GC 1000BASE-T Ethernet",
    805 	  WM_T_82543,		WMP_F_COPPER },
    806 
    807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    808 	  "Intel i82544EI 1000BASE-T Ethernet",
    809 	  WM_T_82544,		WMP_F_COPPER },
    810 
    811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    812 	  "Intel i82544EI 1000BASE-X Ethernet",
    813 	  WM_T_82544,		WMP_F_FIBER },
    814 
    815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    816 	  "Intel i82544GC 1000BASE-T Ethernet",
    817 	  WM_T_82544,		WMP_F_COPPER },
    818 
    819 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    820 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    821 	  WM_T_82544,		WMP_F_COPPER },
    822 
    823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    824 	  "Intel i82540EM 1000BASE-T Ethernet",
    825 	  WM_T_82540,		WMP_F_COPPER },
    826 
    827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    828 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    829 	  WM_T_82540,		WMP_F_COPPER },
    830 
    831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    832 	  "Intel i82540EP 1000BASE-T Ethernet",
    833 	  WM_T_82540,		WMP_F_COPPER },
    834 
    835 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    836 	  "Intel i82540EP 1000BASE-T Ethernet",
    837 	  WM_T_82540,		WMP_F_COPPER },
    838 
    839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    840 	  "Intel i82540EP 1000BASE-T Ethernet",
    841 	  WM_T_82540,		WMP_F_COPPER },
    842 
    843 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    844 	  "Intel i82545EM 1000BASE-T Ethernet",
    845 	  WM_T_82545,		WMP_F_COPPER },
    846 
    847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    848 	  "Intel i82545GM 1000BASE-T Ethernet",
    849 	  WM_T_82545_3,		WMP_F_COPPER },
    850 
    851 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    852 	  "Intel i82545GM 1000BASE-X Ethernet",
    853 	  WM_T_82545_3,		WMP_F_FIBER },
    854 
    855 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    856 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    857 	  WM_T_82545_3,		WMP_F_SERDES },
    858 
    859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    860 	  "Intel i82546EB 1000BASE-T Ethernet",
    861 	  WM_T_82546,		WMP_F_COPPER },
    862 
    863 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    864 	  "Intel i82546EB 1000BASE-T Ethernet",
    865 	  WM_T_82546,		WMP_F_COPPER },
    866 
    867 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    868 	  "Intel i82545EM 1000BASE-X Ethernet",
    869 	  WM_T_82545,		WMP_F_FIBER },
    870 
    871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    872 	  "Intel i82546EB 1000BASE-X Ethernet",
    873 	  WM_T_82546,		WMP_F_FIBER },
    874 
    875 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    876 	  "Intel i82546GB 1000BASE-T Ethernet",
    877 	  WM_T_82546_3,		WMP_F_COPPER },
    878 
    879 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    880 	  "Intel i82546GB 1000BASE-X Ethernet",
    881 	  WM_T_82546_3,		WMP_F_FIBER },
    882 
    883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    884 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    885 	  WM_T_82546_3,		WMP_F_SERDES },
    886 
    887 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    888 	  "i82546GB quad-port Gigabit Ethernet",
    889 	  WM_T_82546_3,		WMP_F_COPPER },
    890 
    891 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    892 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    893 	  WM_T_82546_3,		WMP_F_COPPER },
    894 
    895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    896 	  "Intel PRO/1000MT (82546GB)",
    897 	  WM_T_82546_3,		WMP_F_COPPER },
    898 
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    900 	  "Intel i82541EI 1000BASE-T Ethernet",
    901 	  WM_T_82541,		WMP_F_COPPER },
    902 
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    904 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    905 	  WM_T_82541,		WMP_F_COPPER },
    906 
    907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    908 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    909 	  WM_T_82541,		WMP_F_COPPER },
    910 
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    912 	  "Intel i82541ER 1000BASE-T Ethernet",
    913 	  WM_T_82541_2,		WMP_F_COPPER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    916 	  "Intel i82541GI 1000BASE-T Ethernet",
    917 	  WM_T_82541_2,		WMP_F_COPPER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    920 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    921 	  WM_T_82541_2,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    924 	  "Intel i82541PI 1000BASE-T Ethernet",
    925 	  WM_T_82541_2,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    928 	  "Intel i82547EI 1000BASE-T Ethernet",
    929 	  WM_T_82547,		WMP_F_COPPER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    932 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    933 	  WM_T_82547,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    936 	  "Intel i82547GI 1000BASE-T Ethernet",
    937 	  WM_T_82547_2,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    940 	  "Intel PRO/1000 PT (82571EB)",
    941 	  WM_T_82571,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    944 	  "Intel PRO/1000 PF (82571EB)",
    945 	  WM_T_82571,		WMP_F_FIBER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    948 	  "Intel PRO/1000 PB (82571EB)",
    949 	  WM_T_82571,		WMP_F_SERDES },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    952 	  "Intel PRO/1000 QT (82571EB)",
    953 	  WM_T_82571,		WMP_F_COPPER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    956 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    957 	  WM_T_82571,		WMP_F_COPPER, },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    960 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    961 	  WM_T_82571,		WMP_F_COPPER, },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    964 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    965 	  WM_T_82571,		WMP_F_SERDES, },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    968 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    969 	  WM_T_82571,		WMP_F_SERDES, },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    972 	  "Intel 82571EB Quad 1000baseX Ethernet",
    973 	  WM_T_82571,		WMP_F_FIBER, },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    976 	  "Intel i82572EI 1000baseT Ethernet",
    977 	  WM_T_82572,		WMP_F_COPPER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    980 	  "Intel i82572EI 1000baseX Ethernet",
    981 	  WM_T_82572,		WMP_F_FIBER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    984 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    985 	  WM_T_82572,		WMP_F_SERDES },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    988 	  "Intel i82572EI 1000baseT Ethernet",
    989 	  WM_T_82572,		WMP_F_COPPER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    992 	  "Intel i82573E",
    993 	  WM_T_82573,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    996 	  "Intel i82573E IAMT",
    997 	  WM_T_82573,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1000 	  "Intel i82573L Gigabit Ethernet",
   1001 	  WM_T_82573,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1004 	  "Intel i82574L",
   1005 	  WM_T_82574,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1008 	  "Intel i82574L",
   1009 	  WM_T_82574,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1012 	  "Intel i82583V",
   1013 	  WM_T_82583,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1016 	  "i80003 dual 1000baseT Ethernet",
   1017 	  WM_T_80003,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1020 	  "i80003 dual 1000baseX Ethernet",
   1021 	  WM_T_80003,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1024 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1025 	  WM_T_80003,		WMP_F_SERDES },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1028 	  "Intel i80003 1000baseT Ethernet",
   1029 	  WM_T_80003,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1032 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1033 	  WM_T_80003,		WMP_F_SERDES },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1036 	  "Intel i82801H (M_AMT) LAN Controller",
   1037 	  WM_T_ICH8,		WMP_F_COPPER },
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1039 	  "Intel i82801H (AMT) LAN Controller",
   1040 	  WM_T_ICH8,		WMP_F_COPPER },
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1042 	  "Intel i82801H LAN Controller",
   1043 	  WM_T_ICH8,		WMP_F_COPPER },
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1045 	  "Intel i82801H (IFE) LAN Controller",
   1046 	  WM_T_ICH8,		WMP_F_COPPER },
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1048 	  "Intel i82801H (M) LAN Controller",
   1049 	  WM_T_ICH8,		WMP_F_COPPER },
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1051 	  "Intel i82801H IFE (GT) LAN Controller",
   1052 	  WM_T_ICH8,		WMP_F_COPPER },
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1054 	  "Intel i82801H IFE (G) LAN Controller",
   1055 	  WM_T_ICH8,		WMP_F_COPPER },
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1057 	  "82801I (AMT) LAN Controller",
   1058 	  WM_T_ICH9,		WMP_F_COPPER },
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1060 	  "82801I LAN Controller",
   1061 	  WM_T_ICH9,		WMP_F_COPPER },
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1063 	  "82801I (G) LAN Controller",
   1064 	  WM_T_ICH9,		WMP_F_COPPER },
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1066 	  "82801I (GT) LAN Controller",
   1067 	  WM_T_ICH9,		WMP_F_COPPER },
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1069 	  "82801I (C) LAN Controller",
   1070 	  WM_T_ICH9,		WMP_F_COPPER },
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1072 	  "82801I mobile LAN Controller",
   1073 	  WM_T_ICH9,		WMP_F_COPPER },
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1075 	  "82801I mobile (V) LAN Controller",
   1076 	  WM_T_ICH9,		WMP_F_COPPER },
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1078 	  "82801I mobile (AMT) LAN Controller",
   1079 	  WM_T_ICH9,		WMP_F_COPPER },
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1081 	  "82567LM-4 LAN Controller",
   1082 	  WM_T_ICH9,		WMP_F_COPPER },
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1084 	  "82567V-3 LAN Controller",
   1085 	  WM_T_ICH9,		WMP_F_COPPER },
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1087 	  "82567LM-2 LAN Controller",
   1088 	  WM_T_ICH10,		WMP_F_COPPER },
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1090 	  "82567LF-2 LAN Controller",
   1091 	  WM_T_ICH10,		WMP_F_COPPER },
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1093 	  "82567LM-3 LAN Controller",
   1094 	  WM_T_ICH10,		WMP_F_COPPER },
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1096 	  "82567LF-3 LAN Controller",
   1097 	  WM_T_ICH10,		WMP_F_COPPER },
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1099 	  "82567V-2 LAN Controller",
   1100 	  WM_T_ICH10,		WMP_F_COPPER },
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1102 	  "82567V-3? LAN Controller",
   1103 	  WM_T_ICH10,		WMP_F_COPPER },
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1105 	  "HANKSVILLE LAN Controller",
   1106 	  WM_T_ICH10,		WMP_F_COPPER },
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1108 	  "PCH LAN (82577LM) Controller",
   1109 	  WM_T_PCH,		WMP_F_COPPER },
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1111 	  "PCH LAN (82577LC) Controller",
   1112 	  WM_T_PCH,		WMP_F_COPPER },
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1114 	  "PCH LAN (82578DM) Controller",
   1115 	  WM_T_PCH,		WMP_F_COPPER },
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1117 	  "PCH LAN (82578DC) Controller",
   1118 	  WM_T_PCH,		WMP_F_COPPER },
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1120 	  "PCH2 LAN (82579LM) Controller",
   1121 	  WM_T_PCH2,		WMP_F_COPPER },
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1123 	  "PCH2 LAN (82579V) Controller",
   1124 	  WM_T_PCH2,		WMP_F_COPPER },
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1126 	  "82575EB dual-1000baseT Ethernet",
   1127 	  WM_T_82575,		WMP_F_COPPER },
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1129 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1130 	  WM_T_82575,		WMP_F_SERDES },
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1132 	  "82575GB quad-1000baseT Ethernet",
   1133 	  WM_T_82575,		WMP_F_COPPER },
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1135 	  "82575GB quad-1000baseT Ethernet (PM)",
   1136 	  WM_T_82575,		WMP_F_COPPER },
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1138 	  "82576 1000BaseT Ethernet",
   1139 	  WM_T_82576,		WMP_F_COPPER },
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1141 	  "82576 1000BaseX Ethernet",
   1142 	  WM_T_82576,		WMP_F_FIBER },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1145 	  "82576 gigabit Ethernet (SERDES)",
   1146 	  WM_T_82576,		WMP_F_SERDES },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1149 	  "82576 quad-1000BaseT Ethernet",
   1150 	  WM_T_82576,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1153 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1154 	  WM_T_82576,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1157 	  "82576 gigabit Ethernet",
   1158 	  WM_T_82576,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1161 	  "82576 gigabit Ethernet (SERDES)",
   1162 	  WM_T_82576,		WMP_F_SERDES },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1164 	  "82576 quad-gigabit Ethernet (SERDES)",
   1165 	  WM_T_82576,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1168 	  "82580 1000BaseT Ethernet",
   1169 	  WM_T_82580,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1171 	  "82580 1000BaseX Ethernet",
   1172 	  WM_T_82580,		WMP_F_FIBER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1175 	  "82580 1000BaseT Ethernet (SERDES)",
   1176 	  WM_T_82580,		WMP_F_SERDES },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1179 	  "82580 gigabit Ethernet (SGMII)",
   1180 	  WM_T_82580,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1182 	  "82580 dual-1000BaseT Ethernet",
   1183 	  WM_T_82580,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1186 	  "82580 quad-1000BaseX Ethernet",
   1187 	  WM_T_82580,		WMP_F_FIBER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1190 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1191 	  WM_T_82580,		WMP_F_COPPER },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1194 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1195 	  WM_T_82580,		WMP_F_SERDES },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1198 	  "DH89XXCC 1000BASE-KX Ethernet",
   1199 	  WM_T_82580,		WMP_F_SERDES },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1202 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1203 	  WM_T_82580,		WMP_F_SERDES },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1206 	  "I350 Gigabit Network Connection",
   1207 	  WM_T_I350,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1210 	  "I350 Gigabit Fiber Network Connection",
   1211 	  WM_T_I350,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1214 	  "I350 Gigabit Backplane Connection",
   1215 	  WM_T_I350,		WMP_F_SERDES },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1218 	  "I350 Quad Port Gigabit Ethernet",
   1219 	  WM_T_I350,		WMP_F_SERDES },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1222 	  "I350 Gigabit Connection",
   1223 	  WM_T_I350,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1226 	  "I354 Gigabit Ethernet (KX)",
   1227 	  WM_T_I354,		WMP_F_SERDES },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1230 	  "I354 Gigabit Ethernet (SGMII)",
   1231 	  WM_T_I354,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1234 	  "I354 Gigabit Ethernet (2.5G)",
   1235 	  WM_T_I354,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1238 	  "I210-T1 Ethernet Server Adapter",
   1239 	  WM_T_I210,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1242 	  "I210 Ethernet (Copper OEM)",
   1243 	  WM_T_I210,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1246 	  "I210 Ethernet (Copper IT)",
   1247 	  WM_T_I210,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1250 	  "I210 Ethernet (FLASH less)",
   1251 	  WM_T_I210,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1254 	  "I210 Gigabit Ethernet (Fiber)",
   1255 	  WM_T_I210,		WMP_F_FIBER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1258 	  "I210 Gigabit Ethernet (SERDES)",
   1259 	  WM_T_I210,		WMP_F_SERDES },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1262 	  "I210 Gigabit Ethernet (FLASH less)",
   1263 	  WM_T_I210,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1266 	  "I210 Gigabit Ethernet (SGMII)",
   1267 	  WM_T_I210,		WMP_F_COPPER },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1270 	  "I211 Ethernet (COPPER)",
   1271 	  WM_T_I211,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1273 	  "I217 V Ethernet Connection",
   1274 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1276 	  "I217 LM Ethernet Connection",
   1277 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1279 	  "I218 V Ethernet Connection",
   1280 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1282 	  "I218 V Ethernet Connection",
   1283 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1285 	  "I218 V Ethernet Connection",
   1286 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1288 	  "I218 LM Ethernet Connection",
   1289 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1291 	  "I218 LM Ethernet Connection",
   1292 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1294 	  "I218 LM Ethernet Connection",
   1295 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1296 #if 0
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1298 	  "I219 V Ethernet Connection",
   1299 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1301 	  "I219 V Ethernet Connection",
   1302 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1304 	  "I219 LM Ethernet Connection",
   1305 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1307 	  "I219 LM Ethernet Connection",
   1308 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1309 #endif
   1310 	{ 0,			0,
   1311 	  NULL,
   1312 	  0,			0 },
   1313 };
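         /*
          * Note: the table above is terminated by the all-zero entry with a
          * NULL wmp_name; wm_lookup() below relies on that sentinel to stop
          * its scan.
          */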
   1314 
   1315 #ifdef WM_EVENT_COUNTERS
   1316 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1317 #endif /* WM_EVENT_COUNTERS */
   1318 
   1319 
   1320 /*
   1321  * Register read/write functions.
   1322  * Other than CSR_{READ|WRITE}().
   1323  */
   1324 
   1325 #if 0 /* Not currently used */
   1326 static inline uint32_t
   1327 wm_io_read(struct wm_softc *sc, int reg)
   1328 {
   1329 
   1330 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1331 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1332 }
   1333 #endif
   1334 
   1335 static inline void
   1336 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1337 {
   1338 
   1339 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1340 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1341 }
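         /*
          * Note on the pair above: the I/O BAR is a two-register indirection
          * window.  Writing a CSR offset at BAR offset 0 selects the target
          * register, and the following 4-byte access at BAR offset 4 moves
          * the data.  A usage sketch (illustrative only; not called here):
          *
          *	if (sc->sc_flags & WM_F_IOH_VALID)
          *		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
          */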
   1342 
   1343 static inline void
   1344 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1345     uint32_t data)
   1346 {
   1347 	uint32_t regval;
   1348 	int i;
   1349 
   1350 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1351 
   1352 	CSR_WRITE(sc, reg, regval);
   1353 
   1354 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1355 		delay(5);
   1356 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1357 			break;
   1358 	}
   1359 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1360 		aprint_error("%s: WARNING:"
   1361 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1362 		    device_xname(sc->sc_dev), reg);
   1363 	}
   1364 }
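         /*
          * The helper above packs an 8-bit payload and a register offset into
          * a single CSR write, then polls SCTL_CTL_READY at 5us intervals, so
          * the wait is bounded by roughly 5 * SCTL_CTL_POLL_TIMEOUT
          * microseconds (the constant itself is defined in the register
          * headers).
          */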
   1365 
   1366 static inline void
   1367 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1368 {
   1369 	wa->wa_low = htole32(v & 0xffffffffU);
   1370 	if (sizeof(bus_addr_t) == 8)
   1371 		wa->wa_high = htole32((uint64_t) v >> 32);
   1372 	else
   1373 		wa->wa_high = 0;
   1374 }
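         /*
          * Worked example for the split above (illustrative values only):
          * with a 64-bit bus_addr_t v = 0x123456789aULL, wa_low becomes
          * htole32(0x3456789a) and wa_high becomes htole32(0x12); with a
          * 32-bit bus_addr_t the high word is simply zeroed.
          */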
   1375 
   1376 /*
   1377  * Descriptor sync/init functions.
   1378  */
   1379 static inline void
   1380 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1381 {
   1382 	struct wm_softc *sc = txq->txq_sc;
   1383 
   1384 	/* If it will wrap around, sync to the end of the ring. */
   1385 	if ((start + num) > WM_NTXDESC(txq)) {
   1386 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1387 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1388 		    (WM_NTXDESC(txq) - start), ops);
   1389 		num -= (WM_NTXDESC(txq) - start);
   1390 		start = 0;
   1391 	}
   1392 
   1393 	/* Now sync whatever is left. */
   1394 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1395 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1396 }
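         /*
          * Wrap-around example for wm_cdtxsync() (illustrative only): on a
          * hypothetical 256-descriptor ring, start = 250 and num = 10 sync
          * descriptors 250-255 in the first bus_dmamap_sync() call above and
          * descriptors 0-3 in the second.
          */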
   1397 
   1398 static inline void
   1399 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1400 {
   1401 	struct wm_softc *sc = rxq->rxq_sc;
   1402 
   1403 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1404 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1405 }
   1406 
   1407 static inline void
   1408 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1409 {
   1410 	struct wm_softc *sc = rxq->rxq_sc;
   1411 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1412 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1413 	struct mbuf *m = rxs->rxs_mbuf;
   1414 
   1415 	/*
   1416 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1417 	 * so that the payload after the Ethernet header is aligned
   1418 	 * to a 4-byte boundary.
    1419 	 *
   1420 	 * XXX BRAINDAMAGE ALERT!
   1421 	 * The stupid chip uses the same size for every buffer, which
   1422 	 * is set in the Receive Control register.  We are using the 2K
   1423 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1424 	 * reason, we can't "scoot" packets longer than the standard
   1425 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1426 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1427 	 * the upper layer copy the headers.
   1428 	 */
   1429 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1430 
   1431 	wm_set_dma_addr(&rxd->wrx_addr,
   1432 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1433 	rxd->wrx_len = 0;
   1434 	rxd->wrx_cksum = 0;
   1435 	rxd->wrx_status = 0;
   1436 	rxd->wrx_errors = 0;
   1437 	rxd->wrx_special = 0;
   1438 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1439 
   1440 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1441 }
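         /*
          * On the align_tweak above: the Ethernet header is 14 bytes, so
          * scooting the buffer start forward by 2 makes the payload begin at
          * offset 16, a 4-byte boundary.  The same tweak is applied to both
          * m_data and the DMA address programmed into the descriptor.
          */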
   1442 
   1443 /*
   1444  * Device driver interface functions and commonly used functions.
   1445  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1446  */
   1447 
    1448 /* Look up a device in the supported device table */
   1449 static const struct wm_product *
   1450 wm_lookup(const struct pci_attach_args *pa)
   1451 {
   1452 	const struct wm_product *wmp;
   1453 
   1454 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1455 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1456 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1457 			return wmp;
   1458 	}
   1459 	return NULL;
   1460 }
   1461 
   1462 /* The match function (ca_match) */
   1463 static int
   1464 wm_match(device_t parent, cfdata_t cf, void *aux)
   1465 {
   1466 	struct pci_attach_args *pa = aux;
   1467 
   1468 	if (wm_lookup(pa) != NULL)
   1469 		return 1;
   1470 
   1471 	return 0;
   1472 }
   1473 
   1474 /* The attach function (ca_attach) */
   1475 static void
   1476 wm_attach(device_t parent, device_t self, void *aux)
   1477 {
   1478 	struct wm_softc *sc = device_private(self);
   1479 	struct pci_attach_args *pa = aux;
   1480 	prop_dictionary_t dict;
   1481 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1482 	pci_chipset_tag_t pc = pa->pa_pc;
   1483 	int counts[PCI_INTR_TYPE_SIZE];
   1484 	pci_intr_type_t max_type;
   1485 	const char *eetype, *xname;
   1486 	bus_space_tag_t memt;
   1487 	bus_space_handle_t memh;
   1488 	bus_size_t memsize;
   1489 	int memh_valid;
   1490 	int i, error;
   1491 	const struct wm_product *wmp;
   1492 	prop_data_t ea;
   1493 	prop_number_t pn;
   1494 	uint8_t enaddr[ETHER_ADDR_LEN];
   1495 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1496 	pcireg_t preg, memtype;
   1497 	uint16_t eeprom_data, apme_mask;
   1498 	bool force_clear_smbi;
   1499 	uint32_t link_mode;
   1500 	uint32_t reg;
   1501 
   1502 	sc->sc_dev = self;
   1503 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1504 	sc->sc_stopping = false;
   1505 
   1506 	wmp = wm_lookup(pa);
   1507 #ifdef DIAGNOSTIC
   1508 	if (wmp == NULL) {
   1509 		printf("\n");
   1510 		panic("wm_attach: impossible");
   1511 	}
   1512 #endif
   1513 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1514 
   1515 	sc->sc_pc = pa->pa_pc;
   1516 	sc->sc_pcitag = pa->pa_tag;
   1517 
   1518 	if (pci_dma64_available(pa))
   1519 		sc->sc_dmat = pa->pa_dmat64;
   1520 	else
   1521 		sc->sc_dmat = pa->pa_dmat;
   1522 
   1523 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1524 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1525 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1526 
   1527 	sc->sc_type = wmp->wmp_type;
   1528 	if (sc->sc_type < WM_T_82543) {
   1529 		if (sc->sc_rev < 2) {
   1530 			aprint_error_dev(sc->sc_dev,
   1531 			    "i82542 must be at least rev. 2\n");
   1532 			return;
   1533 		}
   1534 		if (sc->sc_rev < 3)
   1535 			sc->sc_type = WM_T_82542_2_0;
   1536 	}
   1537 
   1538 	/*
   1539 	 * Disable MSI for Errata:
   1540 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1541 	 *
   1542 	 *  82544: Errata 25
   1543 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1544 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1545 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1546 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1547 	 *
   1548 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1549 	 *
   1550 	 *  82571 & 82572: Errata 63
   1551 	 */
   1552 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1553 	    || (sc->sc_type == WM_T_82572))
   1554 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1555 
   1556 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1557 	    || (sc->sc_type == WM_T_82580)
   1558 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1559 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1560 		sc->sc_flags |= WM_F_NEWQUEUE;
   1561 
   1562 	/* Set device properties (mactype) */
   1563 	dict = device_properties(sc->sc_dev);
   1564 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1565 
   1566 	/*
    1567 	 * Map the device.  All devices support memory-mapped access,
   1568 	 * and it is really required for normal operation.
   1569 	 */
   1570 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1571 	switch (memtype) {
   1572 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1573 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1574 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1575 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1576 		break;
   1577 	default:
   1578 		memh_valid = 0;
   1579 		break;
   1580 	}
   1581 
   1582 	if (memh_valid) {
   1583 		sc->sc_st = memt;
   1584 		sc->sc_sh = memh;
   1585 		sc->sc_ss = memsize;
   1586 	} else {
   1587 		aprint_error_dev(sc->sc_dev,
   1588 		    "unable to map device registers\n");
   1589 		return;
   1590 	}
   1591 
   1592 	/*
   1593 	 * In addition, i82544 and later support I/O mapped indirect
   1594 	 * register access.  It is not desirable (nor supported in
   1595 	 * this driver) to use it for normal operation, though it is
   1596 	 * required to work around bugs in some chip versions.
   1597 	 */
   1598 	if (sc->sc_type >= WM_T_82544) {
   1599 		/* First we have to find the I/O BAR. */
   1600 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1601 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1602 			if (memtype == PCI_MAPREG_TYPE_IO)
   1603 				break;
   1604 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1605 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1606 				i += 4;	/* skip high bits, too */
   1607 		}
   1608 		if (i < PCI_MAPREG_END) {
   1609 			/*
    1610 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1611 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1612 			 * That's not a problem, because the newer chips don't
    1613 			 * have the bug that requires indirect I/O access.
    1614 			 *
    1615 			 * The i8254x apparently doesn't respond when the
    1616 			 * I/O BAR is 0, which looks as if it hasn't been
    1617 			 * configured.
   1618 			 */
   1619 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1620 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1621 				aprint_error_dev(sc->sc_dev,
   1622 				    "WARNING: I/O BAR at zero.\n");
   1623 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1624 					0, &sc->sc_iot, &sc->sc_ioh,
   1625 					NULL, &sc->sc_ios) == 0) {
   1626 				sc->sc_flags |= WM_F_IOH_VALID;
   1627 			} else {
   1628 				aprint_error_dev(sc->sc_dev,
   1629 				    "WARNING: unable to map I/O space\n");
   1630 			}
   1631 		}
   1632 
   1633 	}
   1634 
   1635 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1636 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1637 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1638 	if (sc->sc_type < WM_T_82542_2_1)
   1639 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1640 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1641 
   1642 	/* power up chip */
   1643 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1644 	    NULL)) && error != EOPNOTSUPP) {
   1645 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1646 		return;
   1647 	}
   1648 
   1649 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1650 
   1651 	/* Allocation settings */
   1652 	max_type = PCI_INTR_TYPE_MSIX;
   1653 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1654 	counts[PCI_INTR_TYPE_MSI] = 1;
   1655 	counts[PCI_INTR_TYPE_INTX] = 1;
   1656 
   1657 alloc_retry:
   1658 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1659 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1660 		return;
   1661 	}
   1662 
   1663 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1664 		error = wm_setup_msix(sc);
   1665 		if (error) {
   1666 			pci_intr_release(pc, sc->sc_intrs,
   1667 			    counts[PCI_INTR_TYPE_MSIX]);
   1668 
   1669 			/* Setup for MSI: Disable MSI-X */
   1670 			max_type = PCI_INTR_TYPE_MSI;
   1671 			counts[PCI_INTR_TYPE_MSI] = 1;
   1672 			counts[PCI_INTR_TYPE_INTX] = 1;
   1673 			goto alloc_retry;
   1674 		}
    1675 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1676 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1677 		error = wm_setup_legacy(sc);
   1678 		if (error) {
   1679 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1680 			    counts[PCI_INTR_TYPE_MSI]);
   1681 
   1682 			/* The next try is for INTx: Disable MSI */
   1683 			max_type = PCI_INTR_TYPE_INTX;
   1684 			counts[PCI_INTR_TYPE_INTX] = 1;
   1685 			goto alloc_retry;
   1686 		}
   1687 	} else {
   1688 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1689 		error = wm_setup_legacy(sc);
   1690 		if (error) {
   1691 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1692 			    counts[PCI_INTR_TYPE_INTX]);
   1693 			return;
   1694 		}
   1695 	}
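         	/*
         	 * At this point one of the three interrupt flavours has been
         	 * established; the retry chain above degrades MSI-X -> MSI ->
         	 * INTx, releasing the previously allocated vectors before each
         	 * step down.
         	 */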
   1696 
   1697 	/*
   1698 	 * Check the function ID (unit number of the chip).
   1699 	 */
   1700 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1701 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1702 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1703 	    || (sc->sc_type == WM_T_82580)
   1704 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1705 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1706 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1707 	else
   1708 		sc->sc_funcid = 0;
   1709 
   1710 	/*
   1711 	 * Determine a few things about the bus we're connected to.
   1712 	 */
   1713 	if (sc->sc_type < WM_T_82543) {
   1714 		/* We don't really know the bus characteristics here. */
   1715 		sc->sc_bus_speed = 33;
   1716 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1717 		/*
    1718 		 * CSA (Communication Streaming Architecture) is about as fast
    1719 		 * as a 32-bit, 66MHz PCI bus.
   1720 		 */
   1721 		sc->sc_flags |= WM_F_CSA;
   1722 		sc->sc_bus_speed = 66;
   1723 		aprint_verbose_dev(sc->sc_dev,
   1724 		    "Communication Streaming Architecture\n");
   1725 		if (sc->sc_type == WM_T_82547) {
   1726 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1727 			callout_setfunc(&sc->sc_txfifo_ch,
   1728 					wm_82547_txfifo_stall, sc);
   1729 			aprint_verbose_dev(sc->sc_dev,
   1730 			    "using 82547 Tx FIFO stall work-around\n");
   1731 		}
   1732 	} else if (sc->sc_type >= WM_T_82571) {
   1733 		sc->sc_flags |= WM_F_PCIE;
   1734 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1735 		    && (sc->sc_type != WM_T_ICH10)
   1736 		    && (sc->sc_type != WM_T_PCH)
   1737 		    && (sc->sc_type != WM_T_PCH2)
   1738 		    && (sc->sc_type != WM_T_PCH_LPT)
   1739 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1740 			/* ICH* and PCH* have no PCIe capability registers */
   1741 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1742 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1743 				NULL) == 0)
   1744 				aprint_error_dev(sc->sc_dev,
   1745 				    "unable to find PCIe capability\n");
   1746 		}
   1747 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1748 	} else {
   1749 		reg = CSR_READ(sc, WMREG_STATUS);
   1750 		if (reg & STATUS_BUS64)
   1751 			sc->sc_flags |= WM_F_BUS64;
   1752 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1753 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1754 
   1755 			sc->sc_flags |= WM_F_PCIX;
   1756 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1757 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1758 				aprint_error_dev(sc->sc_dev,
   1759 				    "unable to find PCIX capability\n");
   1760 			else if (sc->sc_type != WM_T_82545_3 &&
   1761 				 sc->sc_type != WM_T_82546_3) {
   1762 				/*
   1763 				 * Work around a problem caused by the BIOS
   1764 				 * setting the max memory read byte count
   1765 				 * incorrectly.
   1766 				 */
   1767 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1768 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1769 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1770 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1771 
   1772 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1773 				    PCIX_CMD_BYTECNT_SHIFT;
   1774 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1775 				    PCIX_STATUS_MAXB_SHIFT;
   1776 				if (bytecnt > maxb) {
   1777 					aprint_verbose_dev(sc->sc_dev,
   1778 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1779 					    512 << bytecnt, 512 << maxb);
   1780 					pcix_cmd = (pcix_cmd &
   1781 					    ~PCIX_CMD_BYTECNT_MASK) |
   1782 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1783 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1784 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1785 					    pcix_cmd);
   1786 				}
   1787 			}
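         			/*
         			 * MMRBC example (illustrative only): the
         			 * bytecnt field encodes 512 << n bytes, so
         			 * bytecnt = 3 means 4096.  If the status
         			 * register caps maxb at 1 (1024 bytes), the
         			 * code above rewrites the command register
         			 * down to that limit.
         			 */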
   1788 		}
   1789 		/*
   1790 		 * The quad port adapter is special; it has a PCIX-PCIX
   1791 		 * bridge on the board, and can run the secondary bus at
   1792 		 * a higher speed.
   1793 		 */
   1794 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1795 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1796 								      : 66;
   1797 		} else if (sc->sc_flags & WM_F_PCIX) {
   1798 			switch (reg & STATUS_PCIXSPD_MASK) {
   1799 			case STATUS_PCIXSPD_50_66:
   1800 				sc->sc_bus_speed = 66;
   1801 				break;
   1802 			case STATUS_PCIXSPD_66_100:
   1803 				sc->sc_bus_speed = 100;
   1804 				break;
   1805 			case STATUS_PCIXSPD_100_133:
   1806 				sc->sc_bus_speed = 133;
   1807 				break;
   1808 			default:
   1809 				aprint_error_dev(sc->sc_dev,
   1810 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1811 				    reg & STATUS_PCIXSPD_MASK);
   1812 				sc->sc_bus_speed = 66;
   1813 				break;
   1814 			}
   1815 		} else
   1816 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1817 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1818 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1819 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1820 	}
   1821 
   1822 	/* clear interesting stat counters */
   1823 	CSR_READ(sc, WMREG_COLC);
   1824 	CSR_READ(sc, WMREG_RXERRC);
   1825 
   1826 	/* get PHY control from SMBus to PCIe */
   1827 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1828 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1829 		wm_smbustopci(sc);
   1830 
   1831 	/* Reset the chip to a known state. */
   1832 	wm_reset(sc);
   1833 
   1834 	/* Get some information about the EEPROM. */
   1835 	switch (sc->sc_type) {
   1836 	case WM_T_82542_2_0:
   1837 	case WM_T_82542_2_1:
   1838 	case WM_T_82543:
   1839 	case WM_T_82544:
   1840 		/* Microwire */
   1841 		sc->sc_nvm_wordsize = 64;
   1842 		sc->sc_nvm_addrbits = 6;
   1843 		break;
   1844 	case WM_T_82540:
   1845 	case WM_T_82545:
   1846 	case WM_T_82545_3:
   1847 	case WM_T_82546:
   1848 	case WM_T_82546_3:
   1849 		/* Microwire */
   1850 		reg = CSR_READ(sc, WMREG_EECD);
   1851 		if (reg & EECD_EE_SIZE) {
   1852 			sc->sc_nvm_wordsize = 256;
   1853 			sc->sc_nvm_addrbits = 8;
   1854 		} else {
   1855 			sc->sc_nvm_wordsize = 64;
   1856 			sc->sc_nvm_addrbits = 6;
   1857 		}
   1858 		sc->sc_flags |= WM_F_LOCK_EECD;
   1859 		break;
   1860 	case WM_T_82541:
   1861 	case WM_T_82541_2:
   1862 	case WM_T_82547:
   1863 	case WM_T_82547_2:
   1864 		sc->sc_flags |= WM_F_LOCK_EECD;
   1865 		reg = CSR_READ(sc, WMREG_EECD);
   1866 		if (reg & EECD_EE_TYPE) {
   1867 			/* SPI */
   1868 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1869 			wm_nvm_set_addrbits_size_eecd(sc);
   1870 		} else {
   1871 			/* Microwire */
   1872 			if ((reg & EECD_EE_ABITS) != 0) {
   1873 				sc->sc_nvm_wordsize = 256;
   1874 				sc->sc_nvm_addrbits = 8;
   1875 			} else {
   1876 				sc->sc_nvm_wordsize = 64;
   1877 				sc->sc_nvm_addrbits = 6;
   1878 			}
   1879 		}
   1880 		break;
   1881 	case WM_T_82571:
   1882 	case WM_T_82572:
   1883 		/* SPI */
   1884 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1885 		wm_nvm_set_addrbits_size_eecd(sc);
   1886 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1887 		break;
   1888 	case WM_T_82573:
   1889 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1890 		/* FALLTHROUGH */
   1891 	case WM_T_82574:
   1892 	case WM_T_82583:
   1893 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1894 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1895 			sc->sc_nvm_wordsize = 2048;
   1896 		} else {
   1897 			/* SPI */
   1898 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1899 			wm_nvm_set_addrbits_size_eecd(sc);
   1900 		}
   1901 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1902 		break;
   1903 	case WM_T_82575:
   1904 	case WM_T_82576:
   1905 	case WM_T_82580:
   1906 	case WM_T_I350:
   1907 	case WM_T_I354:
   1908 	case WM_T_80003:
   1909 		/* SPI */
   1910 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1911 		wm_nvm_set_addrbits_size_eecd(sc);
   1912 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1913 		    | WM_F_LOCK_SWSM;
   1914 		break;
   1915 	case WM_T_ICH8:
   1916 	case WM_T_ICH9:
   1917 	case WM_T_ICH10:
   1918 	case WM_T_PCH:
   1919 	case WM_T_PCH2:
   1920 	case WM_T_PCH_LPT:
   1921 		/* FLASH */
   1922 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1923 		sc->sc_nvm_wordsize = 2048;
   1924 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1925 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1926 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1927 			aprint_error_dev(sc->sc_dev,
   1928 			    "can't map FLASH registers\n");
   1929 			goto out;
   1930 		}
   1931 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1932 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1933 		    ICH_FLASH_SECTOR_SIZE;
   1934 		sc->sc_ich8_flash_bank_size =
   1935 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1936 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1937 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1938 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
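         		/*
         		 * GFPREG arithmetic sketch (illustrative values): the low
         		 * field of GFPREG is the base and the field at bit 16 the
         		 * limit, both in sector units.  With base = 1 and
         		 * limit = 8, the region spans 8 sectors; multiplying by
         		 * ICH_FLASH_SECTOR_SIZE gives bytes, and dividing by
         		 * 2 * sizeof(uint16_t) converts that to 16-bit words per
         		 * bank, assuming the region holds two banks.
         		 */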
   1939 		sc->sc_flashreg_offset = 0;
   1940 		break;
   1941 	case WM_T_PCH_SPT:
   1942 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1943 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1944 		sc->sc_flasht = sc->sc_st;
   1945 		sc->sc_flashh = sc->sc_sh;
   1946 		sc->sc_ich8_flash_base = 0;
   1947 		sc->sc_nvm_wordsize =
   1948 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1949 			* NVM_SIZE_MULTIPLIER;
    1950 		/* That is the size in bytes; we want words */
   1951 		sc->sc_nvm_wordsize /= 2;
   1952 		/* assume 2 banks */
   1953 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
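         		/*
         		 * Sizing sketch (hypothetical numbers): if the 5-bit
         		 * strap field reads 7, the NVM is
         		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes; at an assumed
         		 * multiplier of 4096 that is 32KB, i.e. 16K words, or
         		 * 8K words per bank under the two-bank assumption above.
         		 */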
   1954 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1955 		break;
   1956 	case WM_T_I210:
   1957 	case WM_T_I211:
   1958 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1959 			wm_nvm_set_addrbits_size_eecd(sc);
   1960 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1961 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1962 		} else {
   1963 			sc->sc_nvm_wordsize = INVM_SIZE;
   1964 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1965 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1966 		}
   1967 		break;
   1968 	default:
   1969 		break;
   1970 	}
   1971 
   1972 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1973 	switch (sc->sc_type) {
   1974 	case WM_T_82571:
   1975 	case WM_T_82572:
   1976 		reg = CSR_READ(sc, WMREG_SWSM2);
   1977 		if ((reg & SWSM2_LOCK) == 0) {
   1978 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1979 			force_clear_smbi = true;
   1980 		} else
   1981 			force_clear_smbi = false;
   1982 		break;
   1983 	case WM_T_82573:
   1984 	case WM_T_82574:
   1985 	case WM_T_82583:
   1986 		force_clear_smbi = true;
   1987 		break;
   1988 	default:
   1989 		force_clear_smbi = false;
   1990 		break;
   1991 	}
   1992 	if (force_clear_smbi) {
   1993 		reg = CSR_READ(sc, WMREG_SWSM);
   1994 		if ((reg & SWSM_SMBI) != 0)
   1995 			aprint_error_dev(sc->sc_dev,
   1996 			    "Please update the Bootagent\n");
   1997 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1998 	}
   1999 
   2000 	/*
    2001 	 * Defer printing the EEPROM type until after verifying the checksum.
   2002 	 * This allows the EEPROM type to be printed correctly in the case
   2003 	 * that no EEPROM is attached.
   2004 	 */
   2005 	/*
   2006 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2007 	 * this for later, so we can fail future reads from the EEPROM.
   2008 	 */
   2009 	if (wm_nvm_validate_checksum(sc)) {
   2010 		/*
    2011 		 * Check a second time, because some PCI-e parts fail the
    2012 		 * first check due to the link being in a sleep state.
   2013 		 */
   2014 		if (wm_nvm_validate_checksum(sc))
   2015 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2016 	}
   2017 
   2018 	/* Set device properties (macflags) */
   2019 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2020 
   2021 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2022 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2023 	else {
   2024 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2025 		    sc->sc_nvm_wordsize);
   2026 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2027 			aprint_verbose("iNVM");
   2028 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2029 			aprint_verbose("FLASH(HW)");
   2030 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2031 			aprint_verbose("FLASH");
   2032 		else {
   2033 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2034 				eetype = "SPI";
   2035 			else
   2036 				eetype = "MicroWire";
   2037 			aprint_verbose("(%d address bits) %s EEPROM",
   2038 			    sc->sc_nvm_addrbits, eetype);
   2039 		}
   2040 	}
   2041 	wm_nvm_version(sc);
   2042 	aprint_verbose("\n");
   2043 
   2044 	/* Check for I21[01] PLL workaround */
   2045 	if (sc->sc_type == WM_T_I210)
   2046 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2047 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2048 		/* NVM image release 3.25 has a workaround */
   2049 		if ((sc->sc_nvm_ver_major < 3)
   2050 		    || ((sc->sc_nvm_ver_major == 3)
   2051 			&& (sc->sc_nvm_ver_minor < 25))) {
   2052 			aprint_verbose_dev(sc->sc_dev,
   2053 			    "ROM image version %d.%d is older than 3.25\n",
   2054 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2055 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2056 		}
   2057 	}
   2058 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2059 		wm_pll_workaround_i210(sc);
   2060 
   2061 	wm_get_wakeup(sc);
   2062 	switch (sc->sc_type) {
   2063 	case WM_T_82571:
   2064 	case WM_T_82572:
   2065 	case WM_T_82573:
   2066 	case WM_T_82574:
   2067 	case WM_T_82583:
   2068 	case WM_T_80003:
   2069 	case WM_T_ICH8:
   2070 	case WM_T_ICH9:
   2071 	case WM_T_ICH10:
   2072 	case WM_T_PCH:
   2073 	case WM_T_PCH2:
   2074 	case WM_T_PCH_LPT:
   2075 	case WM_T_PCH_SPT:
   2076 		/* Non-AMT based hardware can now take control from firmware */
   2077 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2078 			wm_get_hw_control(sc);
   2079 		break;
   2080 	default:
   2081 		break;
   2082 	}
   2083 
   2084 	/*
    2085 	 * Read the Ethernet address from the EEPROM, unless it was
    2086 	 * already found in the device properties.
   2087 	 */
   2088 	ea = prop_dictionary_get(dict, "mac-address");
   2089 	if (ea != NULL) {
   2090 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2091 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2092 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2093 	} else {
   2094 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2095 			aprint_error_dev(sc->sc_dev,
   2096 			    "unable to read Ethernet address\n");
   2097 			goto out;
   2098 		}
   2099 	}
   2100 
   2101 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2102 	    ether_sprintf(enaddr));
   2103 
   2104 	/*
   2105 	 * Read the config info from the EEPROM, and set up various
   2106 	 * bits in the control registers based on their contents.
   2107 	 */
   2108 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2109 	if (pn != NULL) {
   2110 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2111 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2112 	} else {
   2113 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2114 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2115 			goto out;
   2116 		}
   2117 	}
   2118 
   2119 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2120 	if (pn != NULL) {
   2121 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2122 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2123 	} else {
   2124 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2125 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2126 			goto out;
   2127 		}
   2128 	}
   2129 
   2130 	/* check for WM_F_WOL */
   2131 	switch (sc->sc_type) {
   2132 	case WM_T_82542_2_0:
   2133 	case WM_T_82542_2_1:
   2134 	case WM_T_82543:
   2135 		/* dummy? */
   2136 		eeprom_data = 0;
   2137 		apme_mask = NVM_CFG3_APME;
   2138 		break;
   2139 	case WM_T_82544:
   2140 		apme_mask = NVM_CFG2_82544_APM_EN;
   2141 		eeprom_data = cfg2;
   2142 		break;
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 	case WM_T_82571:
   2146 	case WM_T_82572:
   2147 	case WM_T_82573:
   2148 	case WM_T_82574:
   2149 	case WM_T_82583:
   2150 	case WM_T_80003:
   2151 	default:
   2152 		apme_mask = NVM_CFG3_APME;
   2153 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2154 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2155 		break;
   2156 	case WM_T_82575:
   2157 	case WM_T_82576:
   2158 	case WM_T_82580:
   2159 	case WM_T_I350:
   2160 	case WM_T_I354: /* XXX ok? */
   2161 	case WM_T_ICH8:
   2162 	case WM_T_ICH9:
   2163 	case WM_T_ICH10:
   2164 	case WM_T_PCH:
   2165 	case WM_T_PCH2:
   2166 	case WM_T_PCH_LPT:
   2167 	case WM_T_PCH_SPT:
   2168 		/* XXX The funcid should be checked on some devices */
   2169 		apme_mask = WUC_APME;
   2170 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2171 		break;
   2172 	}
   2173 
    2174 	/* Check for the WM_F_WOL flag now that the EEPROM data has been read */
   2175 	if ((eeprom_data & apme_mask) != 0)
   2176 		sc->sc_flags |= WM_F_WOL;
   2177 #ifdef WM_DEBUG
   2178 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2179 		printf("WOL\n");
   2180 #endif
   2181 
   2182 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2183 		/* Check NVM for autonegotiation */
   2184 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2185 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2186 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2187 		}
   2188 	}
   2189 
   2190 	/*
    2191 	 * XXX need special handling for some multi-port cards
    2192 	 * to disable a particular port.
   2193 	 */
   2194 
   2195 	if (sc->sc_type >= WM_T_82544) {
   2196 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2197 		if (pn != NULL) {
   2198 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2199 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2200 		} else {
   2201 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2202 				aprint_error_dev(sc->sc_dev,
   2203 				    "unable to read SWDPIN\n");
   2204 				goto out;
   2205 			}
   2206 		}
   2207 	}
   2208 
   2209 	if (cfg1 & NVM_CFG1_ILOS)
   2210 		sc->sc_ctrl |= CTRL_ILOS;
   2211 
   2212 	/*
   2213 	 * XXX
    2214 	 * This code isn't correct because pins 2 and 3 are located
    2215 	 * at different positions on newer chips. Check all the datasheets.
    2216 	 *
    2217 	 * Until this problem is resolved, only apply it to chips < 82580.
   2218 	 */
   2219 	if (sc->sc_type <= WM_T_82580) {
   2220 		if (sc->sc_type >= WM_T_82544) {
   2221 			sc->sc_ctrl |=
   2222 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2223 			    CTRL_SWDPIO_SHIFT;
   2224 			sc->sc_ctrl |=
   2225 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2226 			    CTRL_SWDPINS_SHIFT;
   2227 		} else {
   2228 			sc->sc_ctrl |=
   2229 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2230 			    CTRL_SWDPIO_SHIFT;
   2231 		}
   2232 	}
   2233 
   2234 	/* XXX For other than 82580? */
   2235 	if (sc->sc_type == WM_T_82580) {
   2236 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2237 		if (nvmword & __BIT(13))
   2238 			sc->sc_ctrl |= CTRL_ILOS;
   2239 	}
   2240 
   2241 #if 0
   2242 	if (sc->sc_type >= WM_T_82544) {
   2243 		if (cfg1 & NVM_CFG1_IPS0)
   2244 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2245 		if (cfg1 & NVM_CFG1_IPS1)
   2246 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2247 		sc->sc_ctrl_ext |=
   2248 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2249 		    CTRL_EXT_SWDPIO_SHIFT;
   2250 		sc->sc_ctrl_ext |=
   2251 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2252 		    CTRL_EXT_SWDPINS_SHIFT;
   2253 	} else {
   2254 		sc->sc_ctrl_ext |=
   2255 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2256 		    CTRL_EXT_SWDPIO_SHIFT;
   2257 	}
   2258 #endif
   2259 
   2260 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2261 #if 0
   2262 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2263 #endif
   2264 
   2265 	if (sc->sc_type == WM_T_PCH) {
   2266 		uint16_t val;
   2267 
   2268 		/* Save the NVM K1 bit setting */
   2269 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2270 
   2271 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2272 			sc->sc_nvm_k1_enabled = 1;
   2273 		else
   2274 			sc->sc_nvm_k1_enabled = 0;
   2275 	}
   2276 
   2277 	/*
    2278 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2279 	 * media structures accordingly.
   2280 	 */
   2281 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2282 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2283 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2284 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2285 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2286 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2287 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2288 	} else if (sc->sc_type < WM_T_82543 ||
   2289 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2290 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2291 			aprint_error_dev(sc->sc_dev,
   2292 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2293 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2294 		}
   2295 		wm_tbi_mediainit(sc);
   2296 	} else {
   2297 		switch (sc->sc_type) {
   2298 		case WM_T_82575:
   2299 		case WM_T_82576:
   2300 		case WM_T_82580:
   2301 		case WM_T_I350:
   2302 		case WM_T_I354:
   2303 		case WM_T_I210:
   2304 		case WM_T_I211:
   2305 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2306 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2307 			switch (link_mode) {
   2308 			case CTRL_EXT_LINK_MODE_1000KX:
   2309 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2310 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2311 				break;
   2312 			case CTRL_EXT_LINK_MODE_SGMII:
   2313 				if (wm_sgmii_uses_mdio(sc)) {
   2314 					aprint_verbose_dev(sc->sc_dev,
   2315 					    "SGMII(MDIO)\n");
   2316 					sc->sc_flags |= WM_F_SGMII;
   2317 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2318 					break;
   2319 				}
   2320 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2321 				/*FALLTHROUGH*/
   2322 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2323 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2324 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2325 					if (link_mode
   2326 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2327 						sc->sc_mediatype
   2328 						    = WM_MEDIATYPE_COPPER;
   2329 						sc->sc_flags |= WM_F_SGMII;
   2330 					} else {
   2331 						sc->sc_mediatype
   2332 						    = WM_MEDIATYPE_SERDES;
   2333 						aprint_verbose_dev(sc->sc_dev,
   2334 						    "SERDES\n");
   2335 					}
   2336 					break;
   2337 				}
   2338 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2339 					aprint_verbose_dev(sc->sc_dev,
   2340 					    "SERDES\n");
   2341 
   2342 				/* Change current link mode setting */
   2343 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2344 				switch (sc->sc_mediatype) {
   2345 				case WM_MEDIATYPE_COPPER:
   2346 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2347 					break;
   2348 				case WM_MEDIATYPE_SERDES:
   2349 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2350 					break;
   2351 				default:
   2352 					break;
   2353 				}
   2354 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2355 				break;
   2356 			case CTRL_EXT_LINK_MODE_GMII:
   2357 			default:
   2358 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2359 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2360 				break;
   2361 			}
   2362 
    2364 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2365 				reg |= CTRL_EXT_I2C_ENA;
    2366 			else
    2367 				reg &= ~CTRL_EXT_I2C_ENA;
   2368 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2369 
   2370 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2371 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2372 			else
   2373 				wm_tbi_mediainit(sc);
   2374 			break;
   2375 		default:
   2376 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2377 				aprint_error_dev(sc->sc_dev,
   2378 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2379 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2380 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2381 		}
   2382 	}
   2383 
   2384 	ifp = &sc->sc_ethercom.ec_if;
   2385 	xname = device_xname(sc->sc_dev);
   2386 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2387 	ifp->if_softc = sc;
   2388 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2389 	ifp->if_ioctl = wm_ioctl;
   2390 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2391 		ifp->if_start = wm_nq_start;
   2392 	else
   2393 		ifp->if_start = wm_start;
   2394 	ifp->if_watchdog = wm_watchdog;
   2395 	ifp->if_init = wm_init;
   2396 	ifp->if_stop = wm_stop;
   2397 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2398 	IFQ_SET_READY(&ifp->if_snd);
   2399 
   2400 	/* Check for jumbo frame */
   2401 	switch (sc->sc_type) {
   2402 	case WM_T_82573:
   2403 		/* XXX limited to 9234 if ASPM is disabled */
   2404 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2405 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2406 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2407 		break;
   2408 	case WM_T_82571:
   2409 	case WM_T_82572:
   2410 	case WM_T_82574:
   2411 	case WM_T_82575:
   2412 	case WM_T_82576:
   2413 	case WM_T_82580:
   2414 	case WM_T_I350:
    2415 	case WM_T_I354: /* XXX ok? */
   2416 	case WM_T_I210:
   2417 	case WM_T_I211:
   2418 	case WM_T_80003:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 		/* XXX limited to 9234 */
   2425 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2426 		break;
   2427 	case WM_T_PCH:
   2428 		/* XXX limited to 4096 */
   2429 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2430 		break;
   2431 	case WM_T_82542_2_0:
   2432 	case WM_T_82542_2_1:
   2433 	case WM_T_82583:
   2434 	case WM_T_ICH8:
   2435 		/* No support for jumbo frame */
   2436 		break;
   2437 	default:
   2438 		/* ETHER_MAX_LEN_JUMBO */
   2439 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2440 		break;
   2441 	}
   2442 
    2443 	/* If we're an i82543 or greater, we can support VLANs. */
   2444 	if (sc->sc_type >= WM_T_82543)
   2445 		sc->sc_ethercom.ec_capabilities |=
   2446 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2447 
   2448 	/*
    2449 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2450 	 * on i82543 and later.
   2451 	 */
   2452 	if (sc->sc_type >= WM_T_82543) {
   2453 		ifp->if_capabilities |=
   2454 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2455 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2456 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2457 		    IFCAP_CSUM_TCPv6_Tx |
   2458 		    IFCAP_CSUM_UDPv6_Tx;
   2459 	}
   2460 
   2461 	/*
    2462 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2463 	 *
   2464 	 *	82541GI (8086:1076) ... no
   2465 	 *	82572EI (8086:10b9) ... yes
   2466 	 */
   2467 	if (sc->sc_type >= WM_T_82571) {
   2468 		ifp->if_capabilities |=
   2469 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2470 	}
   2471 
   2472 	/*
    2473 	 * If we're an i82544 or greater (except i82547), we can do
   2474 	 * TCP segmentation offload.
   2475 	 */
   2476 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2477 		ifp->if_capabilities |= IFCAP_TSOv4;
   2478 	}
   2479 
   2480 	if (sc->sc_type >= WM_T_82571) {
   2481 		ifp->if_capabilities |= IFCAP_TSOv6;
   2482 	}
   2483 
   2484 #ifdef WM_MPSAFE
   2485 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2486 #else
   2487 	sc->sc_core_lock = NULL;
   2488 #endif
   2489 
   2490 	/* Attach the interface. */
   2491 	if_initialize(ifp);
   2492 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2493 	ether_ifattach(ifp, enaddr);
   2494 	if_register(ifp);
   2495 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2496 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2497 			  RND_FLAG_DEFAULT);
   2498 
   2499 #ifdef WM_EVENT_COUNTERS
   2500 	/* Attach event counters. */
   2501 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2502 	    NULL, xname, "txsstall");
   2503 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2504 	    NULL, xname, "txdstall");
   2505 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2506 	    NULL, xname, "txfifo_stall");
   2507 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2508 	    NULL, xname, "txdw");
   2509 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2510 	    NULL, xname, "txqe");
   2511 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2512 	    NULL, xname, "rxintr");
   2513 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2514 	    NULL, xname, "linkintr");
   2515 
   2516 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2517 	    NULL, xname, "rxipsum");
   2518 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2519 	    NULL, xname, "rxtusum");
   2520 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2521 	    NULL, xname, "txipsum");
   2522 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2523 	    NULL, xname, "txtusum");
   2524 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2525 	    NULL, xname, "txtusum6");
   2526 
   2527 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2528 	    NULL, xname, "txtso");
   2529 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2530 	    NULL, xname, "txtso6");
   2531 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2532 	    NULL, xname, "txtsopain");
   2533 
   2534 	for (i = 0; i < WM_NTXSEGS; i++) {
   2535 		snprintf(wm_txseg_evcnt_names[i],
   2536 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2537 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2538 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2539 	}
   2540 
   2541 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2542 	    NULL, xname, "txdrop");
   2543 
   2544 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2545 	    NULL, xname, "tu");
   2546 
   2547 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2548 	    NULL, xname, "tx_xoff");
   2549 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2550 	    NULL, xname, "tx_xon");
   2551 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2552 	    NULL, xname, "rx_xoff");
   2553 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2554 	    NULL, xname, "rx_xon");
   2555 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2556 	    NULL, xname, "rx_macctl");
   2557 #endif /* WM_EVENT_COUNTERS */
   2558 
   2559 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2560 		pmf_class_network_register(self, ifp);
   2561 	else
   2562 		aprint_error_dev(self, "couldn't establish power handler\n");
   2563 
   2564 	sc->sc_flags |= WM_F_ATTACHED;
   2565  out:
   2566 	return;
   2567 }
   2568 
   2569 /* The detach function (ca_detach) */
   2570 static int
   2571 wm_detach(device_t self, int flags __unused)
   2572 {
   2573 	struct wm_softc *sc = device_private(self);
   2574 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2575 	int i;
   2576 #ifndef WM_MPSAFE
   2577 	int s;
   2578 #endif
   2579 
   2580 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2581 		return 0;
   2582 
   2583 #ifndef WM_MPSAFE
   2584 	s = splnet();
   2585 #endif
    2586 	/* Stop the interface.  The callouts are stopped by wm_stop(). */
   2587 	wm_stop(ifp, 1);
   2588 
   2589 #ifndef WM_MPSAFE
   2590 	splx(s);
   2591 #endif
   2592 
   2593 	pmf_device_deregister(self);
   2594 
   2595 	/* Tell the firmware about the release */
   2596 	WM_CORE_LOCK(sc);
   2597 	wm_release_manageability(sc);
   2598 	wm_release_hw_control(sc);
   2599 	WM_CORE_UNLOCK(sc);
   2600 
   2601 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2602 
   2603 	/* Delete all remaining media. */
   2604 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2605 
   2606 	ether_ifdetach(ifp);
   2607 	if_detach(ifp);
   2608 	if_percpuq_destroy(sc->sc_ipq);
   2609 
   2610 	/* Unload RX dmamaps and free mbufs */
   2611 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2612 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2613 		WM_RX_LOCK(rxq);
   2614 		wm_rxdrain(rxq);
   2615 		WM_RX_UNLOCK(rxq);
   2616 	}
   2617 	/* Must unlock here */
   2618 
    2619 	/* Disestablish the interrupt handlers */
   2620 	for (i = 0; i < sc->sc_nintrs; i++) {
   2621 		if (sc->sc_ihs[i] != NULL) {
   2622 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2623 			sc->sc_ihs[i] = NULL;
   2624 		}
   2625 	}
   2626 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2627 
   2628 	wm_free_txrx_queues(sc);
   2629 
   2630 	/* Unmap the registers */
   2631 	if (sc->sc_ss) {
   2632 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2633 		sc->sc_ss = 0;
   2634 	}
   2635 	if (sc->sc_ios) {
   2636 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2637 		sc->sc_ios = 0;
   2638 	}
   2639 	if (sc->sc_flashs) {
   2640 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2641 		sc->sc_flashs = 0;
   2642 	}
   2643 
   2644 	if (sc->sc_core_lock)
   2645 		mutex_obj_free(sc->sc_core_lock);
   2646 
   2647 	return 0;
   2648 }
   2649 
   2650 static bool
   2651 wm_suspend(device_t self, const pmf_qual_t *qual)
   2652 {
   2653 	struct wm_softc *sc = device_private(self);
   2654 
   2655 	wm_release_manageability(sc);
   2656 	wm_release_hw_control(sc);
   2657 #ifdef WM_WOL
   2658 	wm_enable_wakeup(sc);
   2659 #endif
   2660 
   2661 	return true;
   2662 }
   2663 
   2664 static bool
   2665 wm_resume(device_t self, const pmf_qual_t *qual)
   2666 {
   2667 	struct wm_softc *sc = device_private(self);
   2668 
   2669 	wm_init_manageability(sc);
   2670 
   2671 	return true;
   2672 }
   2673 
   2674 /*
   2675  * wm_watchdog:		[ifnet interface function]
   2676  *
   2677  *	Watchdog timer handler.
   2678  */
   2679 static void
   2680 wm_watchdog(struct ifnet *ifp)
   2681 {
   2682 	struct wm_softc *sc = ifp->if_softc;
   2683 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2684 
   2685 	/*
   2686 	 * Since we're using delayed interrupts, sweep up
   2687 	 * before we report an error.
   2688 	 */
   2689 	WM_TX_LOCK(txq);
   2690 	wm_txeof(sc);
   2691 	WM_TX_UNLOCK(txq);
   2692 
   2693 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2694 #ifdef WM_DEBUG
   2695 		int i, j;
   2696 		struct wm_txsoft *txs;
   2697 #endif
   2698 		log(LOG_ERR,
   2699 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2700 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2701 		    txq->txq_next);
   2702 		ifp->if_oerrors++;
   2703 #ifdef WM_DEBUG
    2704 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2705 		    i = WM_NEXTTXS(txq, i)) {
    2706 			txs = &txq->txq_soft[i];
    2707 			printf("txs %d tx %d -> %d\n",
    2708 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2709 			for (j = txs->txs_firstdesc; ;
    2710 			    j = WM_NEXTTX(txq, j)) {
    2711 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2712 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2713 				printf("\t %#08x%08x\n",
    2714 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2715 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2716 				if (j == txs->txs_lastdesc)
    2717 					break;
   2718 			}
   2719 		}
   2720 #endif
   2721 		/* Reset the interface. */
   2722 		(void) wm_init(ifp);
   2723 	}
   2724 
   2725 	/* Try to get more packets going. */
   2726 	ifp->if_start(ifp);
   2727 }
   2728 
   2729 /*
   2730  * wm_tick:
   2731  *
   2732  *	One second timer, used to check link status, sweep up
   2733  *	completed transmit jobs, etc.
   2734  */
   2735 static void
   2736 wm_tick(void *arg)
   2737 {
   2738 	struct wm_softc *sc = arg;
   2739 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2740 #ifndef WM_MPSAFE
   2741 	int s;
   2742 
   2743 	s = splnet();
   2744 #endif
   2745 
   2746 	WM_CORE_LOCK(sc);
   2747 
   2748 	if (sc->sc_stopping)
   2749 		goto out;
   2750 
   2751 	if (sc->sc_type >= WM_T_82542_2_1) {
   2752 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2753 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2754 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2755 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2756 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2757 	}
   2758 
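         	/*
         	 * The statistics registers accumulated below are, per Intel's
         	 * register names: CRC errors, alignment errors, symbol errors,
         	 * Rx errors, sequence errors, carrier extension errors and
         	 * receive length errors; MPC/RNBC count packets dropped for
         	 * lack of receive buffers.
         	 */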
   2759 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2760 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2761 	    + CSR_READ(sc, WMREG_CRCERRS)
   2762 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2763 	    + CSR_READ(sc, WMREG_SYMERRC)
   2764 	    + CSR_READ(sc, WMREG_RXERRC)
   2765 	    + CSR_READ(sc, WMREG_SEC)
   2766 	    + CSR_READ(sc, WMREG_CEXTERR)
   2767 	    + CSR_READ(sc, WMREG_RLEC);
   2768 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2769 
   2770 	if (sc->sc_flags & WM_F_HAS_MII)
   2771 		mii_tick(&sc->sc_mii);
   2772 	else if ((sc->sc_type >= WM_T_82575)
   2773 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2774 		wm_serdes_tick(sc);
   2775 	else
   2776 		wm_tbi_tick(sc);
   2777 
   2778 out:
   2779 	WM_CORE_UNLOCK(sc);
   2780 #ifndef WM_MPSAFE
   2781 	splx(s);
   2782 #endif
   2783 
   2784 	if (!sc->sc_stopping)
   2785 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2786 }
   2787 
   2788 static int
   2789 wm_ifflags_cb(struct ethercom *ec)
   2790 {
   2791 	struct ifnet *ifp = &ec->ec_if;
   2792 	struct wm_softc *sc = ifp->if_softc;
   2793 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2794 	int rc = 0;
   2795 
   2796 	WM_CORE_LOCK(sc);
   2797 
   2798 	if (change != 0)
   2799 		sc->sc_if_flags = ifp->if_flags;
   2800 
   2801 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2802 		rc = ENETRESET;
   2803 		goto out;
   2804 	}
   2805 
   2806 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2807 		wm_set_filter(sc);
   2808 
   2809 	wm_set_vlan(sc);
   2810 
   2811 out:
   2812 	WM_CORE_UNLOCK(sc);
   2813 
   2814 	return rc;
   2815 }
   2816 
   2817 /*
   2818  * wm_ioctl:		[ifnet interface function]
   2819  *
   2820  *	Handle control requests from the operator.
   2821  */
   2822 static int
   2823 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2824 {
   2825 	struct wm_softc *sc = ifp->if_softc;
   2826 	struct ifreq *ifr = (struct ifreq *) data;
   2827 	struct ifaddr *ifa = (struct ifaddr *)data;
   2828 	struct sockaddr_dl *sdl;
   2829 	int s, error;
   2830 
   2831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2832 		device_xname(sc->sc_dev), __func__));
   2833 #ifndef WM_MPSAFE
   2834 	s = splnet();
   2835 #endif
   2836 	switch (cmd) {
   2837 	case SIOCSIFMEDIA:
   2838 	case SIOCGIFMEDIA:
   2839 		WM_CORE_LOCK(sc);
   2840 		/* Flow control requires full-duplex mode. */
   2841 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2842 		    (ifr->ifr_media & IFM_FDX) == 0)
   2843 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2844 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2845 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2846 				/* We can do both TXPAUSE and RXPAUSE. */
   2847 				ifr->ifr_media |=
   2848 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2849 			}
   2850 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2851 		}
   2852 		WM_CORE_UNLOCK(sc);
   2853 #ifdef WM_MPSAFE
   2854 		s = splnet();
   2855 #endif
   2856 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2857 #ifdef WM_MPSAFE
   2858 		splx(s);
   2859 #endif
   2860 		break;
   2861 	case SIOCINITIFADDR:
   2862 		WM_CORE_LOCK(sc);
   2863 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2864 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2865 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2866 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2867 			/* unicast address is first multicast entry */
   2868 			wm_set_filter(sc);
   2869 			error = 0;
   2870 			WM_CORE_UNLOCK(sc);
   2871 			break;
   2872 		}
   2873 		WM_CORE_UNLOCK(sc);
   2874 		/*FALLTHROUGH*/
   2875 	default:
   2876 #ifdef WM_MPSAFE
   2877 		s = splnet();
   2878 #endif
   2879 		/* It may call wm_start, so unlock here */
   2880 		error = ether_ioctl(ifp, cmd, data);
   2881 #ifdef WM_MPSAFE
   2882 		splx(s);
   2883 #endif
   2884 		if (error != ENETRESET)
   2885 			break;
   2886 
   2887 		error = 0;
   2888 
   2889 		if (cmd == SIOCSIFCAP) {
   2890 			error = (*ifp->if_init)(ifp);
   2891 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2892 			;
   2893 		else if (ifp->if_flags & IFF_RUNNING) {
   2894 			/*
   2895 			 * Multicast list has changed; set the hardware filter
   2896 			 * accordingly.
   2897 			 */
   2898 			WM_CORE_LOCK(sc);
   2899 			wm_set_filter(sc);
   2900 			WM_CORE_UNLOCK(sc);
   2901 		}
   2902 		break;
   2903 	}
   2904 
   2905 #ifndef WM_MPSAFE
   2906 	splx(s);
   2907 #endif
   2908 	return error;
   2909 }
   2910 
   2911 /* MAC address related */
   2912 
   2913 /*
    2914  * Get the offset of the MAC address and return it.
    2915  * If an error occurs, use offset 0.
   2916  */
   2917 static uint16_t
   2918 wm_check_alt_mac_addr(struct wm_softc *sc)
   2919 {
   2920 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2921 	uint16_t offset = NVM_OFF_MACADDR;
   2922 
   2923 	/* Try to read alternative MAC address pointer */
   2924 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2925 		return 0;
   2926 
    2927 	/* Check whether the pointer is valid. */
   2928 	if ((offset == 0x0000) || (offset == 0xffff))
   2929 		return 0;
   2930 
   2931 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    2932 	/*
    2933 	 * Check whether the alternative MAC address is valid.
    2934 	 * Some cards have a non-0xffff pointer but don't actually
    2935 	 * use an alternative MAC address.
    2936 	 *
    2937 	 * Check whether the multicast (group) bit is set or not.
    2938 	 */
   2939 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2940 		if (((myea[0] & 0xff) & 0x01) == 0)
   2941 			return offset; /* Found */
   2942 
   2943 	/* Not found */
   2944 	return 0;
   2945 }
   2946 
   2947 static int
   2948 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2949 {
   2950 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2951 	uint16_t offset = NVM_OFF_MACADDR;
   2952 	int do_invert = 0;
   2953 
   2954 	switch (sc->sc_type) {
   2955 	case WM_T_82580:
   2956 	case WM_T_I350:
   2957 	case WM_T_I354:
   2958 		/* EEPROM Top Level Partitioning */
   2959 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2960 		break;
   2961 	case WM_T_82571:
   2962 	case WM_T_82575:
   2963 	case WM_T_82576:
   2964 	case WM_T_80003:
   2965 	case WM_T_I210:
   2966 	case WM_T_I211:
   2967 		offset = wm_check_alt_mac_addr(sc);
   2968 		if (offset == 0)
   2969 			if ((sc->sc_funcid & 0x01) == 1)
   2970 				do_invert = 1;
   2971 		break;
   2972 	default:
   2973 		if ((sc->sc_funcid & 0x01) == 1)
   2974 			do_invert = 1;
   2975 		break;
   2976 	}
   2977 
   2978 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2979 		myea) != 0)
   2980 		goto bad;
   2981 
   2982 	enaddr[0] = myea[0] & 0xff;
   2983 	enaddr[1] = myea[0] >> 8;
   2984 	enaddr[2] = myea[1] & 0xff;
   2985 	enaddr[3] = myea[1] >> 8;
   2986 	enaddr[4] = myea[2] & 0xff;
   2987 	enaddr[5] = myea[2] >> 8;
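         	/*
         	 * Illustrative example (values are hypothetical): NVM words
         	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpack, little-endian,
         	 * to the MAC address 00:11:22:33:44:55.
         	 */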
   2988 
   2989 	/*
   2990 	 * Toggle the LSB of the MAC address on the second port
   2991 	 * of some dual port cards.
   2992 	 */
   2993 	if (do_invert != 0)
   2994 		enaddr[5] ^= 1;
   2995 
   2996 	return 0;
   2997 
   2998  bad:
   2999 	return -1;
   3000 }
   3001 
   3002 /*
   3003  * wm_set_ral:
   3004  *
    3005  *	Set an entry in the receive address list.
   3006  */
   3007 static void
   3008 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3009 {
   3010 	uint32_t ral_lo, ral_hi;
   3011 
   3012 	if (enaddr != NULL) {
   3013 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3014 		    (enaddr[3] << 24);
   3015 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3016 		ral_hi |= RAL_AV;
   3017 	} else {
   3018 		ral_lo = 0;
   3019 		ral_hi = 0;
   3020 	}
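         	/*
         	 * Illustrative check with a hypothetical address: for
         	 * 00:11:22:33:44:55 the packing above gives
         	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
         	 */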
   3021 
   3022 	if (sc->sc_type >= WM_T_82544) {
   3023 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3024 		    ral_lo);
   3025 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3026 		    ral_hi);
   3027 	} else {
   3028 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3029 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3030 	}
   3031 }
   3032 
   3033 /*
   3034  * wm_mchash:
   3035  *
    3036  *	Compute the hash of the multicast address for the 4096-bit
    3037  *	multicast filter (1024-bit on ICH and PCH variants).
   3038  */
   3039 static uint32_t
   3040 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3041 {
   3042 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3043 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3044 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3045 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3046 	uint32_t hash;
   3047 
   3048 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3049 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3050 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3051 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3052 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3053 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3054 		return (hash & 0x3ff);
   3055 	}
   3056 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3057 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
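         	/*
         	 * Worked example with hypothetical bytes: for sc_mchash_type
         	 * == 0, enaddr[4] = 0xaa and enaddr[5] = 0xbb give
         	 * (0xaa >> 4) | (0xbb << 4) = 0xbba.
         	 */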
   3058 
   3059 	return (hash & 0xfff);
   3060 }
   3061 
   3062 /*
   3063  * wm_set_filter:
   3064  *
   3065  *	Set up the receive filter.
   3066  */
   3067 static void
   3068 wm_set_filter(struct wm_softc *sc)
   3069 {
   3070 	struct ethercom *ec = &sc->sc_ethercom;
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	struct ether_multi *enm;
   3073 	struct ether_multistep step;
   3074 	bus_addr_t mta_reg;
   3075 	uint32_t hash, reg, bit;
   3076 	int i, size, ralmax;
   3077 
   3078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3079 		device_xname(sc->sc_dev), __func__));
   3080 	if (sc->sc_type >= WM_T_82544)
   3081 		mta_reg = WMREG_CORDOVA_MTA;
   3082 	else
   3083 		mta_reg = WMREG_MTA;
   3084 
   3085 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3086 
   3087 	if (ifp->if_flags & IFF_BROADCAST)
   3088 		sc->sc_rctl |= RCTL_BAM;
   3089 	if (ifp->if_flags & IFF_PROMISC) {
   3090 		sc->sc_rctl |= RCTL_UPE;
   3091 		goto allmulti;
   3092 	}
   3093 
   3094 	/*
   3095 	 * Set the station address in the first RAL slot, and
   3096 	 * clear the remaining slots.
   3097 	 */
   3098 	if (sc->sc_type == WM_T_ICH8)
    3099 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3100 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3101 	    || (sc->sc_type == WM_T_PCH))
   3102 		size = WM_RAL_TABSIZE_ICH8;
   3103 	else if (sc->sc_type == WM_T_PCH2)
   3104 		size = WM_RAL_TABSIZE_PCH2;
   3105 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3106 		size = WM_RAL_TABSIZE_PCH_LPT;
   3107 	else if (sc->sc_type == WM_T_82575)
   3108 		size = WM_RAL_TABSIZE_82575;
   3109 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3110 		size = WM_RAL_TABSIZE_82576;
   3111 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3112 		size = WM_RAL_TABSIZE_I350;
   3113 	else
   3114 		size = WM_RAL_TABSIZE;
   3115 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3116 
   3117 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3118 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3119 		switch (i) {
   3120 		case 0:
   3121 			/* We can use all entries */
   3122 			ralmax = size;
   3123 			break;
   3124 		case 1:
   3125 			/* Only RAR[0] */
   3126 			ralmax = 1;
   3127 			break;
   3128 		default:
   3129 			/* available SHRA + RAR[0] */
   3130 			ralmax = i + 1;
   3131 		}
   3132 	} else
   3133 		ralmax = size;
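         	/*
         	 * Clear only the slots below ralmax; on PCH_LPT/PCH_SPT the
         	 * entries above it are restricted by FWSM_WLOCK_MAC (above).
         	 */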
   3134 	for (i = 1; i < size; i++) {
   3135 		if (i < ralmax)
   3136 			wm_set_ral(sc, NULL, i);
   3137 	}
   3138 
   3139 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3140 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3141 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3142 	    || (sc->sc_type == WM_T_PCH_SPT))
   3143 		size = WM_ICH8_MC_TABSIZE;
   3144 	else
   3145 		size = WM_MC_TABSIZE;
   3146 	/* Clear out the multicast table. */
   3147 	for (i = 0; i < size; i++)
   3148 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3149 
   3150 	ETHER_FIRST_MULTI(step, ec, enm);
   3151 	while (enm != NULL) {
   3152 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3153 			/*
   3154 			 * We must listen to a range of multicast addresses.
   3155 			 * For now, just accept all multicasts, rather than
   3156 			 * trying to set only those filter bits needed to match
   3157 			 * the range.  (At this time, the only use of address
   3158 			 * ranges is for IP multicast routing, for which the
   3159 			 * range is big enough to require all bits set.)
   3160 			 */
   3161 			goto allmulti;
   3162 		}
   3163 
   3164 		hash = wm_mchash(sc, enm->enm_addrlo);
   3165 
   3166 		reg = (hash >> 5);
   3167 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3168 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3169 		    || (sc->sc_type == WM_T_PCH2)
   3170 		    || (sc->sc_type == WM_T_PCH_LPT)
   3171 		    || (sc->sc_type == WM_T_PCH_SPT))
   3172 			reg &= 0x1f;
   3173 		else
   3174 			reg &= 0x7f;
   3175 		bit = hash & 0x1f;
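         		/*
         		 * Continuing the hypothetical hash 0xbba from above:
         		 * MTA word 0xbba >> 5 = 0x5d, bit 0xbba & 0x1f = 0x1a.
         		 */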
   3176 
   3177 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3178 		hash |= 1U << bit;
   3179 
   3180 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3181 			/*
    3182 			 * 82544 Errata 9: Certain registers cannot be written
   3183 			 * with particular alignments in PCI-X bus operation
   3184 			 * (FCAH, MTA and VFTA).
   3185 			 */
   3186 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3187 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3188 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3189 		} else
   3190 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3191 
   3192 		ETHER_NEXT_MULTI(step, enm);
   3193 	}
   3194 
   3195 	ifp->if_flags &= ~IFF_ALLMULTI;
   3196 	goto setit;
   3197 
   3198  allmulti:
   3199 	ifp->if_flags |= IFF_ALLMULTI;
   3200 	sc->sc_rctl |= RCTL_MPE;
   3201 
   3202  setit:
   3203 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3204 }
   3205 
   3206 /* Reset and init related */
   3207 
   3208 static void
   3209 wm_set_vlan(struct wm_softc *sc)
   3210 {
   3211 
   3212 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3213 		device_xname(sc->sc_dev), __func__));
   3214 	/* Deal with VLAN enables. */
   3215 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3216 		sc->sc_ctrl |= CTRL_VME;
   3217 	else
   3218 		sc->sc_ctrl &= ~CTRL_VME;
   3219 
   3220 	/* Write the control registers. */
   3221 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3222 }
   3223 
   3224 static void
   3225 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3226 {
   3227 	uint32_t gcr;
   3228 	pcireg_t ctrl2;
   3229 
   3230 	gcr = CSR_READ(sc, WMREG_GCR);
   3231 
   3232 	/* Only take action if timeout value is defaulted to 0 */
   3233 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3234 		goto out;
   3235 
   3236 	if ((gcr & GCR_CAP_VER2) == 0) {
   3237 		gcr |= GCR_CMPL_TMOUT_10MS;
   3238 		goto out;
   3239 	}
   3240 
   3241 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3242 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3243 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3244 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3245 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3246 
   3247 out:
   3248 	/* Disable completion timeout resend */
   3249 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3250 
   3251 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3252 }
   3253 
   3254 void
   3255 wm_get_auto_rd_done(struct wm_softc *sc)
   3256 {
   3257 	int i;
   3258 
   3259 	/* wait for eeprom to reload */
   3260 	switch (sc->sc_type) {
   3261 	case WM_T_82571:
   3262 	case WM_T_82572:
   3263 	case WM_T_82573:
   3264 	case WM_T_82574:
   3265 	case WM_T_82583:
   3266 	case WM_T_82575:
   3267 	case WM_T_82576:
   3268 	case WM_T_82580:
   3269 	case WM_T_I350:
   3270 	case WM_T_I354:
   3271 	case WM_T_I210:
   3272 	case WM_T_I211:
   3273 	case WM_T_80003:
   3274 	case WM_T_ICH8:
   3275 	case WM_T_ICH9:
   3276 		for (i = 0; i < 10; i++) {
   3277 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3278 				break;
   3279 			delay(1000);
   3280 		}
   3281 		if (i == 10) {
   3282 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3283 			    "complete\n", device_xname(sc->sc_dev));
   3284 		}
   3285 		break;
   3286 	default:
   3287 		break;
   3288 	}
   3289 }
   3290 
   3291 void
   3292 wm_lan_init_done(struct wm_softc *sc)
   3293 {
   3294 	uint32_t reg = 0;
   3295 	int i;
   3296 
   3297 	/* wait for eeprom to reload */
   3298 	switch (sc->sc_type) {
   3299 	case WM_T_ICH10:
   3300 	case WM_T_PCH:
   3301 	case WM_T_PCH2:
   3302 	case WM_T_PCH_LPT:
   3303 	case WM_T_PCH_SPT:
   3304 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3305 			reg = CSR_READ(sc, WMREG_STATUS);
   3306 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3307 				break;
   3308 			delay(100);
   3309 		}
   3310 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3311 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3312 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3313 		}
   3314 		break;
   3315 	default:
   3316 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3317 		    __func__);
   3318 		break;
   3319 	}
   3320 
   3321 	reg &= ~STATUS_LAN_INIT_DONE;
   3322 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3323 }
   3324 
   3325 void
   3326 wm_get_cfg_done(struct wm_softc *sc)
   3327 {
   3328 	int mask;
   3329 	uint32_t reg;
   3330 	int i;
   3331 
   3332 	/* wait for eeprom to reload */
   3333 	switch (sc->sc_type) {
   3334 	case WM_T_82542_2_0:
   3335 	case WM_T_82542_2_1:
   3336 		/* null */
   3337 		break;
   3338 	case WM_T_82543:
   3339 	case WM_T_82544:
   3340 	case WM_T_82540:
   3341 	case WM_T_82545:
   3342 	case WM_T_82545_3:
   3343 	case WM_T_82546:
   3344 	case WM_T_82546_3:
   3345 	case WM_T_82541:
   3346 	case WM_T_82541_2:
   3347 	case WM_T_82547:
   3348 	case WM_T_82547_2:
   3349 	case WM_T_82573:
   3350 	case WM_T_82574:
   3351 	case WM_T_82583:
   3352 		/* generic */
   3353 		delay(10*1000);
   3354 		break;
   3355 	case WM_T_80003:
   3356 	case WM_T_82571:
   3357 	case WM_T_82572:
   3358 	case WM_T_82575:
   3359 	case WM_T_82576:
   3360 	case WM_T_82580:
   3361 	case WM_T_I350:
   3362 	case WM_T_I354:
   3363 	case WM_T_I210:
   3364 	case WM_T_I211:
   3365 		if (sc->sc_type == WM_T_82571) {
    3366 			/* On the 82571, both ports share EEMNGCTL_CFGDONE_0 */
   3367 			mask = EEMNGCTL_CFGDONE_0;
   3368 		} else
   3369 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3370 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3371 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3372 				break;
   3373 			delay(1000);
   3374 		}
   3375 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3376 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3377 				device_xname(sc->sc_dev), __func__));
   3378 		}
   3379 		break;
   3380 	case WM_T_ICH8:
   3381 	case WM_T_ICH9:
   3382 	case WM_T_ICH10:
   3383 	case WM_T_PCH:
   3384 	case WM_T_PCH2:
   3385 	case WM_T_PCH_LPT:
   3386 	case WM_T_PCH_SPT:
   3387 		delay(10*1000);
   3388 		if (sc->sc_type >= WM_T_ICH10)
   3389 			wm_lan_init_done(sc);
   3390 		else
   3391 			wm_get_auto_rd_done(sc);
   3392 
   3393 		reg = CSR_READ(sc, WMREG_STATUS);
   3394 		if ((reg & STATUS_PHYRA) != 0)
   3395 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3396 		break;
   3397 	default:
   3398 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3399 		    __func__);
   3400 		break;
   3401 	}
   3402 }
   3403 
   3404 /* Init hardware bits */
   3405 void
   3406 wm_initialize_hardware_bits(struct wm_softc *sc)
   3407 {
   3408 	uint32_t tarc0, tarc1, reg;
   3409 
   3410 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3411 		device_xname(sc->sc_dev), __func__));
   3412 	/* For 82571 variant, 80003 and ICHs */
   3413 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3414 	    || (sc->sc_type >= WM_T_80003)) {
   3415 
   3416 		/* Transmit Descriptor Control 0 */
   3417 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3418 		reg |= TXDCTL_COUNT_DESC;
   3419 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3420 
   3421 		/* Transmit Descriptor Control 1 */
   3422 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3423 		reg |= TXDCTL_COUNT_DESC;
   3424 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3425 
   3426 		/* TARC0 */
   3427 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3428 		switch (sc->sc_type) {
   3429 		case WM_T_82571:
   3430 		case WM_T_82572:
   3431 		case WM_T_82573:
   3432 		case WM_T_82574:
   3433 		case WM_T_82583:
   3434 		case WM_T_80003:
   3435 			/* Clear bits 30..27 */
   3436 			tarc0 &= ~__BITS(30, 27);
   3437 			break;
   3438 		default:
   3439 			break;
   3440 		}
   3441 
   3442 		switch (sc->sc_type) {
   3443 		case WM_T_82571:
   3444 		case WM_T_82572:
   3445 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3446 
   3447 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3448 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3449 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3450 			/* 8257[12] Errata No.7 */
    3451 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3452 
   3453 			/* TARC1 bit 28 */
   3454 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3455 				tarc1 &= ~__BIT(28);
   3456 			else
   3457 				tarc1 |= __BIT(28);
   3458 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3459 
   3460 			/*
   3461 			 * 8257[12] Errata No.13
    3462 			 * Disable Dynamic Clock Gating.
   3463 			 */
   3464 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3465 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3467 			break;
   3468 		case WM_T_82573:
   3469 		case WM_T_82574:
   3470 		case WM_T_82583:
   3471 			if ((sc->sc_type == WM_T_82574)
   3472 			    || (sc->sc_type == WM_T_82583))
   3473 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3474 
   3475 			/* Extended Device Control */
   3476 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3477 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3478 			reg |= __BIT(22);	/* Set bit 22 */
   3479 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3480 
   3481 			/* Device Control */
   3482 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3483 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3484 
   3485 			/* PCIe Control Register */
   3486 			/*
   3487 			 * 82573 Errata (unknown).
   3488 			 *
   3489 			 * 82574 Errata 25 and 82583 Errata 12
   3490 			 * "Dropped Rx Packets":
    3491 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3492 			 */
   3493 			reg = CSR_READ(sc, WMREG_GCR);
   3494 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3495 			CSR_WRITE(sc, WMREG_GCR, reg);
   3496 
   3497 			if ((sc->sc_type == WM_T_82574)
   3498 			    || (sc->sc_type == WM_T_82583)) {
   3499 				/*
   3500 				 * Document says this bit must be set for
   3501 				 * proper operation.
   3502 				 */
   3503 				reg = CSR_READ(sc, WMREG_GCR);
   3504 				reg |= __BIT(22);
   3505 				CSR_WRITE(sc, WMREG_GCR, reg);
   3506 
   3507 				/*
    3508 				 * Apply a workaround for a hardware erratum
    3509 				 * documented in the errata docs. It fixes an
    3510 				 * issue where error-prone or unreliable PCIe
    3511 				 * completions occur, particularly with ASPM
    3512 				 * enabled. Without the fix, the issue can
    3513 				 * cause Tx timeouts.
   3514 				 */
   3515 				reg = CSR_READ(sc, WMREG_GCR2);
   3516 				reg |= __BIT(0);
   3517 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3518 			}
   3519 			break;
   3520 		case WM_T_80003:
   3521 			/* TARC0 */
   3522 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3523 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3524 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3525 
   3526 			/* TARC1 bit 28 */
   3527 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3528 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3529 				tarc1 &= ~__BIT(28);
   3530 			else
   3531 				tarc1 |= __BIT(28);
   3532 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3533 			break;
   3534 		case WM_T_ICH8:
   3535 		case WM_T_ICH9:
   3536 		case WM_T_ICH10:
   3537 		case WM_T_PCH:
   3538 		case WM_T_PCH2:
   3539 		case WM_T_PCH_LPT:
   3540 		case WM_T_PCH_SPT:
   3541 			/* TARC0 */
   3542 			if ((sc->sc_type == WM_T_ICH8)
   3543 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3544 				/* Set TARC0 bits 29 and 28 */
   3545 				tarc0 |= __BITS(29, 28);
   3546 			}
   3547 			/* Set TARC0 bits 23,24,26,27 */
   3548 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3549 
   3550 			/* CTRL_EXT */
   3551 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3552 			reg |= __BIT(22);	/* Set bit 22 */
   3553 			/*
   3554 			 * Enable PHY low-power state when MAC is at D3
   3555 			 * w/o WoL
   3556 			 */
   3557 			if (sc->sc_type >= WM_T_PCH)
   3558 				reg |= CTRL_EXT_PHYPDEN;
   3559 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3560 
   3561 			/* TARC1 */
   3562 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3563 			/* bit 28 */
   3564 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3565 				tarc1 &= ~__BIT(28);
   3566 			else
   3567 				tarc1 |= __BIT(28);
   3568 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3569 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3570 
   3571 			/* Device Status */
   3572 			if (sc->sc_type == WM_T_ICH8) {
   3573 				reg = CSR_READ(sc, WMREG_STATUS);
   3574 				reg &= ~__BIT(31);
   3575 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3576 
   3577 			}
   3578 
   3579 			/* IOSFPC */
   3580 			if (sc->sc_type == WM_T_PCH_SPT) {
   3581 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3582 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3583 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3584 			}
   3585 			/*
    3586 			 * Work around a descriptor data corruption issue
    3587 			 * during NFSv2 UDP traffic by simply disabling the
    3588 			 * NFS filtering capability.
   3589 			 */
   3590 			reg = CSR_READ(sc, WMREG_RFCTL);
   3591 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3592 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3593 			break;
   3594 		default:
   3595 			break;
   3596 		}
   3597 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3598 
   3599 		/*
   3600 		 * 8257[12] Errata No.52 and some others.
   3601 		 * Avoid RSS Hash Value bug.
   3602 		 */
   3603 		switch (sc->sc_type) {
   3604 		case WM_T_82571:
   3605 		case WM_T_82572:
   3606 		case WM_T_82573:
   3607 		case WM_T_80003:
   3608 		case WM_T_ICH8:
   3609 			reg = CSR_READ(sc, WMREG_RFCTL);
   3610 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3611 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3612 			break;
   3613 		default:
   3614 			break;
   3615 		}
   3616 	}
   3617 }
   3618 
   3619 static uint32_t
   3620 wm_rxpbs_adjust_82580(uint32_t val)
   3621 {
   3622 	uint32_t rv = 0;
   3623 
   3624 	if (val < __arraycount(wm_82580_rxpbs_table))
   3625 		rv = wm_82580_rxpbs_table[val];
   3626 
   3627 	return rv;
   3628 }
   3629 
   3630 /*
   3631  * wm_reset:
   3632  *
   3633  *	Reset the i82542 chip.
   3634  */
   3635 static void
   3636 wm_reset(struct wm_softc *sc)
   3637 {
   3638 	int phy_reset = 0;
   3639 	int i, error = 0;
   3640 	uint32_t reg, mask;
   3641 
   3642 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3643 		device_xname(sc->sc_dev), __func__));
   3644 	/*
   3645 	 * Allocate on-chip memory according to the MTU size.
   3646 	 * The Packet Buffer Allocation register must be written
   3647 	 * before the chip is reset.
   3648 	 */
   3649 	switch (sc->sc_type) {
   3650 	case WM_T_82547:
   3651 	case WM_T_82547_2:
   3652 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3653 		    PBA_22K : PBA_30K;
   3654 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3655 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3656 			txq->txq_fifo_head = 0;
   3657 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3658 			txq->txq_fifo_size =
   3659 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3660 			txq->txq_fifo_stall = 0;
   3661 		}
   3662 		break;
   3663 	case WM_T_82571:
   3664 	case WM_T_82572:
    3665 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3666 	case WM_T_80003:
   3667 		sc->sc_pba = PBA_32K;
   3668 		break;
   3669 	case WM_T_82573:
   3670 		sc->sc_pba = PBA_12K;
   3671 		break;
   3672 	case WM_T_82574:
   3673 	case WM_T_82583:
   3674 		sc->sc_pba = PBA_20K;
   3675 		break;
   3676 	case WM_T_82576:
   3677 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3678 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3679 		break;
   3680 	case WM_T_82580:
   3681 	case WM_T_I350:
   3682 	case WM_T_I354:
   3683 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3684 		break;
   3685 	case WM_T_I210:
   3686 	case WM_T_I211:
   3687 		sc->sc_pba = PBA_34K;
   3688 		break;
   3689 	case WM_T_ICH8:
   3690 		/* Workaround for a bit corruption issue in FIFO memory */
   3691 		sc->sc_pba = PBA_8K;
   3692 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3693 		break;
   3694 	case WM_T_ICH9:
   3695 	case WM_T_ICH10:
   3696 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3697 		    PBA_14K : PBA_10K;
   3698 		break;
   3699 	case WM_T_PCH:
   3700 	case WM_T_PCH2:
   3701 	case WM_T_PCH_LPT:
   3702 	case WM_T_PCH_SPT:
   3703 		sc->sc_pba = PBA_26K;
   3704 		break;
   3705 	default:
   3706 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3707 		    PBA_40K : PBA_48K;
   3708 		break;
   3709 	}
   3710 	/*
   3711 	 * Only old or non-multiqueue devices have the PBA register
   3712 	 * XXX Need special handling for 82575.
   3713 	 */
   3714 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3715 	    || (sc->sc_type == WM_T_82575))
   3716 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3717 
   3718 	/* Prevent the PCI-E bus from sticking */
   3719 	if (sc->sc_flags & WM_F_PCIE) {
   3720 		int timeout = 800;
   3721 
   3722 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3724 
   3725 		while (timeout--) {
   3726 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3727 			    == 0)
   3728 				break;
   3729 			delay(100);
   3730 		}
   3731 	}
   3732 
   3733 	/* Set the completion timeout for interface */
   3734 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3735 	    || (sc->sc_type == WM_T_82580)
   3736 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3737 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3738 		wm_set_pcie_completion_timeout(sc);
   3739 
   3740 	/* Clear interrupt */
   3741 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3742 	if (sc->sc_nintrs > 1) {
   3743 		if (sc->sc_type != WM_T_82574) {
   3744 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3745 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3746 		} else {
   3747 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3748 		}
   3749 	}
   3750 
   3751 	/* Stop the transmit and receive processes. */
   3752 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3753 	sc->sc_rctl &= ~RCTL_EN;
   3754 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3755 	CSR_WRITE_FLUSH(sc);
   3756 
   3757 	/* XXX set_tbi_sbp_82543() */
   3758 
   3759 	delay(10*1000);
   3760 
   3761 	/* Must acquire the MDIO ownership before MAC reset */
   3762 	switch (sc->sc_type) {
   3763 	case WM_T_82573:
   3764 	case WM_T_82574:
   3765 	case WM_T_82583:
   3766 		error = wm_get_hw_semaphore_82573(sc);
   3767 		break;
   3768 	default:
   3769 		break;
   3770 	}
   3771 
   3772 	/*
   3773 	 * 82541 Errata 29? & 82547 Errata 28?
   3774 	 * See also the description about PHY_RST bit in CTRL register
   3775 	 * in 8254x_GBe_SDM.pdf.
   3776 	 */
   3777 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3778 		CSR_WRITE(sc, WMREG_CTRL,
   3779 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3780 		CSR_WRITE_FLUSH(sc);
   3781 		delay(5000);
   3782 	}
   3783 
   3784 	switch (sc->sc_type) {
   3785 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3786 	case WM_T_82541:
   3787 	case WM_T_82541_2:
   3788 	case WM_T_82547:
   3789 	case WM_T_82547_2:
   3790 		/*
   3791 		 * On some chipsets, a reset through a memory-mapped write
   3792 		 * cycle can cause the chip to reset before completing the
   3793 		 * write cycle.  This causes major headache that can be
   3794 		 * avoided by issuing the reset via indirect register writes
   3795 		 * through I/O space.
   3796 		 *
   3797 		 * So, if we successfully mapped the I/O BAR at attach time,
   3798 		 * use that.  Otherwise, try our luck with a memory-mapped
   3799 		 * reset.
   3800 		 */
   3801 		if (sc->sc_flags & WM_F_IOH_VALID)
   3802 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3803 		else
   3804 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3805 		break;
   3806 	case WM_T_82545_3:
   3807 	case WM_T_82546_3:
   3808 		/* Use the shadow control register on these chips. */
   3809 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3810 		break;
   3811 	case WM_T_80003:
   3812 		mask = swfwphysem[sc->sc_funcid];
   3813 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3814 		wm_get_swfw_semaphore(sc, mask);
   3815 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3816 		wm_put_swfw_semaphore(sc, mask);
   3817 		break;
   3818 	case WM_T_ICH8:
   3819 	case WM_T_ICH9:
   3820 	case WM_T_ICH10:
   3821 	case WM_T_PCH:
   3822 	case WM_T_PCH2:
   3823 	case WM_T_PCH_LPT:
   3824 	case WM_T_PCH_SPT:
   3825 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3826 		if (wm_phy_resetisblocked(sc) == false) {
   3827 			/*
   3828 			 * Gate automatic PHY configuration by hardware on
   3829 			 * non-managed 82579
   3830 			 */
   3831 			if ((sc->sc_type == WM_T_PCH2)
   3832 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3833 				== 0))
   3834 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3835 
   3836 			reg |= CTRL_PHY_RESET;
   3837 			phy_reset = 1;
   3838 		} else
   3839 			printf("XXX reset is blocked!!!\n");
   3840 		wm_get_swfwhw_semaphore(sc);
   3841 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3842 		/* Don't insert a completion barrier during reset */
   3843 		delay(20*1000);
   3844 		wm_put_swfwhw_semaphore(sc);
   3845 		break;
   3846 	case WM_T_82580:
   3847 	case WM_T_I350:
   3848 	case WM_T_I354:
   3849 	case WM_T_I210:
   3850 	case WM_T_I211:
   3851 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3852 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3853 			CSR_WRITE_FLUSH(sc);
   3854 		delay(5000);
   3855 		break;
   3856 	case WM_T_82542_2_0:
   3857 	case WM_T_82542_2_1:
   3858 	case WM_T_82543:
   3859 	case WM_T_82540:
   3860 	case WM_T_82545:
   3861 	case WM_T_82546:
   3862 	case WM_T_82571:
   3863 	case WM_T_82572:
   3864 	case WM_T_82573:
   3865 	case WM_T_82574:
   3866 	case WM_T_82575:
   3867 	case WM_T_82576:
   3868 	case WM_T_82583:
   3869 	default:
   3870 		/* Everything else can safely use the documented method. */
   3871 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3872 		break;
   3873 	}
   3874 
   3875 	/* Must release the MDIO ownership after MAC reset */
   3876 	switch (sc->sc_type) {
   3877 	case WM_T_82573:
   3878 	case WM_T_82574:
   3879 	case WM_T_82583:
   3880 		if (error == 0)
   3881 			wm_put_hw_semaphore_82573(sc);
   3882 		break;
   3883 	default:
   3884 		break;
   3885 	}
   3886 
   3887 	if (phy_reset != 0)
   3888 		wm_get_cfg_done(sc);
   3889 
   3890 	/* reload EEPROM */
   3891 	switch (sc->sc_type) {
   3892 	case WM_T_82542_2_0:
   3893 	case WM_T_82542_2_1:
   3894 	case WM_T_82543:
   3895 	case WM_T_82544:
   3896 		delay(10);
   3897 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3898 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3899 		CSR_WRITE_FLUSH(sc);
   3900 		delay(2000);
   3901 		break;
   3902 	case WM_T_82540:
   3903 	case WM_T_82545:
   3904 	case WM_T_82545_3:
   3905 	case WM_T_82546:
   3906 	case WM_T_82546_3:
   3907 		delay(5*1000);
   3908 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3909 		break;
   3910 	case WM_T_82541:
   3911 	case WM_T_82541_2:
   3912 	case WM_T_82547:
   3913 	case WM_T_82547_2:
   3914 		delay(20000);
   3915 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3916 		break;
   3917 	case WM_T_82571:
   3918 	case WM_T_82572:
   3919 	case WM_T_82573:
   3920 	case WM_T_82574:
   3921 	case WM_T_82583:
   3922 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3923 			delay(10);
   3924 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3925 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3926 			CSR_WRITE_FLUSH(sc);
   3927 		}
   3928 		/* check EECD_EE_AUTORD */
   3929 		wm_get_auto_rd_done(sc);
   3930 		/*
    3931 		 * PHY configuration from the NVM starts only after
    3932 		 * EECD_AUTO_RD is set.
   3933 		 */
   3934 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3935 		    || (sc->sc_type == WM_T_82583))
   3936 			delay(25*1000);
   3937 		break;
   3938 	case WM_T_82575:
   3939 	case WM_T_82576:
   3940 	case WM_T_82580:
   3941 	case WM_T_I350:
   3942 	case WM_T_I354:
   3943 	case WM_T_I210:
   3944 	case WM_T_I211:
   3945 	case WM_T_80003:
   3946 		/* check EECD_EE_AUTORD */
   3947 		wm_get_auto_rd_done(sc);
   3948 		break;
   3949 	case WM_T_ICH8:
   3950 	case WM_T_ICH9:
   3951 	case WM_T_ICH10:
   3952 	case WM_T_PCH:
   3953 	case WM_T_PCH2:
   3954 	case WM_T_PCH_LPT:
   3955 	case WM_T_PCH_SPT:
   3956 		break;
   3957 	default:
   3958 		panic("%s: unknown type\n", __func__);
   3959 	}
   3960 
   3961 	/* Check whether EEPROM is present or not */
   3962 	switch (sc->sc_type) {
   3963 	case WM_T_82575:
   3964 	case WM_T_82576:
   3965 	case WM_T_82580:
   3966 	case WM_T_I350:
   3967 	case WM_T_I354:
   3968 	case WM_T_ICH8:
   3969 	case WM_T_ICH9:
   3970 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3971 			/* Not found */
   3972 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3973 			if (sc->sc_type == WM_T_82575)
   3974 				wm_reset_init_script_82575(sc);
   3975 		}
   3976 		break;
   3977 	default:
   3978 		break;
   3979 	}
   3980 
   3981 	if ((sc->sc_type == WM_T_82580)
   3982 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3983 		/* clear global device reset status bit */
   3984 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3985 	}
   3986 
   3987 	/* Clear any pending interrupt events. */
   3988 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3989 	reg = CSR_READ(sc, WMREG_ICR);
   3990 	if (sc->sc_nintrs > 1) {
   3991 		if (sc->sc_type != WM_T_82574) {
   3992 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3993 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3994 		} else
   3995 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3996 	}
   3997 
   3998 	/* reload sc_ctrl */
   3999 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4000 
   4001 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4002 		wm_set_eee_i350(sc);
   4003 
   4004 	/* dummy read from WUC */
   4005 	if (sc->sc_type == WM_T_PCH)
   4006 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4007 	/*
   4008 	 * For PCH, this write will make sure that any noise will be detected
   4009 	 * as a CRC error and be dropped rather than show up as a bad packet
   4010 	 * to the DMA engine
   4011 	 */
   4012 	if (sc->sc_type == WM_T_PCH)
   4013 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4014 
   4015 	if (sc->sc_type >= WM_T_82544)
   4016 		CSR_WRITE(sc, WMREG_WUC, 0);
   4017 
   4018 	wm_reset_mdicnfg_82580(sc);
   4019 
   4020 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4021 		wm_pll_workaround_i210(sc);
   4022 }
   4023 
   4024 /*
   4025  * wm_add_rxbuf:
   4026  *
    4027  *	Add a receive buffer to the indicated descriptor.
   4028  */
   4029 static int
   4030 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4031 {
   4032 	struct wm_softc *sc = rxq->rxq_sc;
   4033 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4034 	struct mbuf *m;
   4035 	int error;
   4036 
   4037 	KASSERT(WM_RX_LOCKED(rxq));
   4038 
   4039 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4040 	if (m == NULL)
   4041 		return ENOBUFS;
   4042 
   4043 	MCLGET(m, M_DONTWAIT);
   4044 	if ((m->m_flags & M_EXT) == 0) {
   4045 		m_freem(m);
   4046 		return ENOBUFS;
   4047 	}
   4048 
   4049 	if (rxs->rxs_mbuf != NULL)
   4050 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4051 
   4052 	rxs->rxs_mbuf = m;
   4053 
   4054 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4055 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4056 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4057 	if (error) {
   4058 		/* XXX XXX XXX */
   4059 		aprint_error_dev(sc->sc_dev,
   4060 		    "unable to load rx DMA map %d, error = %d\n",
   4061 		    idx, error);
   4062 		panic("wm_add_rxbuf");
   4063 	}
   4064 
   4065 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4066 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4067 
   4068 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4069 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4070 			wm_init_rxdesc(rxq, idx);
   4071 	} else
   4072 		wm_init_rxdesc(rxq, idx);
   4073 
   4074 	return 0;
   4075 }
   4076 
   4077 /*
   4078  * wm_rxdrain:
   4079  *
   4080  *	Drain the receive queue.
   4081  */
   4082 static void
   4083 wm_rxdrain(struct wm_rxqueue *rxq)
   4084 {
   4085 	struct wm_softc *sc = rxq->rxq_sc;
   4086 	struct wm_rxsoft *rxs;
   4087 	int i;
   4088 
   4089 	KASSERT(WM_RX_LOCKED(rxq));
   4090 
   4091 	for (i = 0; i < WM_NRXDESC; i++) {
   4092 		rxs = &rxq->rxq_soft[i];
   4093 		if (rxs->rxs_mbuf != NULL) {
   4094 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4095 			m_freem(rxs->rxs_mbuf);
   4096 			rxs->rxs_mbuf = NULL;
   4097 		}
   4098 	}
   4099 }
   4100 
   4101 
   4102 /*
   4103  * XXX copy from FreeBSD's sys/net/rss_config.c
   4104  */
   4105 /*
   4106  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4107  * effectiveness may be limited by algorithm choice and available entropy
   4108  * during the boot.
   4109  *
   4110  * XXXRW: And that we don't randomize it yet!
   4111  *
   4112  * This is the default Microsoft RSS specification key which is also
   4113  * the Chelsio T5 firmware default key.
   4114  */
   4115 #define RSS_KEYSIZE 40
   4116 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4117 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4118 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4119 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4120 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4121 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4122 };
   4123 
   4124 /*
    4125  * The caller must pass an array of sizeof(wm_rss_key) bytes.
    4126  *
    4127  * XXX
    4128  * Since if_ixgbe may also use this function, it should not be
    4129  * an if_wm-specific function.
   4130  */
   4131 static void
   4132 wm_rss_getkey(uint8_t *key)
   4133 {
   4134 
   4135 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4136 }
   4137 
   4138 /*
   4139  * Setup registers for RSS.
   4140  *
    4141  * XXX no VMDq support yet
   4142  */
   4143 static void
   4144 wm_init_rss(struct wm_softc *sc)
   4145 {
   4146 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4147 	int i;
   4148 
   4149 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4150 
   4151 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4152 		int qid, reta_ent;
   4153 
   4154 		qid  = i % sc->sc_nrxqueues;
    4155 		switch (sc->sc_type) {
   4156 		case WM_T_82574:
   4157 			reta_ent = __SHIFTIN(qid,
   4158 			    RETA_ENT_QINDEX_MASK_82574);
   4159 			break;
   4160 		case WM_T_82575:
   4161 			reta_ent = __SHIFTIN(qid,
   4162 			    RETA_ENT_QINDEX1_MASK_82575);
   4163 			break;
   4164 		default:
   4165 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4166 			break;
   4167 		}
   4168 
   4169 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4170 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4171 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4172 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4173 	}
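         	/*
         	 * For example, with a hypothetical sc_nrxqueues of 2, the
         	 * loop above maps RETA entries 0, 1, 2, 3, ... to queues
         	 * 0, 1, 0, 1, ...
         	 */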
   4174 
   4175 	wm_rss_getkey((uint8_t *)rss_key);
   4176 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4177 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4178 
   4179 	if (sc->sc_type == WM_T_82574)
   4180 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4181 	else
   4182 		mrqc = MRQC_ENABLE_RSS_MQ;
   4183 
    4184 	/* XXX
    4185 	 * The same as FreeBSD's igb.
    4186 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4187 	 */
   4188 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4189 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4190 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4191 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4192 
   4193 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4194 }
   4195 
   4196 /*
    4197  * Adjust the numbers of TX and RX queues which the system actually uses.
    4198  *
    4199  * The numbers are affected by the following parameters:
    4200  *     - The number of hardware queues
   4201  *     - The number of MSI-X vectors (= "nvectors" argument)
   4202  *     - ncpu
   4203  */
   4204 static void
   4205 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4206 {
   4207 	int hw_ntxqueues, hw_nrxqueues;
   4208 
   4209 	if (nvectors < 3) {
   4210 		sc->sc_ntxqueues = 1;
   4211 		sc->sc_nrxqueues = 1;
   4212 		return;
   4213 	}
   4214 
    4215 	switch (sc->sc_type) {
   4216 	case WM_T_82572:
   4217 		hw_ntxqueues = 2;
   4218 		hw_nrxqueues = 2;
   4219 		break;
   4220 	case WM_T_82574:
   4221 		hw_ntxqueues = 2;
   4222 		hw_nrxqueues = 2;
   4223 		break;
   4224 	case WM_T_82575:
   4225 		hw_ntxqueues = 4;
   4226 		hw_nrxqueues = 4;
   4227 		break;
   4228 	case WM_T_82576:
   4229 		hw_ntxqueues = 16;
   4230 		hw_nrxqueues = 16;
   4231 		break;
   4232 	case WM_T_82580:
   4233 	case WM_T_I350:
   4234 	case WM_T_I354:
   4235 		hw_ntxqueues = 8;
   4236 		hw_nrxqueues = 8;
   4237 		break;
   4238 	case WM_T_I210:
   4239 		hw_ntxqueues = 4;
   4240 		hw_nrxqueues = 4;
   4241 		break;
   4242 	case WM_T_I211:
   4243 		hw_ntxqueues = 2;
   4244 		hw_nrxqueues = 2;
   4245 		break;
   4246 		/*
    4247 		 * As the following Ethernet controllers do not support
    4248 		 * MSI-X, this driver does not use multiqueue on them:
   4249 		 *     - WM_T_80003
   4250 		 *     - WM_T_ICH8
   4251 		 *     - WM_T_ICH9
   4252 		 *     - WM_T_ICH10
   4253 		 *     - WM_T_PCH
   4254 		 *     - WM_T_PCH2
   4255 		 *     - WM_T_PCH_LPT
   4256 		 */
   4257 	default:
   4258 		hw_ntxqueues = 1;
   4259 		hw_nrxqueues = 1;
   4260 		break;
   4261 	}
   4262 
   4263 	/*
    4264 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4265 	 * limit the number of queues actually used.
    4266 	 *
    4267 	 * XXX
    4268 	 * Currently, we use separate TX queue and RX queue interrupts.
    4269 	 * However, the MSI-X vector count of recent controllers (such as
    4270 	 * the I354) assumes that drivers bundle a TX queue interrupt and
    4271 	 * an RX queue interrupt into a single interrupt, e.g. FreeBSD's
    4272 	 * igb handles interrupts that way.
   4273 	 */
   4274 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4275 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4276 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4277 	} else {
   4278 		sc->sc_ntxqueues = hw_ntxqueues;
   4279 		sc->sc_nrxqueues = hw_nrxqueues;
   4280 	}
   4281 
   4282 	/*
    4283 	 * As more queues than CPUs cannot improve scaling, we limit
    4284 	 * the number of queues actually used.
   4285 	 */
   4286 	if (ncpu < sc->sc_ntxqueues)
   4287 		sc->sc_ntxqueues = ncpu;
   4288 	if (ncpu < sc->sc_nrxqueues)
   4289 		sc->sc_nrxqueues = ncpu;
   4290 
   4291 	/* XXX Currently, this driver supports RX multiqueue only. */
   4292 	sc->sc_ntxqueues = 1;
   4293 }
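         /*
          * A concrete example with hypothetical numbers: an 82580 (8 TX and
          * 8 RX hardware queues) given nvectors = 5 gets (5 - 1) / 2 = 2 TX
          * and 2 RX queues, since 5 < 8 + 8 + 1; the TX count is then forced
          * back to 1 by the restriction above.
          */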
   4294 
   4295 /*
   4296  * Both single interrupt MSI and INTx can use this function.
   4297  */
   4298 static int
   4299 wm_setup_legacy(struct wm_softc *sc)
   4300 {
   4301 	pci_chipset_tag_t pc = sc->sc_pc;
   4302 	const char *intrstr = NULL;
   4303 	char intrbuf[PCI_INTRSTR_LEN];
   4304 	int error;
   4305 
   4306 	error = wm_alloc_txrx_queues(sc);
   4307 	if (error) {
   4308 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4309 		    error);
   4310 		return ENOMEM;
   4311 	}
   4312 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4313 	    sizeof(intrbuf));
   4314 #ifdef WM_MPSAFE
   4315 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4316 #endif
   4317 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4318 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4319 	if (sc->sc_ihs[0] == NULL) {
    4320 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4321 		    (pci_intr_type(sc->sc_intrs[0])
   4322 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4323 		return ENOMEM;
   4324 	}
   4325 
   4326 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4327 	sc->sc_nintrs = 1;
   4328 	return 0;
   4329 }
   4330 
   4331 static int
   4332 wm_setup_msix(struct wm_softc *sc)
   4333 {
   4334 	void *vih;
   4335 	kcpuset_t *affinity;
   4336 	int qidx, error, intr_idx, tx_established, rx_established;
   4337 	pci_chipset_tag_t pc = sc->sc_pc;
   4338 	const char *intrstr = NULL;
   4339 	char intrbuf[PCI_INTRSTR_LEN];
   4340 	char intr_xname[INTRDEVNAMEBUF];
   4341 	/*
    4342 	 * To avoid contention with other devices' interrupts, the affinity
    4343 	 * of the Tx/Rx interrupts starts from CPU#1.
   4344 	 */
   4345 	int affinity_offset = 1;
   4346 
   4347 	error = wm_alloc_txrx_queues(sc);
   4348 	if (error) {
   4349 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4350 		    error);
   4351 		return ENOMEM;
   4352 	}
   4353 
   4354 	kcpuset_create(&affinity, false);
   4355 	intr_idx = 0;
   4356 
   4357 	/*
   4358 	 * TX
   4359 	 */
   4360 	tx_established = 0;
   4361 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4362 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4363 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4364 
   4365 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4366 		    sizeof(intrbuf));
   4367 #ifdef WM_MPSAFE
   4368 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4369 		    PCI_INTR_MPSAFE, true);
   4370 #endif
   4371 		memset(intr_xname, 0, sizeof(intr_xname));
   4372 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4373 		    device_xname(sc->sc_dev), qidx);
   4374 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4375 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4376 		if (vih == NULL) {
   4377 			aprint_error_dev(sc->sc_dev,
   4378 			    "unable to establish MSI-X(for TX)%s%s\n",
   4379 			    intrstr ? " at " : "",
   4380 			    intrstr ? intrstr : "");
   4381 
   4382 			goto fail_0;
   4383 		}
   4384 		kcpuset_zero(affinity);
   4385 		/* Round-robin affinity */
   4386 		kcpuset_set(affinity, affinity_to);
   4387 		error = interrupt_distribute(vih, affinity, NULL);
   4388 		if (error == 0) {
   4389 			aprint_normal_dev(sc->sc_dev,
   4390 			    "for TX interrupting at %s affinity to %u\n",
   4391 			    intrstr, affinity_to);
   4392 		} else {
   4393 			aprint_normal_dev(sc->sc_dev,
   4394 			    "for TX interrupting at %s\n", intrstr);
   4395 		}
   4396 		sc->sc_ihs[intr_idx] = vih;
   4397 		txq->txq_id = qidx;
   4398 		txq->txq_intr_idx = intr_idx;
   4399 
   4400 		tx_established++;
   4401 		intr_idx++;
   4402 	}
   4403 
   4404 	/*
   4405 	 * RX
   4406 	 */
   4407 	rx_established = 0;
   4408 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4409 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4410 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4411 
   4412 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4413 		    sizeof(intrbuf));
   4414 #ifdef WM_MPSAFE
   4415 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4416 		    PCI_INTR_MPSAFE, true);
   4417 #endif
   4418 		memset(intr_xname, 0, sizeof(intr_xname));
   4419 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4420 		    device_xname(sc->sc_dev), qidx);
   4421 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4422 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4423 		if (vih == NULL) {
   4424 			aprint_error_dev(sc->sc_dev,
   4425 			    "unable to establish MSI-X(for RX)%s%s\n",
   4426 			    intrstr ? " at " : "",
   4427 			    intrstr ? intrstr : "");
   4428 
   4429 			goto fail_1;
   4430 		}
   4431 		kcpuset_zero(affinity);
   4432 		/* Round-robin affinity */
   4433 		kcpuset_set(affinity, affinity_to);
   4434 		error = interrupt_distribute(vih, affinity, NULL);
   4435 		if (error == 0) {
   4436 			aprint_normal_dev(sc->sc_dev,
   4437 			    "for RX interrupting at %s affinity to %u\n",
   4438 			    intrstr, affinity_to);
   4439 		} else {
   4440 			aprint_normal_dev(sc->sc_dev,
   4441 			    "for RX interrupting at %s\n", intrstr);
   4442 		}
   4443 		sc->sc_ihs[intr_idx] = vih;
   4444 		rxq->rxq_id = qidx;
   4445 		rxq->rxq_intr_idx = intr_idx;
   4446 
   4447 		rx_established++;
   4448 		intr_idx++;
   4449 	}
   4450 
   4451 	/*
   4452 	 * LINK
   4453 	 */
   4454 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4455 	    sizeof(intrbuf));
   4456 #ifdef WM_MPSAFE
   4457 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4458 #endif
   4459 	memset(intr_xname, 0, sizeof(intr_xname));
   4460 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4461 	    device_xname(sc->sc_dev));
   4462 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4463 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4464 	if (vih == NULL) {
   4465 		aprint_error_dev(sc->sc_dev,
   4466 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4467 		    intrstr ? " at " : "",
   4468 		    intrstr ? intrstr : "");
   4469 
   4470 		goto fail_1;
   4471 	}
    4472 	/* Keep the default affinity for the LINK interrupt */
   4473 	aprint_normal_dev(sc->sc_dev,
   4474 	    "for LINK interrupting at %s\n", intrstr);
   4475 	sc->sc_ihs[intr_idx] = vih;
   4476 	sc->sc_link_intr_idx = intr_idx;
   4477 
   4478 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4479 	kcpuset_destroy(affinity);
   4480 	return 0;
   4481 
   4482  fail_1:
   4483 	for (qidx = 0; qidx < rx_established; qidx++) {
   4484 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4485 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
   4486 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4487 	}
   4488  fail_0:
   4489 	for (qidx = 0; qidx < tx_established; qidx++) {
   4490 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4491 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
   4492 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4493 	}
   4494 
   4495 	kcpuset_destroy(affinity);
   4496 	return ENOMEM;
   4497 }
   4498 
   4499 /*
   4500  * wm_init:		[ifnet interface function]
   4501  *
   4502  *	Initialize the interface.
   4503  */
   4504 static int
   4505 wm_init(struct ifnet *ifp)
   4506 {
   4507 	struct wm_softc *sc = ifp->if_softc;
   4508 	int ret;
   4509 
   4510 	WM_CORE_LOCK(sc);
   4511 	ret = wm_init_locked(ifp);
   4512 	WM_CORE_UNLOCK(sc);
   4513 
   4514 	return ret;
   4515 }
   4516 
   4517 static int
   4518 wm_init_locked(struct ifnet *ifp)
   4519 {
   4520 	struct wm_softc *sc = ifp->if_softc;
   4521 	int i, j, trynum, error = 0;
   4522 	uint32_t reg;
   4523 
   4524 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4525 		device_xname(sc->sc_dev), __func__));
   4526 	KASSERT(WM_CORE_LOCKED(sc));
   4527 	/*
    4528 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4529 	 * There is a small but measurable benefit to avoiding the adjustment
   4530 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4531 	 * on such platforms.  One possibility is that the DMA itself is
   4532 	 * slightly more efficient if the front of the entire packet (instead
   4533 	 * of the front of the headers) is aligned.
   4534 	 *
   4535 	 * Note we must always set align_tweak to 0 if we are using
   4536 	 * jumbo frames.
   4537 	 */
   4538 #ifdef __NO_STRICT_ALIGNMENT
   4539 	sc->sc_align_tweak = 0;
   4540 #else
   4541 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4542 		sc->sc_align_tweak = 0;
   4543 	else
   4544 		sc->sc_align_tweak = 2;
   4545 #endif /* __NO_STRICT_ALIGNMENT */
   4546 
   4547 	/* Cancel any pending I/O. */
   4548 	wm_stop_locked(ifp, 0);
   4549 
   4550 	/* update statistics before reset */
   4551 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4552 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4553 
   4554 	/* Reset the chip to a known state. */
   4555 	wm_reset(sc);
   4556 
   4557 	switch (sc->sc_type) {
   4558 	case WM_T_82571:
   4559 	case WM_T_82572:
   4560 	case WM_T_82573:
   4561 	case WM_T_82574:
   4562 	case WM_T_82583:
   4563 	case WM_T_80003:
   4564 	case WM_T_ICH8:
   4565 	case WM_T_ICH9:
   4566 	case WM_T_ICH10:
   4567 	case WM_T_PCH:
   4568 	case WM_T_PCH2:
   4569 	case WM_T_PCH_LPT:
   4570 	case WM_T_PCH_SPT:
   4571 		/* AMT based hardware can now take control from firmware */
   4572 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4573 			wm_get_hw_control(sc);
   4574 		break;
   4575 	default:
   4576 		break;
   4577 	}
   4578 
   4579 	/* Init hardware bits */
   4580 	wm_initialize_hardware_bits(sc);
   4581 
   4582 	/* Reset the PHY. */
   4583 	if (sc->sc_flags & WM_F_HAS_MII)
   4584 		wm_gmii_reset(sc);
   4585 
   4586 	/* Calculate (E)ITR value */
   4587 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4588 		sc->sc_itr = 450;	/* For EITR */
   4589 	} else if (sc->sc_type >= WM_T_82543) {
   4590 		/*
   4591 		 * Set up the interrupt throttling register (units of 256ns)
   4592 		 * Note that a footnote in Intel's documentation says this
   4593 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4594 		 * or 10Mbit mode.  Empirically, this also appears to be
    4595 		 * true for the 1024ns units of the other
   4596 		 * interrupt-related timer registers -- so, really, we ought
   4597 		 * to divide this value by 4 when the link speed is low.
   4598 		 *
   4599 		 * XXX implement this division at link speed change!
   4600 		 */
   4601 
   4602 		/*
   4603 		 * For N interrupts/sec, set this value to:
   4604 		 * 1000000000 / (N * 256).  Note that we set the
   4605 		 * absolute and packet timer values to this value
   4606 		 * divided by 4 to get "simple timer" behavior.
   4607 		 */
   4608 
   4609 		sc->sc_itr = 1500;		/* 2604 ints/sec */
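         		/* e.g. 1000000000 / (2604 * 256) =~ 1500, as above */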
   4610 	}
   4611 
   4612 	error = wm_init_txrx_queues(sc);
   4613 	if (error)
   4614 		goto out;
   4615 
   4616 	/*
   4617 	 * Clear out the VLAN table -- we don't use it (yet).
   4618 	 */
   4619 	CSR_WRITE(sc, WMREG_VET, 0);
   4620 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4621 		trynum = 10; /* Due to hw errata */
   4622 	else
   4623 		trynum = 1;
   4624 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4625 		for (j = 0; j < trynum; j++)
   4626 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4627 
   4628 	/*
   4629 	 * Set up flow-control parameters.
   4630 	 *
   4631 	 * XXX Values could probably stand some tuning.
   4632 	 */
   4633 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4634 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4635 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4636 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4637 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4638 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4639 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4640 	}
   4641 
   4642 	sc->sc_fcrtl = FCRTL_DFLT;
   4643 	if (sc->sc_type < WM_T_82543) {
   4644 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4645 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4646 	} else {
   4647 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4648 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4649 	}
   4650 
   4651 	if (sc->sc_type == WM_T_80003)
   4652 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4653 	else
   4654 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4655 
   4656 	/* Writes the control register. */
   4657 	wm_set_vlan(sc);
   4658 
   4659 	if (sc->sc_flags & WM_F_HAS_MII) {
   4660 		int val;
   4661 
   4662 		switch (sc->sc_type) {
   4663 		case WM_T_80003:
   4664 		case WM_T_ICH8:
   4665 		case WM_T_ICH9:
   4666 		case WM_T_ICH10:
   4667 		case WM_T_PCH:
   4668 		case WM_T_PCH2:
   4669 		case WM_T_PCH_LPT:
   4670 		case WM_T_PCH_SPT:
   4671 			/*
   4672 			 * Set the mac to wait the maximum time between each
   4673 			 * iteration and increase the max iterations when
   4674 			 * polling the phy; this fixes erroneous timeouts at
   4675 			 * 10Mbps.
   4676 			 */
   4677 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4678 			    0xFFFF);
   4679 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4680 			val |= 0x3F;
   4681 			wm_kmrn_writereg(sc,
   4682 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4683 			break;
   4684 		default:
   4685 			break;
   4686 		}
   4687 
   4688 		if (sc->sc_type == WM_T_80003) {
   4689 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4690 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4691 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4692 
   4693 			/* Bypass RX and TX FIFO's */
   4694 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4695 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4696 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4697 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4698 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4699 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4700 		}
   4701 	}
   4702 #if 0
   4703 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4704 #endif
   4705 
   4706 	/* Set up checksum offload parameters. */
   4707 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4708 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4709 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4710 		reg |= RXCSUM_IPOFL;
   4711 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4712 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4713 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4714 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4715 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4716 
   4717 	/* Set up MSI-X */
   4718 	if (sc->sc_nintrs > 1) {
   4719 		uint32_t ivar;
   4720 		struct wm_txqueue *txq;
   4721 		struct wm_rxqueue *rxq;
   4722 		int qid;
   4723 
   4724 		if (sc->sc_type == WM_T_82575) {
   4725 			/* Interrupt control */
   4726 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4727 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4728 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4729 
   4730 			/* TX */
   4731 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4732 				txq = &sc->sc_txq[i];
   4733 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4734 				    EITR_TX_QUEUE(txq->txq_id));
   4735 			}
   4736 			/* RX */
   4737 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4738 				rxq = &sc->sc_rxq[i];
   4739 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4740 				    EITR_RX_QUEUE(rxq->rxq_id));
   4741 			}
   4742 			/* Link status */
   4743 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4744 			    EITR_OTHER);
   4745 		} else if (sc->sc_type == WM_T_82574) {
   4746 			/* Interrupt control */
   4747 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4748 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4749 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4750 
   4751 			ivar = 0;
   4752 			/* TX */
   4753 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4754 				txq = &sc->sc_txq[i];
   4755 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4756 					| txq->txq_intr_idx),
   4757 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4758 			}
   4759 			/* RX */
   4760 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4761 				rxq = &sc->sc_rxq[i];
   4762 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4763 					| rxq->rxq_intr_idx),
   4764 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4765 			}
   4766 			/* Link status */
   4767 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4768 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4769 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4770 		} else {
   4771 			/* Interrupt control */
   4772 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4773 			    | GPIE_EIAME | GPIE_PBA);
   4774 
   4775 			switch (sc->sc_type) {
   4776 			case WM_T_82580:
   4777 			case WM_T_I350:
   4778 			case WM_T_I354:
   4779 			case WM_T_I210:
   4780 			case WM_T_I211:
   4781 				/* TX */
   4782 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4783 					txq = &sc->sc_txq[i];
   4784 					qid = txq->txq_id;
   4785 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4786 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4787 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4788 						| IVAR_VALID),
   4789 					    IVAR_TX_MASK_Q(qid));
   4790 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4791 				}
   4792 
   4793 				/* RX */
   4794 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4795 					rxq = &sc->sc_rxq[i];
   4796 					qid = rxq->rxq_id;
   4797 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4798 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4799 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4800 						| IVAR_VALID),
   4801 					    IVAR_RX_MASK_Q(qid));
   4802 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4803 				}
   4804 				break;
   4805 			case WM_T_82576:
   4806 				/* TX */
   4807 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4808 					txq = &sc->sc_txq[i];
   4809 					qid = txq->txq_id;
   4810 					ivar = CSR_READ(sc,
   4811 					    WMREG_IVAR_Q_82576(qid));
   4812 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4813 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4814 						| IVAR_VALID),
   4815 					    IVAR_TX_MASK_Q_82576(qid));
   4816 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4817 					    ivar);
   4818 				}
   4819 
   4820 				/* RX */
   4821 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4822 					rxq = &sc->sc_rxq[i];
   4823 					qid = rxq->rxq_id;
   4824 					ivar = CSR_READ(sc,
   4825 					    WMREG_IVAR_Q_82576(qid));
   4826 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4827 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4828 						| IVAR_VALID),
   4829 					    IVAR_RX_MASK_Q_82576(qid));
   4830 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4831 					    ivar);
   4832 				}
   4833 				break;
   4834 			default:
   4835 				break;
   4836 			}
   4837 
   4838 			/* Link status */
   4839 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4840 			    IVAR_MISC_OTHER);
   4841 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4842 		}
   4843 
   4844 		if (sc->sc_nrxqueues > 1) {
   4845 			wm_init_rss(sc);
   4846 
   4847 			/*
    4848 			 * NOTE: Receive full-packet checksum offload
    4849 			 * is mutually exclusive with multiqueue.  This
    4850 			 * does not affect the TCP/IP header checksum
    4851 			 * offloads, which still work.
   4852 			*/
   4853 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4854 			reg |= RXCSUM_PCSD;
   4855 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4856 		}
   4857 	}
   4858 
   4859 	/* Set up the interrupt registers. */
   4860 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4861 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4862 	    ICR_RXO | ICR_RXT0;
   4863 	if (sc->sc_nintrs > 1) {
   4864 		uint32_t mask;
   4865 		struct wm_txqueue *txq;
   4866 		struct wm_rxqueue *rxq;
   4867 
   4868 		switch (sc->sc_type) {
   4869 		case WM_T_82574:
   4870 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4871 			    WMREG_EIAC_82574_MSIX_MASK);
   4872 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4873 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4874 			break;
   4875 		default:
   4876 			if (sc->sc_type == WM_T_82575) {
   4877 				mask = 0;
   4878 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4879 					txq = &sc->sc_txq[i];
   4880 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4881 				}
   4882 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4883 					rxq = &sc->sc_rxq[i];
   4884 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4885 				}
   4886 				mask |= EITR_OTHER;
   4887 			} else {
   4888 				mask = 0;
   4889 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4890 					txq = &sc->sc_txq[i];
   4891 					mask |= 1 << txq->txq_intr_idx;
   4892 				}
   4893 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4894 					rxq = &sc->sc_rxq[i];
   4895 					mask |= 1 << rxq->rxq_intr_idx;
   4896 				}
   4897 				mask |= 1 << sc->sc_link_intr_idx;
   4898 			}
   4899 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4900 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4901 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4902 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4903 			break;
   4904 		}
   4905 	} else
   4906 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
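	/*
	 * Illustrative example (vector numbers invented): with one TX
	 * queue on vector 0, one RX queue on vector 1 and the link
	 * interrupt on vector 2, the generic MSI-X branch above computes
	 * mask = (1 << 0) | (1 << 1) | (1 << 2) = 0x7, which is then
	 * written to EIAC, EIAM and EIMS.
	 */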
   4907 
   4908 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4909 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4910 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4911 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4912 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4913 		reg |= KABGTXD_BGSQLBIAS;
   4914 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4915 	}
   4916 
   4917 	/* Set up the inter-packet gap. */
   4918 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4919 
   4920 	if (sc->sc_type >= WM_T_82543) {
   4921 		/*
    4922 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    4923 		 * use the multiqueue function with MSI-X.
   4924 		 */
   4925 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4926 			int qidx;
   4927 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4928 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4929 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4930 				    sc->sc_itr);
   4931 			}
   4932 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4933 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4934 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4935 				    sc->sc_itr);
   4936 			}
   4937 			/*
    4938 			 * Link interrupts occur much less frequently
    4939 			 * than TX and RX interrupts, so we don't tune
    4940 			 * the EITR(WM_MSIX_LINKINTR_IDX) value the way
    4941 			 * FreeBSD's if_igb does.
   4942 			 */
   4943 		} else
   4944 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4945 	}
   4946 
   4947 	/* Set the VLAN ethernetype. */
   4948 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4949 
   4950 	/*
   4951 	 * Set up the transmit control register; we start out with
    4952 	 * a collision distance suitable for FDX, but update it when
   4953 	 * we resolve the media type.
   4954 	 */
   4955 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4956 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4957 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4958 	if (sc->sc_type >= WM_T_82571)
   4959 		sc->sc_tctl |= TCTL_MULR;
   4960 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4961 
   4962 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4963 		/* Write TDT after TCTL.EN is set. See the document. */
   4964 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4965 	}
   4966 
   4967 	if (sc->sc_type == WM_T_80003) {
   4968 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4969 		reg &= ~TCTL_EXT_GCEX_MASK;
   4970 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4971 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4972 	}
   4973 
   4974 	/* Set the media. */
   4975 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4976 		goto out;
   4977 
   4978 	/* Configure for OS presence */
   4979 	wm_init_manageability(sc);
   4980 
   4981 	/*
   4982 	 * Set up the receive control register; we actually program
   4983 	 * the register when we set the receive filter.  Use multicast
   4984 	 * address offset type 0.
   4985 	 *
   4986 	 * Only the i82544 has the ability to strip the incoming
   4987 	 * CRC, so we don't enable that feature.
   4988 	 */
   4989 	sc->sc_mchash_type = 0;
   4990 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4991 	    | RCTL_MO(sc->sc_mchash_type);
   4992 
   4993 	/*
   4994 	 * The I350 has a bug where it always strips the CRC whether
    4995 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   4996 	 */
   4997 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4998 	    || (sc->sc_type == WM_T_I210))
   4999 		sc->sc_rctl |= RCTL_SECRC;
   5000 
   5001 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5002 	    && (ifp->if_mtu > ETHERMTU)) {
   5003 		sc->sc_rctl |= RCTL_LPE;
   5004 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5005 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5006 	}
   5007 
   5008 	if (MCLBYTES == 2048) {
   5009 		sc->sc_rctl |= RCTL_2k;
   5010 	} else {
   5011 		if (sc->sc_type >= WM_T_82543) {
   5012 			switch (MCLBYTES) {
   5013 			case 4096:
   5014 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5015 				break;
   5016 			case 8192:
   5017 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5018 				break;
   5019 			case 16384:
   5020 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5021 				break;
   5022 			default:
   5023 				panic("wm_init: MCLBYTES %d unsupported",
   5024 				    MCLBYTES);
   5025 				break;
   5026 			}
   5027 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5028 	}
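	/*
	 * For example, with the common MCLBYTES = 2048 the plain RCTL_2k
	 * encoding above is used; only the larger power-of-two cluster
	 * sizes need the RCTL_BSEX buffer-size extension bits.
	 */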
   5029 
   5030 	/* Set the receive filter. */
   5031 	wm_set_filter(sc);
   5032 
   5033 	/* Enable ECC */
   5034 	switch (sc->sc_type) {
   5035 	case WM_T_82571:
   5036 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5037 		reg |= PBA_ECC_CORR_EN;
   5038 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5039 		break;
   5040 	case WM_T_PCH_LPT:
   5041 	case WM_T_PCH_SPT:
   5042 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5043 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5044 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5045 
   5046 		reg = CSR_READ(sc, WMREG_CTRL);
   5047 		reg |= CTRL_MEHE;
   5048 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5049 		break;
   5050 	default:
   5051 		break;
   5052 	}
   5053 
    5054 	/* On 82575 and later, set RDT only if RX is enabled */
   5055 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5056 		int qidx;
   5057 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   5058 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   5059 			for (i = 0; i < WM_NRXDESC; i++) {
   5060 				WM_RX_LOCK(rxq);
   5061 				wm_init_rxdesc(rxq, i);
   5062 				WM_RX_UNLOCK(rxq);
   5064 			}
   5065 		}
   5066 	}
   5067 
   5068 	sc->sc_stopping = false;
   5069 
   5070 	/* Start the one second link check clock. */
   5071 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5072 
   5073 	/* ...all done! */
   5074 	ifp->if_flags |= IFF_RUNNING;
   5075 	ifp->if_flags &= ~IFF_OACTIVE;
   5076 
   5077  out:
   5078 	sc->sc_if_flags = ifp->if_flags;
   5079 	if (error)
   5080 		log(LOG_ERR, "%s: interface not running\n",
   5081 		    device_xname(sc->sc_dev));
   5082 	return error;
   5083 }
   5084 
   5085 /*
   5086  * wm_stop:		[ifnet interface function]
   5087  *
   5088  *	Stop transmission on the interface.
   5089  */
   5090 static void
   5091 wm_stop(struct ifnet *ifp, int disable)
   5092 {
   5093 	struct wm_softc *sc = ifp->if_softc;
   5094 
   5095 	WM_CORE_LOCK(sc);
   5096 	wm_stop_locked(ifp, disable);
   5097 	WM_CORE_UNLOCK(sc);
   5098 }
   5099 
   5100 static void
   5101 wm_stop_locked(struct ifnet *ifp, int disable)
   5102 {
   5103 	struct wm_softc *sc = ifp->if_softc;
   5104 	struct wm_txsoft *txs;
   5105 	int i, qidx;
   5106 
   5107 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5108 		device_xname(sc->sc_dev), __func__));
   5109 	KASSERT(WM_CORE_LOCKED(sc));
   5110 
   5111 	sc->sc_stopping = true;
   5112 
   5113 	/* Stop the one second clock. */
   5114 	callout_stop(&sc->sc_tick_ch);
   5115 
   5116 	/* Stop the 82547 Tx FIFO stall check timer. */
   5117 	if (sc->sc_type == WM_T_82547)
   5118 		callout_stop(&sc->sc_txfifo_ch);
   5119 
   5120 	if (sc->sc_flags & WM_F_HAS_MII) {
   5121 		/* Down the MII. */
   5122 		mii_down(&sc->sc_mii);
   5123 	} else {
   5124 #if 0
   5125 		/* Should we clear PHY's status properly? */
   5126 		wm_reset(sc);
   5127 #endif
   5128 	}
   5129 
   5130 	/* Stop the transmit and receive processes. */
   5131 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5132 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5133 	sc->sc_rctl &= ~RCTL_EN;
   5134 
   5135 	/*
   5136 	 * Clear the interrupt mask to ensure the device cannot assert its
   5137 	 * interrupt line.
   5138 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5139 	 * service any currently pending or shared interrupt.
   5140 	 */
   5141 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5142 	sc->sc_icr = 0;
   5143 	if (sc->sc_nintrs > 1) {
   5144 		if (sc->sc_type != WM_T_82574) {
   5145 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5146 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5147 		} else
   5148 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5149 	}
   5150 
   5151 	/* Release any queued transmit buffers. */
   5152 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5153 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5154 		WM_TX_LOCK(txq);
   5155 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5156 			txs = &txq->txq_soft[i];
   5157 			if (txs->txs_mbuf != NULL) {
    5158 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5159 				m_freem(txs->txs_mbuf);
   5160 				txs->txs_mbuf = NULL;
   5161 			}
   5162 		}
   5163 		if (sc->sc_type == WM_T_PCH_SPT) {
   5164 			pcireg_t preg;
   5165 			uint32_t reg;
   5166 			int nexttx;
   5167 
   5168 			/* First, disable MULR fix in FEXTNVM11 */
   5169 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5170 			reg |= FEXTNVM11_DIS_MULRFIX;
   5171 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5172 
   5173 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5174 			    WM_PCI_DESCRING_STATUS);
   5175 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5176 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5177 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5178 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5179 			    && (reg != 0)) {
   5180 				/* TX */
   5181 				printf("XXX need TX flush (reg = %08x)\n",
   5182 				    preg);
   5183 				wm_init_tx_descs(sc, txq);
   5184 				wm_init_tx_regs(sc, txq);
   5185 				nexttx = txq->txq_next;
   5186 				wm_set_dma_addr(
   5187 					&txq->txq_descs[nexttx].wtx_addr,
   5188 					WM_CDTXADDR(txq, nexttx));
   5189 				txq->txq_descs[nexttx].wtx_cmdlen
   5190 				    = htole32(WTX_CMD_IFCS | 512);
   5191 				wm_cdtxsync(txq, nexttx, 1,
    5192 				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5193 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5194 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5195 				CSR_WRITE_FLUSH(sc);
   5196 				delay(250);
   5197 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5198 			}
   5199 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5200 			    WM_PCI_DESCRING_STATUS);
   5201 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5202 				/* RX */
   5203 				printf("XXX need RX flush\n");
   5204 			}
   5205 		}
   5206 		WM_TX_UNLOCK(txq);
   5207 	}
   5208 
   5209 	/* Mark the interface as down and cancel the watchdog timer. */
   5210 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5211 	ifp->if_timer = 0;
   5212 
   5213 	if (disable) {
   5214 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5215 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5216 			WM_RX_LOCK(rxq);
   5217 			wm_rxdrain(rxq);
   5218 			WM_RX_UNLOCK(rxq);
   5219 		}
   5220 	}
   5221 
   5222 #if 0 /* notyet */
   5223 	if (sc->sc_type >= WM_T_82544)
   5224 		CSR_WRITE(sc, WMREG_WUC, 0);
   5225 #endif
   5226 }
   5227 
   5228 static void
   5229 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5230 {
   5231 	struct mbuf *m;
   5232 	int i;
   5233 
   5234 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5235 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5236 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5237 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5238 		    m->m_data, m->m_len, m->m_flags);
   5239 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5240 	    i, i == 1 ? "" : "s");
   5241 }
   5242 
   5243 /*
   5244  * wm_82547_txfifo_stall:
   5245  *
   5246  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5247  *	reset the FIFO pointers, and restart packet transmission.
   5248  */
   5249 static void
   5250 wm_82547_txfifo_stall(void *arg)
   5251 {
   5252 	struct wm_softc *sc = arg;
   5253 	struct wm_txqueue *txq = sc->sc_txq;
   5254 #ifndef WM_MPSAFE
   5255 	int s;
   5256 
   5257 	s = splnet();
   5258 #endif
   5259 	WM_TX_LOCK(txq);
   5260 
   5261 	if (sc->sc_stopping)
   5262 		goto out;
   5263 
   5264 	if (txq->txq_fifo_stall) {
   5265 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5266 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5267 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5268 			/*
   5269 			 * Packets have drained.  Stop transmitter, reset
   5270 			 * FIFO pointers, restart transmitter, and kick
   5271 			 * the packet queue.
   5272 			 */
   5273 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5274 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5275 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5276 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5277 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5278 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5279 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5280 			CSR_WRITE_FLUSH(sc);
   5281 
   5282 			txq->txq_fifo_head = 0;
   5283 			txq->txq_fifo_stall = 0;
   5284 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5285 		} else {
   5286 			/*
   5287 			 * Still waiting for packets to drain; try again in
   5288 			 * another tick.
   5289 			 */
   5290 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5291 		}
   5292 	}
   5293 
   5294 out:
   5295 	WM_TX_UNLOCK(txq);
   5296 #ifndef WM_MPSAFE
   5297 	splx(s);
   5298 #endif
   5299 }
   5300 
   5301 /*
   5302  * wm_82547_txfifo_bugchk:
   5303  *
   5304  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5305  *	prevent enqueueing a packet that would wrap around the end
   5306  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
    5307  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5308  *	We do this by checking the amount of space before the end
   5309  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5310  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5311  *	the internal FIFO pointers to the beginning, and restart
   5312  *	transmission on the interface.
   5313  */
   5314 #define	WM_FIFO_HDR		0x10
   5315 #define	WM_82547_PAD_LEN	0x3e0
   5316 static int
   5317 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5318 {
   5319 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5320 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5321 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5322 
   5323 	/* Just return if already stalled. */
   5324 	if (txq->txq_fifo_stall)
   5325 		return 1;
   5326 
   5327 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5328 		/* Stall only occurs in half-duplex mode. */
   5329 		goto send_packet;
   5330 	}
   5331 
   5332 	if (len >= WM_82547_PAD_LEN + space) {
   5333 		txq->txq_fifo_stall = 1;
   5334 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5335 		return 1;
   5336 	}
   5337 
   5338  send_packet:
   5339 	txq->txq_fifo_head += len;
   5340 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5341 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5342 
   5343 	return 0;
   5344 }
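/*
 * Worked example (numbers invented): a 1514-byte frame consumes
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
 * space.  With only space = 256 bytes left before the end of the FIFO,
 * len >= WM_82547_PAD_LEN + space (1536 >= 992 + 256) holds, so the
 * queue is stalled until the FIFO drains and the pointers are reset.
 */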
   5345 
   5346 static int
   5347 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5348 {
   5349 	int error;
   5350 
   5351 	/*
   5352 	 * Allocate the control data structures, and create and load the
   5353 	 * DMA map for it.
   5354 	 *
   5355 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5356 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5357 	 * both sets within the same 4G segment.
   5358 	 */
   5359 	if (sc->sc_type < WM_T_82544)
   5360 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5361 	else
   5362 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5363 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5364 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5365 	else
   5366 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5367 
   5368 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5369 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5370 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5371 		aprint_error_dev(sc->sc_dev,
   5372 		    "unable to allocate TX control data, error = %d\n",
   5373 		    error);
   5374 		goto fail_0;
   5375 	}
   5376 
   5377 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5378 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5379 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5380 		aprint_error_dev(sc->sc_dev,
   5381 		    "unable to map TX control data, error = %d\n", error);
   5382 		goto fail_1;
   5383 	}
   5384 
   5385 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5386 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5387 		aprint_error_dev(sc->sc_dev,
   5388 		    "unable to create TX control data DMA map, error = %d\n",
   5389 		    error);
   5390 		goto fail_2;
   5391 	}
   5392 
   5393 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5394 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5395 		aprint_error_dev(sc->sc_dev,
   5396 		    "unable to load TX control data DMA map, error = %d\n",
   5397 		    error);
   5398 		goto fail_3;
   5399 	}
   5400 
   5401 	return 0;
   5402 
   5403  fail_3:
   5404 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5405  fail_2:
   5406 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5407 	    WM_TXDESCS_SIZE(txq));
   5408  fail_1:
   5409 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5410  fail_0:
   5411 	return error;
   5412 }
   5413 
   5414 static void
   5415 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5416 {
   5417 
   5418 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5419 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5420 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5421 	    WM_TXDESCS_SIZE(txq));
   5422 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5423 }
   5424 
   5425 static int
   5426 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5427 {
   5428 	int error;
   5429 
   5430 	/*
   5431 	 * Allocate the control data structures, and create and load the
   5432 	 * DMA map for it.
   5433 	 *
   5434 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5435 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5436 	 * both sets within the same 4G segment.
   5437 	 */
   5438 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5439 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5440 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5441 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5442 		aprint_error_dev(sc->sc_dev,
   5443 		    "unable to allocate RX control data, error = %d\n",
   5444 		    error);
   5445 		goto fail_0;
   5446 	}
   5447 
   5448 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5449 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5450 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5451 		aprint_error_dev(sc->sc_dev,
   5452 		    "unable to map RX control data, error = %d\n", error);
   5453 		goto fail_1;
   5454 	}
   5455 
   5456 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5457 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5458 		aprint_error_dev(sc->sc_dev,
   5459 		    "unable to create RX control data DMA map, error = %d\n",
   5460 		    error);
   5461 		goto fail_2;
   5462 	}
   5463 
   5464 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5465 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5466 		aprint_error_dev(sc->sc_dev,
   5467 		    "unable to load RX control data DMA map, error = %d\n",
   5468 		    error);
   5469 		goto fail_3;
   5470 	}
   5471 
   5472 	return 0;
   5473 
   5474  fail_3:
   5475 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5476  fail_2:
   5477 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5478 	    rxq->rxq_desc_size);
   5479  fail_1:
   5480 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5481  fail_0:
   5482 	return error;
   5483 }
   5484 
   5485 static void
   5486 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5487 {
   5488 
   5489 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5490 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5491 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5492 	    rxq->rxq_desc_size);
   5493 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5494 }
   5495 
   5496 
   5497 static int
   5498 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5499 {
   5500 	int i, error;
   5501 
   5502 	/* Create the transmit buffer DMA maps. */
   5503 	WM_TXQUEUELEN(txq) =
   5504 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5505 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5506 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5507 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5508 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5509 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5510 			aprint_error_dev(sc->sc_dev,
   5511 			    "unable to create Tx DMA map %d, error = %d\n",
   5512 			    i, error);
   5513 			goto fail;
   5514 		}
   5515 	}
   5516 
   5517 	return 0;
   5518 
   5519  fail:
   5520 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5521 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5522 			bus_dmamap_destroy(sc->sc_dmat,
   5523 			    txq->txq_soft[i].txs_dmamap);
   5524 	}
   5525 	return error;
   5526 }
   5527 
   5528 static void
   5529 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5530 {
   5531 	int i;
   5532 
   5533 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5534 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5535 			bus_dmamap_destroy(sc->sc_dmat,
   5536 			    txq->txq_soft[i].txs_dmamap);
   5537 	}
   5538 }
   5539 
   5540 static int
   5541 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5542 {
   5543 	int i, error;
   5544 
   5545 	/* Create the receive buffer DMA maps. */
   5546 	for (i = 0; i < WM_NRXDESC; i++) {
   5547 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5548 			    MCLBYTES, 0, 0,
   5549 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5550 			aprint_error_dev(sc->sc_dev,
    5551 			    "unable to create Rx DMA map %d, error = %d\n",
   5552 			    i, error);
   5553 			goto fail;
   5554 		}
   5555 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5556 	}
   5557 
   5558 	return 0;
   5559 
   5560  fail:
   5561 	for (i = 0; i < WM_NRXDESC; i++) {
   5562 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5563 			bus_dmamap_destroy(sc->sc_dmat,
   5564 			    rxq->rxq_soft[i].rxs_dmamap);
   5565 	}
   5566 	return error;
   5567 }
   5568 
   5569 static void
   5570 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5571 {
   5572 	int i;
   5573 
   5574 	for (i = 0; i < WM_NRXDESC; i++) {
   5575 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5576 			bus_dmamap_destroy(sc->sc_dmat,
   5577 			    rxq->rxq_soft[i].rxs_dmamap);
   5578 	}
   5579 }
   5580 
   5581 /*
    5582  * wm_alloc_txrx_queues:
   5583  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5584  */
   5585 static int
   5586 wm_alloc_txrx_queues(struct wm_softc *sc)
   5587 {
   5588 	int i, error, tx_done, rx_done;
   5589 
   5590 	/*
   5591 	 * For transmission
   5592 	 */
   5593 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5594 	    KM_SLEEP);
   5595 	if (sc->sc_txq == NULL) {
   5596 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_txqueue\n");
   5597 		error = ENOMEM;
   5598 		goto fail_0;
   5599 	}
   5600 
   5601 	error = 0;
   5602 	tx_done = 0;
   5603 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5604 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5605 		txq->txq_sc = sc;
   5606 #ifdef WM_MPSAFE
   5607 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5608 #else
   5609 		txq->txq_lock = NULL;
   5610 #endif
   5611 		error = wm_alloc_tx_descs(sc, txq);
   5612 		if (error)
   5613 			break;
   5614 		error = wm_alloc_tx_buffer(sc, txq);
   5615 		if (error) {
   5616 			wm_free_tx_descs(sc, txq);
   5617 			break;
   5618 		}
   5619 		tx_done++;
   5620 	}
   5621 	if (error)
   5622 		goto fail_1;
   5623 
   5624 	/*
    5625 	 * For receive
   5626 	 */
   5627 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5628 	    KM_SLEEP);
   5629 	if (sc->sc_rxq == NULL) {
   5630 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_rxqueue\n");
   5631 		error = ENOMEM;
   5632 		goto fail_1;
   5633 	}
   5634 
   5635 	error = 0;
   5636 	rx_done = 0;
   5637 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5638 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5639 		rxq->rxq_sc = sc;
   5640 #ifdef WM_MPSAFE
   5641 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5642 #else
   5643 		rxq->rxq_lock = NULL;
   5644 #endif
   5645 		error = wm_alloc_rx_descs(sc, rxq);
   5646 		if (error)
   5647 			break;
   5648 
   5649 		error = wm_alloc_rx_buffer(sc, rxq);
   5650 		if (error) {
   5651 			wm_free_rx_descs(sc, rxq);
   5652 			break;
   5653 		}
   5654 
   5655 		rx_done++;
   5656 	}
   5657 	if (error)
   5658 		goto fail_2;
   5659 
   5660 	return 0;
   5661 
   5662  fail_2:
   5663 	for (i = 0; i < rx_done; i++) {
   5664 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5665 		wm_free_rx_buffer(sc, rxq);
   5666 		wm_free_rx_descs(sc, rxq);
   5667 		if (rxq->rxq_lock)
   5668 			mutex_obj_free(rxq->rxq_lock);
   5669 	}
   5670 	kmem_free(sc->sc_rxq,
   5671 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5672  fail_1:
   5673 	for (i = 0; i < tx_done; i++) {
   5674 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5675 		wm_free_tx_buffer(sc, txq);
   5676 		wm_free_tx_descs(sc, txq);
   5677 		if (txq->txq_lock)
   5678 			mutex_obj_free(txq->txq_lock);
   5679 	}
   5680 	kmem_free(sc->sc_txq,
   5681 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5682  fail_0:
   5683 	return error;
   5684 }
   5685 
   5686 /*
    5687  * wm_free_txrx_queues:
   5688  *	Free {tx,rx}descs and {tx,rx} buffers
   5689  */
   5690 static void
   5691 wm_free_txrx_queues(struct wm_softc *sc)
   5692 {
   5693 	int i;
   5694 
   5695 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5696 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5697 		wm_free_rx_buffer(sc, rxq);
   5698 		wm_free_rx_descs(sc, rxq);
   5699 		if (rxq->rxq_lock)
   5700 			mutex_obj_free(rxq->rxq_lock);
   5701 	}
   5702 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5703 
   5704 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5705 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5706 		wm_free_tx_buffer(sc, txq);
   5707 		wm_free_tx_descs(sc, txq);
   5708 		if (txq->txq_lock)
   5709 			mutex_obj_free(txq->txq_lock);
   5710 	}
   5711 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5712 }
   5713 
   5714 static void
   5715 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5716 {
   5717 
   5718 	KASSERT(WM_TX_LOCKED(txq));
   5719 
   5720 	/* Initialize the transmit descriptor ring. */
   5721 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5722 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5723 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5724 	txq->txq_free = WM_NTXDESC(txq);
   5725 	txq->txq_next = 0;
   5726 }
   5727 
   5728 static void
   5729 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5730 {
   5731 
   5732 	KASSERT(WM_TX_LOCKED(txq));
   5733 
   5734 	if (sc->sc_type < WM_T_82543) {
   5735 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5736 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5737 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5738 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5739 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5740 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5741 	} else {
   5742 		int qid = txq->txq_id;
   5743 
   5744 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5745 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5746 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5747 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5748 
   5749 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5750 			/*
   5751 			 * Don't write TDT before TCTL.EN is set.
   5752 			 * See the document.
   5753 			 */
   5754 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5755 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5756 			    | TXDCTL_WTHRESH(0));
   5757 		else {
   5758 			/* ITR / 4 */
   5759 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5760 			if (sc->sc_type >= WM_T_82540) {
    5761 				/* Should be the same value as TIDV. */
   5762 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5763 			}
   5764 
   5765 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5766 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5767 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5768 		}
   5769 	}
   5770 }
   5771 
   5772 static void
   5773 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5774 {
   5775 	int i;
   5776 
   5777 	KASSERT(WM_TX_LOCKED(txq));
   5778 
   5779 	/* Initialize the transmit job descriptors. */
   5780 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5781 		txq->txq_soft[i].txs_mbuf = NULL;
   5782 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5783 	txq->txq_snext = 0;
   5784 	txq->txq_sdirty = 0;
   5785 }
   5786 
   5787 static void
   5788 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5789 {
   5790 
   5791 	KASSERT(WM_TX_LOCKED(txq));
   5792 
   5793 	/*
   5794 	 * Set up some register offsets that are different between
   5795 	 * the i82542 and the i82543 and later chips.
   5796 	 */
   5797 	if (sc->sc_type < WM_T_82543)
   5798 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5799 	else
   5800 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
   5801 
   5802 	wm_init_tx_descs(sc, txq);
   5803 	wm_init_tx_regs(sc, txq);
   5804 	wm_init_tx_buffer(sc, txq);
   5805 }
   5806 
   5807 static void
   5808 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5809 {
   5810 
   5811 	KASSERT(WM_RX_LOCKED(rxq));
   5812 
   5813 	/*
   5814 	 * Initialize the receive descriptor and receive job
   5815 	 * descriptor rings.
   5816 	 */
   5817 	if (sc->sc_type < WM_T_82543) {
   5818 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5819 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5820 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5821 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5822 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5823 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5824 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5825 
   5826 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5827 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5828 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5829 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5830 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5831 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5832 	} else {
   5833 		int qid = rxq->rxq_id;
   5834 
   5835 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5836 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5837 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5838 
   5839 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5840 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5841 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   5842 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5843 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
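			/*
			 * Illustrative arithmetic (assuming the usual 1KB
			 * units, i.e. SRRCTL_BSIZEPKT_SHIFT == 10): with
			 * MCLBYTES = 2048 the BSIZEPKT field becomes
			 * 2048 >> 10 = 2, and the panic above fires for any
			 * MCLBYTES that is not a multiple of 1024.
			 */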
   5844 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5845 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5846 			    | RXDCTL_WTHRESH(1));
   5847 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5848 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5849 		} else {
   5850 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5851 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5852 			/* ITR / 4 */
   5853 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5854 			/* MUST be the same value as RDTR */
   5855 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5856 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5857 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5858 		}
   5859 	}
   5860 }
   5861 
   5862 static int
   5863 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5864 {
   5865 	struct wm_rxsoft *rxs;
   5866 	int error, i;
   5867 
   5868 	KASSERT(WM_RX_LOCKED(rxq));
   5869 
   5870 	for (i = 0; i < WM_NRXDESC; i++) {
   5871 		rxs = &rxq->rxq_soft[i];
   5872 		if (rxs->rxs_mbuf == NULL) {
   5873 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5874 				log(LOG_ERR, "%s: unable to allocate or map "
   5875 				    "rx buffer %d, error = %d\n",
   5876 				    device_xname(sc->sc_dev), i, error);
   5877 				/*
   5878 				 * XXX Should attempt to run with fewer receive
   5879 				 * XXX buffers instead of just failing.
   5880 				 */
   5881 				wm_rxdrain(rxq);
   5882 				return ENOMEM;
   5883 			}
   5884 		} else {
   5885 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5886 				wm_init_rxdesc(rxq, i);
   5887 			/*
    5888 			 * For 82575 and newer devices, the RX descriptors
    5889 			 * must be initialized after RCTL.EN is set in
    5890 			 * wm_set_filter().
   5891 			 */
   5892 		}
   5893 	}
   5894 	rxq->rxq_ptr = 0;
   5895 	rxq->rxq_discard = 0;
   5896 	WM_RXCHAIN_RESET(rxq);
   5897 
   5898 	return 0;
   5899 }
   5900 
   5901 static int
   5902 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5903 {
   5904 
   5905 	KASSERT(WM_RX_LOCKED(rxq));
   5906 
   5907 	/*
   5908 	 * Set up some register offsets that are different between
   5909 	 * the i82542 and the i82543 and later chips.
   5910 	 */
   5911 	if (sc->sc_type < WM_T_82543)
   5912 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5913 	else
   5914 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5915 
   5916 	wm_init_rx_regs(sc, rxq);
   5917 	return wm_init_rx_buffer(sc, rxq);
   5918 }
   5919 
   5920 /*
    5921  * wm_init_txrx_queues:
   5922  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5923  */
   5924 static int
   5925 wm_init_txrx_queues(struct wm_softc *sc)
   5926 {
   5927 	int i, error;
   5928 
   5929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5930 		device_xname(sc->sc_dev), __func__));
   5931 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5932 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5933 		WM_TX_LOCK(txq);
   5934 		wm_init_tx_queue(sc, txq);
   5935 		WM_TX_UNLOCK(txq);
   5936 	}
   5937 
   5938 	error = 0;
   5939 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5940 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5941 		WM_RX_LOCK(rxq);
   5942 		error = wm_init_rx_queue(sc, rxq);
   5943 		WM_RX_UNLOCK(rxq);
   5944 		if (error)
   5945 			break;
   5946 	}
   5947 
   5948 	return error;
   5949 }
   5950 
   5951 /*
   5952  * wm_tx_offload:
   5953  *
   5954  *	Set up TCP/IP checksumming parameters for the
   5955  *	specified packet.
   5956  */
   5957 static int
   5958 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5959     uint8_t *fieldsp)
   5960 {
   5961 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5962 	struct mbuf *m0 = txs->txs_mbuf;
   5963 	struct livengood_tcpip_ctxdesc *t;
   5964 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5965 	uint32_t ipcse;
   5966 	struct ether_header *eh;
   5967 	int offset, iphl;
   5968 	uint8_t fields;
   5969 
   5970 	/*
   5971 	 * XXX It would be nice if the mbuf pkthdr had offset
   5972 	 * fields for the protocol headers.
   5973 	 */
   5974 
   5975 	eh = mtod(m0, struct ether_header *);
   5976 	switch (htons(eh->ether_type)) {
   5977 	case ETHERTYPE_IP:
   5978 	case ETHERTYPE_IPV6:
   5979 		offset = ETHER_HDR_LEN;
   5980 		break;
   5981 
   5982 	case ETHERTYPE_VLAN:
   5983 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5984 		break;
   5985 
   5986 	default:
   5987 		/*
   5988 		 * Don't support this protocol or encapsulation.
   5989 		 */
   5990 		*fieldsp = 0;
   5991 		*cmdp = 0;
   5992 		return 0;
   5993 	}
   5994 
   5995 	if ((m0->m_pkthdr.csum_flags &
   5996 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5997 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5998 	} else {
   5999 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6000 	}
   6001 	ipcse = offset + iphl - 1;
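	/*
	 * For example (plain IPv4 over Ethernet, no VLAN): offset is
	 * ETHER_HDR_LEN (14) and iphl is 20, so ipcse = 33, the offset
	 * of the last byte of the IP header.
	 */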
   6002 
   6003 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6004 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6005 	seg = 0;
   6006 	fields = 0;
   6007 
   6008 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6009 		int hlen = offset + iphl;
   6010 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6011 
   6012 		if (__predict_false(m0->m_len <
   6013 				    (hlen + sizeof(struct tcphdr)))) {
   6014 			/*
   6015 			 * TCP/IP headers are not in the first mbuf; we need
   6016 			 * to do this the slow and painful way.  Let's just
   6017 			 * hope this doesn't happen very often.
   6018 			 */
   6019 			struct tcphdr th;
   6020 
   6021 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6022 
   6023 			m_copydata(m0, hlen, sizeof(th), &th);
   6024 			if (v4) {
   6025 				struct ip ip;
   6026 
   6027 				m_copydata(m0, offset, sizeof(ip), &ip);
   6028 				ip.ip_len = 0;
   6029 				m_copyback(m0,
   6030 				    offset + offsetof(struct ip, ip_len),
   6031 				    sizeof(ip.ip_len), &ip.ip_len);
   6032 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6033 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6034 			} else {
   6035 				struct ip6_hdr ip6;
   6036 
   6037 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6038 				ip6.ip6_plen = 0;
   6039 				m_copyback(m0,
   6040 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6041 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6042 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6043 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6044 			}
   6045 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6046 			    sizeof(th.th_sum), &th.th_sum);
   6047 
   6048 			hlen += th.th_off << 2;
   6049 		} else {
   6050 			/*
   6051 			 * TCP/IP headers are in the first mbuf; we can do
   6052 			 * this the easy way.
   6053 			 */
   6054 			struct tcphdr *th;
   6055 
   6056 			if (v4) {
   6057 				struct ip *ip =
   6058 				    (void *)(mtod(m0, char *) + offset);
   6059 				th = (void *)(mtod(m0, char *) + hlen);
   6060 
   6061 				ip->ip_len = 0;
   6062 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6063 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6064 			} else {
   6065 				struct ip6_hdr *ip6 =
   6066 				    (void *)(mtod(m0, char *) + offset);
   6067 				th = (void *)(mtod(m0, char *) + hlen);
   6068 
   6069 				ip6->ip6_plen = 0;
   6070 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6071 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6072 			}
   6073 			hlen += th->th_off << 2;
   6074 		}
   6075 
   6076 		if (v4) {
   6077 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6078 			cmdlen |= WTX_TCPIP_CMD_IP;
   6079 		} else {
   6080 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6081 			ipcse = 0;
   6082 		}
   6083 		cmd |= WTX_TCPIP_CMD_TSE;
   6084 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6085 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6086 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6087 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6088 	}
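	/*
	 * Illustrative TSO setup (values hypothetical): for an IPv4 TCP
	 * packet with 14 + 20 + 20 = 54 bytes of headers and
	 * segsz = 1460, HDRLEN is 54 and the hardware replicates that
	 * header while carving the payload into 1460-byte segments.
	 */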
   6089 
   6090 	/*
   6091 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6092 	 * offload feature, if we load the context descriptor, we
   6093 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6094 	 */
   6095 
   6096 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6097 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6098 	    WTX_TCPIP_IPCSE(ipcse);
   6099 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6100 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   6101 		fields |= WTX_IXSM;
   6102 	}
   6103 
   6104 	offset += iphl;
   6105 
   6106 	if (m0->m_pkthdr.csum_flags &
   6107 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6108 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6109 		fields |= WTX_TXSM;
   6110 		tucs = WTX_TCPIP_TUCSS(offset) |
   6111 		    WTX_TCPIP_TUCSO(offset +
   6112 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6113 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6114 	} else if ((m0->m_pkthdr.csum_flags &
   6115 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6116 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6117 		fields |= WTX_TXSM;
   6118 		tucs = WTX_TCPIP_TUCSS(offset) |
   6119 		    WTX_TCPIP_TUCSO(offset +
   6120 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6121 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6122 	} else {
   6123 		/* Just initialize it to a valid TCP context. */
   6124 		tucs = WTX_TCPIP_TUCSS(offset) |
   6125 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6126 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6127 	}
   6128 
   6129 	/* Fill in the context descriptor. */
   6130 	t = (struct livengood_tcpip_ctxdesc *)
   6131 	    &txq->txq_descs[txq->txq_next];
   6132 	t->tcpip_ipcs = htole32(ipcs);
   6133 	t->tcpip_tucs = htole32(tucs);
   6134 	t->tcpip_cmdlen = htole32(cmdlen);
   6135 	t->tcpip_seg = htole32(seg);
   6136 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6137 
   6138 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6139 	txs->txs_ndesc++;
   6140 
   6141 	*cmdp = cmd;
   6142 	*fieldsp = fields;
   6143 
   6144 	return 0;
   6145 }
   6146 
   6147 /*
   6148  * wm_start:		[ifnet interface function]
   6149  *
   6150  *	Start packet transmission on the interface.
   6151  */
   6152 static void
   6153 wm_start(struct ifnet *ifp)
   6154 {
   6155 	struct wm_softc *sc = ifp->if_softc;
   6156 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6157 
   6158 	WM_TX_LOCK(txq);
   6159 	if (!sc->sc_stopping)
   6160 		wm_start_locked(ifp);
   6161 	WM_TX_UNLOCK(txq);
   6162 }
   6163 
   6164 static void
   6165 wm_start_locked(struct ifnet *ifp)
   6166 {
   6167 	struct wm_softc *sc = ifp->if_softc;
   6168 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6169 	struct mbuf *m0;
   6170 	struct m_tag *mtag;
   6171 	struct wm_txsoft *txs;
   6172 	bus_dmamap_t dmamap;
   6173 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6174 	bus_addr_t curaddr;
   6175 	bus_size_t seglen, curlen;
   6176 	uint32_t cksumcmd;
   6177 	uint8_t cksumfields;
   6178 
   6179 	KASSERT(WM_TX_LOCKED(txq));
   6180 
   6181 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6182 		return;
   6183 
   6184 	/* Remember the previous number of free descriptors. */
   6185 	ofree = txq->txq_free;
   6186 
   6187 	/*
   6188 	 * Loop through the send queue, setting up transmit descriptors
   6189 	 * until we drain the queue, or use up all available transmit
   6190 	 * descriptors.
   6191 	 */
   6192 	for (;;) {
   6193 		m0 = NULL;
   6194 
   6195 		/* Get a work queue entry. */
   6196 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6197 			wm_txeof(sc);
   6198 			if (txq->txq_sfree == 0) {
   6199 				DPRINTF(WM_DEBUG_TX,
   6200 				    ("%s: TX: no free job descriptors\n",
   6201 					device_xname(sc->sc_dev)));
   6202 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6203 				break;
   6204 			}
   6205 		}
   6206 
   6207 		/* Grab a packet off the queue. */
   6208 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6209 		if (m0 == NULL)
   6210 			break;
   6211 
   6212 		DPRINTF(WM_DEBUG_TX,
   6213 		    ("%s: TX: have packet to transmit: %p\n",
   6214 		    device_xname(sc->sc_dev), m0));
   6215 
   6216 		txs = &txq->txq_soft[txq->txq_snext];
   6217 		dmamap = txs->txs_dmamap;
   6218 
   6219 		use_tso = (m0->m_pkthdr.csum_flags &
   6220 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6221 
   6222 		/*
   6223 		 * So says the Linux driver:
   6224 		 * The controller does a simple calculation to make sure
   6225 		 * there is enough room in the FIFO before initiating the
   6226 		 * DMA for each buffer.  The calc is:
   6227 		 *	4 = ceil(buffer len / MSS)
   6228 		 * To make sure we don't overrun the FIFO, adjust the max
   6229 		 * buffer len if the MSS drops.
   6230 		 */
   6231 		dmamap->dm_maxsegsz =
   6232 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6233 		    ? m0->m_pkthdr.segsz << 2
   6234 		    : WTX_MAX_LEN;
   6235 
   6236 		/*
   6237 		 * Load the DMA map.  If this fails, the packet either
   6238 		 * didn't fit in the allotted number of segments, or we
   6239 		 * were short on resources.  For the too-many-segments
   6240 		 * case, we simply report an error and drop the packet,
   6241 		 * since we can't sanely copy a jumbo packet to a single
   6242 		 * buffer.
   6243 		 */
   6244 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6245 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6246 		if (error) {
   6247 			if (error == EFBIG) {
   6248 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6249 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6250 				    "DMA segments, dropping...\n",
   6251 				    device_xname(sc->sc_dev));
   6252 				wm_dump_mbuf_chain(sc, m0);
   6253 				m_freem(m0);
   6254 				continue;
   6255 			}
   6256 			/*  Short on resources, just stop for now. */
   6257 			DPRINTF(WM_DEBUG_TX,
   6258 			    ("%s: TX: dmamap load failed: %d\n",
   6259 			    device_xname(sc->sc_dev), error));
   6260 			break;
   6261 		}
   6262 
   6263 		segs_needed = dmamap->dm_nsegs;
   6264 		if (use_tso) {
   6265 			/* For sentinel descriptor; see below. */
   6266 			segs_needed++;
   6267 		}
   6268 
   6269 		/*
   6270 		 * Ensure we have enough descriptors free to describe
   6271 		 * the packet.  Note, we always reserve one descriptor
   6272 		 * at the end of the ring due to the semantics of the
   6273 		 * TDT register, plus one more in the event we need
   6274 		 * to load offload context.
   6275 		 */
   6276 		if (segs_needed > txq->txq_free - 2) {
   6277 			/*
   6278 			 * Not enough free descriptors to transmit this
   6279 			 * packet.  We haven't committed anything yet,
   6280 			 * so just unload the DMA map, put the packet
    6281 			 * back on the queue, and punt.  Notify the upper
   6282 			 * layer that there are no more slots left.
   6283 			 */
   6284 			DPRINTF(WM_DEBUG_TX,
   6285 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6286 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6287 			    segs_needed, txq->txq_free - 1));
   6288 			ifp->if_flags |= IFF_OACTIVE;
   6289 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6290 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6291 			break;
   6292 		}
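		/*
		 * Worked example of the check above (numbers invented):
		 * with txq_free = 5, a packet needing segs_needed = 4
		 * stalls, since 4 > 5 - 2; two descriptors always stay
		 * reserved for the TDT semantics and for a possible
		 * offload context descriptor.
		 */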
   6293 
   6294 		/*
   6295 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6296 		 * once we know we can transmit the packet, since we
   6297 		 * do some internal FIFO space accounting here.
   6298 		 */
   6299 		if (sc->sc_type == WM_T_82547 &&
   6300 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6301 			DPRINTF(WM_DEBUG_TX,
   6302 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6303 			    device_xname(sc->sc_dev)));
   6304 			ifp->if_flags |= IFF_OACTIVE;
   6305 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6306 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6307 			break;
   6308 		}
   6309 
   6310 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6311 
   6312 		DPRINTF(WM_DEBUG_TX,
   6313 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6314 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6315 
   6316 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6317 
   6318 		/*
   6319 		 * Store a pointer to the packet so that we can free it
   6320 		 * later.
   6321 		 *
    6322 		 * Initially, we consider the number of descriptors the
    6323 		 * packet uses to be the number of DMA segments.  This
    6324 		 * may be incremented by 1 if we do checksum offload (a
    6325 		 * descriptor is used to set the checksum context).
   6326 		 */
   6327 		txs->txs_mbuf = m0;
   6328 		txs->txs_firstdesc = txq->txq_next;
   6329 		txs->txs_ndesc = segs_needed;
   6330 
   6331 		/* Set up offload parameters for this packet. */
   6332 		if (m0->m_pkthdr.csum_flags &
   6333 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6334 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6335 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6336 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6337 					  &cksumfields) != 0) {
   6338 				/* Error message already displayed. */
   6339 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6340 				continue;
   6341 			}
   6342 		} else {
   6343 			cksumcmd = 0;
   6344 			cksumfields = 0;
   6345 		}
   6346 
   6347 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
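         		/*
         		 * WTX_CMD_IFCS has the chip append the Ethernet FCS;
         		 * WTX_CMD_IDE arms the transmit interrupt-delay timer
         		 * so completions are batched rather than signalled
         		 * per packet.
         		 */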
   6348 
   6349 		/* Sync the DMA map. */
   6350 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6351 		    BUS_DMASYNC_PREWRITE);
   6352 
   6353 		/* Initialize the transmit descriptor. */
   6354 		for (nexttx = txq->txq_next, seg = 0;
   6355 		     seg < dmamap->dm_nsegs; seg++) {
   6356 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6357 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6358 			     seglen != 0;
   6359 			     curaddr += curlen, seglen -= curlen,
   6360 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6361 				curlen = seglen;
   6362 
   6363 				/*
   6364 				 * So says the Linux driver:
   6365 				 * Work around for premature descriptor
   6366 				 * write-backs in TSO mode.  Append a
   6367 				 * 4-byte sentinel descriptor.
   6368 				 */
   6369 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6370 				    curlen > 8)
   6371 					curlen -= 4;
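         				/*
         				 * Trimming 4 bytes here leaves seglen
         				 * nonzero, so the loop runs once more
         				 * and emits the final 4 bytes as a
         				 * separate descriptor -- the sentinel.
         				 */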
   6372 
   6373 				wm_set_dma_addr(
   6374 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6375 				txq->txq_descs[nexttx].wtx_cmdlen
   6376 				    = htole32(cksumcmd | curlen);
   6377 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6378 				    = 0;
   6379 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6380 				    = cksumfields;
    6381 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6382 				lasttx = nexttx;
   6383 
   6384 				DPRINTF(WM_DEBUG_TX,
   6385 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6386 				     "len %#04zx\n",
   6387 				    device_xname(sc->sc_dev), nexttx,
   6388 				    (uint64_t)curaddr, curlen));
   6389 			}
   6390 		}
   6391 
   6392 		KASSERT(lasttx != -1);
   6393 
   6394 		/*
   6395 		 * Set up the command byte on the last descriptor of
   6396 		 * the packet.  If we're in the interrupt delay window,
   6397 		 * delay the interrupt.
   6398 		 */
   6399 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6400 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6401 
   6402 		/*
   6403 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6404 		 * up the descriptor to encapsulate the packet for us.
   6405 		 *
   6406 		 * This is only valid on the last descriptor of the packet.
   6407 		 */
   6408 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6409 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6410 			    htole32(WTX_CMD_VLE);
   6411 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6412 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6413 		}
   6414 
   6415 		txs->txs_lastdesc = lasttx;
   6416 
   6417 		DPRINTF(WM_DEBUG_TX,
   6418 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6419 		    device_xname(sc->sc_dev),
   6420 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6421 
   6422 		/* Sync the descriptors we're using. */
   6423 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6424 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6425 
   6426 		/* Give the packet to the chip. */
   6427 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6428 
   6429 		DPRINTF(WM_DEBUG_TX,
   6430 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6431 
   6432 		DPRINTF(WM_DEBUG_TX,
   6433 		    ("%s: TX: finished transmitting packet, job %d\n",
   6434 		    device_xname(sc->sc_dev), txq->txq_snext));
   6435 
   6436 		/* Advance the tx pointer. */
   6437 		txq->txq_free -= txs->txs_ndesc;
   6438 		txq->txq_next = nexttx;
   6439 
   6440 		txq->txq_sfree--;
   6441 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6442 
   6443 		/* Pass the packet to any BPF listeners. */
   6444 		bpf_mtap(ifp, m0);
   6445 	}
   6446 
   6447 	if (m0 != NULL) {
   6448 		ifp->if_flags |= IFF_OACTIVE;
   6449 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6450 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6451 			__func__));
   6452 		m_freem(m0);
   6453 	}
   6454 
   6455 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6456 		/* No more slots; notify upper layer. */
   6457 		ifp->if_flags |= IFF_OACTIVE;
   6458 	}
   6459 
   6460 	if (txq->txq_free != ofree) {
   6461 		/* Set a watchdog timer in case the chip flakes out. */
   6462 		ifp->if_timer = 5;
   6463 	}
   6464 }
   6465 
   6466 /*
   6467  * wm_nq_tx_offload:
   6468  *
   6469  *	Set up TCP/IP checksumming parameters for the
   6470  *	specified packet, for NEWQUEUE devices
   6471  */
   6472 static int
   6473 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6474     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6475 {
   6476 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6477 	struct mbuf *m0 = txs->txs_mbuf;
   6478 	struct m_tag *mtag;
   6479 	uint32_t vl_len, mssidx, cmdc;
   6480 	struct ether_header *eh;
   6481 	int offset, iphl;
   6482 
   6483 	/*
   6484 	 * XXX It would be nice if the mbuf pkthdr had offset
   6485 	 * fields for the protocol headers.
   6486 	 */
   6487 	*cmdlenp = 0;
   6488 	*fieldsp = 0;
   6489 
   6490 	eh = mtod(m0, struct ether_header *);
   6491 	switch (htons(eh->ether_type)) {
   6492 	case ETHERTYPE_IP:
   6493 	case ETHERTYPE_IPV6:
   6494 		offset = ETHER_HDR_LEN;
   6495 		break;
   6496 
   6497 	case ETHERTYPE_VLAN:
   6498 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6499 		break;
   6500 
   6501 	default:
   6502 		/* Don't support this protocol or encapsulation. */
   6503 		*do_csum = false;
   6504 		return 0;
   6505 	}
   6506 	*do_csum = true;
   6507 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6508 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6509 
   6510 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6511 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6512 
   6513 	if ((m0->m_pkthdr.csum_flags &
   6514 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6515 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6516 	} else {
   6517 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6518 	}
   6519 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6520 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6521 
   6522 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6523 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6524 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6525 		*cmdlenp |= NQTX_CMD_VLE;
   6526 	}
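         	/*
         	 * At this point vl_len packs the L2 and L3 header lengths,
         	 * e.g. MACLEN = 14 and IPLEN = 20 for an untagged IPv4 frame
         	 * with no IP options, plus the VLAN tag for tagged frames.
         	 */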
   6527 
   6528 	mssidx = 0;
   6529 
   6530 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6531 		int hlen = offset + iphl;
   6532 		int tcp_hlen;
   6533 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6534 
   6535 		if (__predict_false(m0->m_len <
   6536 				    (hlen + sizeof(struct tcphdr)))) {
   6537 			/*
   6538 			 * TCP/IP headers are not in the first mbuf; we need
   6539 			 * to do this the slow and painful way.  Let's just
   6540 			 * hope this doesn't happen very often.
   6541 			 */
   6542 			struct tcphdr th;
   6543 
   6544 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6545 
   6546 			m_copydata(m0, hlen, sizeof(th), &th);
   6547 			if (v4) {
   6548 				struct ip ip;
   6549 
   6550 				m_copydata(m0, offset, sizeof(ip), &ip);
   6551 				ip.ip_len = 0;
   6552 				m_copyback(m0,
   6553 				    offset + offsetof(struct ip, ip_len),
   6554 				    sizeof(ip.ip_len), &ip.ip_len);
   6555 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6556 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6557 			} else {
   6558 				struct ip6_hdr ip6;
   6559 
   6560 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6561 				ip6.ip6_plen = 0;
   6562 				m_copyback(m0,
   6563 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6564 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6565 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6566 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6567 			}
   6568 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6569 			    sizeof(th.th_sum), &th.th_sum);
   6570 
   6571 			tcp_hlen = th.th_off << 2;
   6572 		} else {
   6573 			/*
   6574 			 * TCP/IP headers are in the first mbuf; we can do
   6575 			 * this the easy way.
   6576 			 */
   6577 			struct tcphdr *th;
   6578 
   6579 			if (v4) {
   6580 				struct ip *ip =
   6581 				    (void *)(mtod(m0, char *) + offset);
   6582 				th = (void *)(mtod(m0, char *) + hlen);
   6583 
   6584 				ip->ip_len = 0;
   6585 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6586 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6587 			} else {
   6588 				struct ip6_hdr *ip6 =
   6589 				    (void *)(mtod(m0, char *) + offset);
   6590 				th = (void *)(mtod(m0, char *) + hlen);
   6591 
   6592 				ip6->ip6_plen = 0;
   6593 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6594 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6595 			}
   6596 			tcp_hlen = th->th_off << 2;
   6597 		}
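         		/*
         		 * Both paths above zero the IP length field and seed
         		 * th_sum with the pseudo-header sum (addresses and
         		 * protocol only); the hardware fills in the length
         		 * and finishes the checksum for each generated
         		 * segment.
         		 */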
   6598 		hlen += tcp_hlen;
   6599 		*cmdlenp |= NQTX_CMD_TSE;
   6600 
   6601 		if (v4) {
   6602 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6603 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6604 		} else {
   6605 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6606 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6607 		}
   6608 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6609 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6610 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6611 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6612 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6613 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
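         		/*
         		 * E.g. an MSS of 1448 and a bare 20-byte TCP header
         		 * put 1448 in the MSS field and 20 in the L4LEN field
         		 * of mssidx; the KASSERTs above check that both fit.
         		 */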
   6614 	} else {
   6615 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6616 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6617 	}
   6618 
   6619 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6620 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6621 		cmdc |= NQTXC_CMD_IP4;
   6622 	}
   6623 
   6624 	if (m0->m_pkthdr.csum_flags &
   6625 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6626 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6627 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6628 			cmdc |= NQTXC_CMD_TCP;
   6629 		} else {
   6630 			cmdc |= NQTXC_CMD_UDP;
   6631 		}
   6632 		cmdc |= NQTXC_CMD_IP4;
   6633 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6634 	}
   6635 	if (m0->m_pkthdr.csum_flags &
   6636 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6637 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6638 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6639 			cmdc |= NQTXC_CMD_TCP;
   6640 		} else {
   6641 			cmdc |= NQTXC_CMD_UDP;
   6642 		}
   6643 		cmdc |= NQTXC_CMD_IP6;
   6644 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6645 	}
   6646 
   6647 	/* Fill in the context descriptor. */
   6648 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6649 	    htole32(vl_len);
   6650 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6651 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6652 	    htole32(cmdc);
   6653 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6654 	    htole32(mssidx);
   6655 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6656 	DPRINTF(WM_DEBUG_TX,
   6657 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6658 	    txq->txq_next, 0, vl_len));
   6659 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6660 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6661 	txs->txs_ndesc++;
   6662 	return 0;
   6663 }
   6664 
   6665 /*
   6666  * wm_nq_start:		[ifnet interface function]
   6667  *
   6668  *	Start packet transmission on the interface for NEWQUEUE devices
   6669  */
   6670 static void
   6671 wm_nq_start(struct ifnet *ifp)
   6672 {
   6673 	struct wm_softc *sc = ifp->if_softc;
   6674 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6675 
   6676 	WM_TX_LOCK(txq);
   6677 	if (!sc->sc_stopping)
   6678 		wm_nq_start_locked(ifp);
   6679 	WM_TX_UNLOCK(txq);
   6680 }
   6681 
   6682 static void
   6683 wm_nq_start_locked(struct ifnet *ifp)
   6684 {
   6685 	struct wm_softc *sc = ifp->if_softc;
   6686 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6687 	struct mbuf *m0;
   6688 	struct m_tag *mtag;
   6689 	struct wm_txsoft *txs;
   6690 	bus_dmamap_t dmamap;
   6691 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6692 	bool do_csum, sent;
   6693 
   6694 	KASSERT(WM_TX_LOCKED(txq));
   6695 
   6696 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6697 		return;
   6698 
   6699 	sent = false;
   6700 
   6701 	/*
   6702 	 * Loop through the send queue, setting up transmit descriptors
   6703 	 * until we drain the queue, or use up all available transmit
   6704 	 * descriptors.
   6705 	 */
   6706 	for (;;) {
   6707 		m0 = NULL;
   6708 
   6709 		/* Get a work queue entry. */
   6710 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6711 			wm_txeof(sc);
   6712 			if (txq->txq_sfree == 0) {
   6713 				DPRINTF(WM_DEBUG_TX,
   6714 				    ("%s: TX: no free job descriptors\n",
   6715 					device_xname(sc->sc_dev)));
   6716 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6717 				break;
   6718 			}
   6719 		}
   6720 
   6721 		/* Grab a packet off the queue. */
   6722 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6723 		if (m0 == NULL)
   6724 			break;
   6725 
   6726 		DPRINTF(WM_DEBUG_TX,
   6727 		    ("%s: TX: have packet to transmit: %p\n",
   6728 		    device_xname(sc->sc_dev), m0));
   6729 
   6730 		txs = &txq->txq_soft[txq->txq_snext];
   6731 		dmamap = txs->txs_dmamap;
   6732 
   6733 		/*
   6734 		 * Load the DMA map.  If this fails, the packet either
   6735 		 * didn't fit in the allotted number of segments, or we
   6736 		 * were short on resources.  For the too-many-segments
   6737 		 * case, we simply report an error and drop the packet,
   6738 		 * since we can't sanely copy a jumbo packet to a single
   6739 		 * buffer.
   6740 		 */
   6741 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6742 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6743 		if (error) {
   6744 			if (error == EFBIG) {
   6745 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6746 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6747 				    "DMA segments, dropping...\n",
   6748 				    device_xname(sc->sc_dev));
   6749 				wm_dump_mbuf_chain(sc, m0);
   6750 				m_freem(m0);
   6751 				continue;
   6752 			}
   6753 			/* Short on resources, just stop for now. */
   6754 			DPRINTF(WM_DEBUG_TX,
   6755 			    ("%s: TX: dmamap load failed: %d\n",
   6756 			    device_xname(sc->sc_dev), error));
   6757 			break;
   6758 		}
   6759 
   6760 		segs_needed = dmamap->dm_nsegs;
   6761 
   6762 		/*
   6763 		 * Ensure we have enough descriptors free to describe
   6764 		 * the packet.  Note, we always reserve one descriptor
   6765 		 * at the end of the ring due to the semantics of the
   6766 		 * TDT register, plus one more in the event we need
   6767 		 * to load offload context.
   6768 		 */
   6769 		if (segs_needed > txq->txq_free - 2) {
   6770 			/*
   6771 			 * Not enough free descriptors to transmit this
   6772 			 * packet.  We haven't committed anything yet,
   6773 			 * so just unload the DMA map, put the packet
    6774 			 * back on the queue, and punt.  Notify the upper
   6775 			 * layer that there are no more slots left.
   6776 			 */
   6777 			DPRINTF(WM_DEBUG_TX,
   6778 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6779 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6780 			    segs_needed, txq->txq_free - 1));
   6781 			ifp->if_flags |= IFF_OACTIVE;
   6782 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6783 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6784 			break;
   6785 		}
   6786 
   6787 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6788 
   6789 		DPRINTF(WM_DEBUG_TX,
   6790 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6791 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6792 
   6793 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6794 
   6795 		/*
   6796 		 * Store a pointer to the packet so that we can free it
   6797 		 * later.
   6798 		 *
    6799 		 * Initially, we consider the number of descriptors the
    6800 		 * packet uses to be the number of DMA segments.  This
    6801 		 * may be incremented by 1 if we do checksum offload (a
    6802 		 * descriptor is used to set the checksum context).
   6803 		 */
   6804 		txs->txs_mbuf = m0;
   6805 		txs->txs_firstdesc = txq->txq_next;
   6806 		txs->txs_ndesc = segs_needed;
   6807 
   6808 		/* Set up offload parameters for this packet. */
   6809 		uint32_t cmdlen, fields, dcmdlen;
   6810 		if (m0->m_pkthdr.csum_flags &
   6811 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6812 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6813 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6814 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6815 			    &do_csum) != 0) {
   6816 				/* Error message already displayed. */
   6817 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6818 				continue;
   6819 			}
   6820 		} else {
   6821 			do_csum = false;
   6822 			cmdlen = 0;
   6823 			fields = 0;
   6824 		}
   6825 
   6826 		/* Sync the DMA map. */
   6827 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6828 		    BUS_DMASYNC_PREWRITE);
   6829 
   6830 		/* Initialize the first transmit descriptor. */
   6831 		nexttx = txq->txq_next;
   6832 		if (!do_csum) {
   6833 			/* setup a legacy descriptor */
   6834 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6835 			    dmamap->dm_segs[0].ds_addr);
   6836 			txq->txq_descs[nexttx].wtx_cmdlen =
   6837 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6838 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6839 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6840 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6841 			    NULL) {
   6842 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6843 				    htole32(WTX_CMD_VLE);
   6844 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6845 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6846 			} else {
    6847 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6848 			}
   6849 			dcmdlen = 0;
   6850 		} else {
   6851 			/* setup an advanced data descriptor */
   6852 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6853 			    htole64(dmamap->dm_segs[0].ds_addr);
   6854 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6855 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6856 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6857 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6858 			    htole32(fields);
   6859 			DPRINTF(WM_DEBUG_TX,
   6860 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6861 			    device_xname(sc->sc_dev), nexttx,
   6862 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6863 			DPRINTF(WM_DEBUG_TX,
   6864 			    ("\t 0x%08x%08x\n", fields,
   6865 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6866 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6867 		}
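         		/*
         		 * dcmdlen is ORed into every remaining descriptor:
         		 * zero keeps them in the legacy layout, while
         		 * NQTX_DTYP_D | NQTX_CMD_DEXT marks them as advanced
         		 * data descriptors to match the first one.
         		 */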
   6868 
   6869 		lasttx = nexttx;
   6870 		nexttx = WM_NEXTTX(txq, nexttx);
   6871 		/*
    6872 		 * Fill in the next descriptors.  The legacy and
    6873 		 * advanced formats are the same from here on.
   6874 		 */
   6875 		for (seg = 1; seg < dmamap->dm_nsegs;
   6876 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6877 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6878 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6879 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6880 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6881 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6882 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6883 			lasttx = nexttx;
   6884 
   6885 			DPRINTF(WM_DEBUG_TX,
   6886 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6887 			     "len %#04zx\n",
   6888 			    device_xname(sc->sc_dev), nexttx,
   6889 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6890 			    dmamap->dm_segs[seg].ds_len));
   6891 		}
   6892 
   6893 		KASSERT(lasttx != -1);
   6894 
   6895 		/*
   6896 		 * Set up the command byte on the last descriptor of
   6897 		 * the packet.  If we're in the interrupt delay window,
   6898 		 * delay the interrupt.
   6899 		 */
   6900 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6901 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6902 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6903 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6904 
   6905 		txs->txs_lastdesc = lasttx;
   6906 
   6907 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6908 		    device_xname(sc->sc_dev),
   6909 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6910 
   6911 		/* Sync the descriptors we're using. */
   6912 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6913 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6914 
   6915 		/* Give the packet to the chip. */
   6916 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6917 		sent = true;
   6918 
   6919 		DPRINTF(WM_DEBUG_TX,
   6920 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6921 
   6922 		DPRINTF(WM_DEBUG_TX,
   6923 		    ("%s: TX: finished transmitting packet, job %d\n",
   6924 		    device_xname(sc->sc_dev), txq->txq_snext));
   6925 
   6926 		/* Advance the tx pointer. */
   6927 		txq->txq_free -= txs->txs_ndesc;
   6928 		txq->txq_next = nexttx;
   6929 
   6930 		txq->txq_sfree--;
   6931 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6932 
   6933 		/* Pass the packet to any BPF listeners. */
   6934 		bpf_mtap(ifp, m0);
   6935 	}
   6936 
   6937 	if (m0 != NULL) {
   6938 		ifp->if_flags |= IFF_OACTIVE;
   6939 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6940 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6941 			__func__));
   6942 		m_freem(m0);
   6943 	}
   6944 
   6945 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6946 		/* No more slots; notify upper layer. */
   6947 		ifp->if_flags |= IFF_OACTIVE;
   6948 	}
   6949 
   6950 	if (sent) {
   6951 		/* Set a watchdog timer in case the chip flakes out. */
   6952 		ifp->if_timer = 5;
   6953 	}
   6954 }
   6955 
   6956 /* Interrupt */
   6957 
   6958 /*
   6959  * wm_txeof:
   6960  *
   6961  *	Helper; handle transmit interrupts.
   6962  */
   6963 static int
   6964 wm_txeof(struct wm_softc *sc)
   6965 {
   6966 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6967 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6968 	struct wm_txsoft *txs;
   6969 	bool processed = false;
   6970 	int count = 0;
   6971 	int i;
   6972 	uint8_t status;
   6973 
   6974 	if (sc->sc_stopping)
   6975 		return 0;
   6976 
   6977 	ifp->if_flags &= ~IFF_OACTIVE;
   6978 
   6979 	/*
   6980 	 * Go through the Tx list and free mbufs for those
   6981 	 * frames which have been transmitted.
   6982 	 */
   6983 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6984 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6985 		txs = &txq->txq_soft[i];
   6986 
   6987 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   6988 			device_xname(sc->sc_dev), i));
   6989 
   6990 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6991 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6992 
   6993 		status =
   6994 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6995 		if ((status & WTX_ST_DD) == 0) {
   6996 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6997 			    BUS_DMASYNC_PREREAD);
   6998 			break;
   6999 		}
   7000 
   7001 		processed = true;
   7002 		count++;
   7003 		DPRINTF(WM_DEBUG_TX,
   7004 		    ("%s: TX: job %d done: descs %d..%d\n",
   7005 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7006 		    txs->txs_lastdesc));
   7007 
   7008 		/*
   7009 		 * XXX We should probably be using the statistics
   7010 		 * XXX registers, but I don't know if they exist
   7011 		 * XXX on chips before the i82544.
   7012 		 */
   7013 
   7014 #ifdef WM_EVENT_COUNTERS
   7015 		if (status & WTX_ST_TU)
   7016 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7017 #endif /* WM_EVENT_COUNTERS */
   7018 
   7019 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7020 			ifp->if_oerrors++;
   7021 			if (status & WTX_ST_LC)
   7022 				log(LOG_WARNING, "%s: late collision\n",
   7023 				    device_xname(sc->sc_dev));
   7024 			else if (status & WTX_ST_EC) {
   7025 				ifp->if_collisions += 16;
   7026 				log(LOG_WARNING, "%s: excessive collisions\n",
   7027 				    device_xname(sc->sc_dev));
   7028 			}
   7029 		} else
   7030 			ifp->if_opackets++;
   7031 
   7032 		txq->txq_free += txs->txs_ndesc;
   7033 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7034 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7035 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7036 		m_freem(txs->txs_mbuf);
   7037 		txs->txs_mbuf = NULL;
   7038 	}
   7039 
   7040 	/* Update the dirty transmit buffer pointer. */
   7041 	txq->txq_sdirty = i;
   7042 	DPRINTF(WM_DEBUG_TX,
   7043 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7044 
   7045 	if (count != 0)
   7046 		rnd_add_uint32(&sc->rnd_source, count);
   7047 
   7048 	/*
   7049 	 * If there are no more pending transmissions, cancel the watchdog
   7050 	 * timer.
   7051 	 */
   7052 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7053 		ifp->if_timer = 0;
   7054 
   7055 	return processed;
   7056 }
   7057 
   7058 /*
   7059  * wm_rxeof:
   7060  *
   7061  *	Helper; handle receive interrupts.
   7062  */
   7063 static void
   7064 wm_rxeof(struct wm_rxqueue *rxq)
   7065 {
   7066 	struct wm_softc *sc = rxq->rxq_sc;
   7067 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7068 	struct wm_rxsoft *rxs;
   7069 	struct mbuf *m;
   7070 	int i, len;
   7071 	int count = 0;
   7072 	uint8_t status, errors;
   7073 	uint16_t vlantag;
   7074 
   7075 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7076 		rxs = &rxq->rxq_soft[i];
   7077 
   7078 		DPRINTF(WM_DEBUG_RX,
   7079 		    ("%s: RX: checking descriptor %d\n",
   7080 		    device_xname(sc->sc_dev), i));
   7081 
    7082 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7083 
   7084 		status = rxq->rxq_descs[i].wrx_status;
   7085 		errors = rxq->rxq_descs[i].wrx_errors;
   7086 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7087 		vlantag = rxq->rxq_descs[i].wrx_special;
   7088 
   7089 		if ((status & WRX_ST_DD) == 0) {
   7090 			/* We have processed all of the receive descriptors. */
   7091 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7092 			break;
   7093 		}
   7094 
   7095 		count++;
   7096 		if (__predict_false(rxq->rxq_discard)) {
   7097 			DPRINTF(WM_DEBUG_RX,
   7098 			    ("%s: RX: discarding contents of descriptor %d\n",
   7099 			    device_xname(sc->sc_dev), i));
   7100 			wm_init_rxdesc(rxq, i);
   7101 			if (status & WRX_ST_EOP) {
   7102 				/* Reset our state. */
   7103 				DPRINTF(WM_DEBUG_RX,
   7104 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7105 				    device_xname(sc->sc_dev)));
   7106 				rxq->rxq_discard = 0;
   7107 			}
   7108 			continue;
   7109 		}
   7110 
   7111 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7112 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7113 
   7114 		m = rxs->rxs_mbuf;
   7115 
   7116 		/*
   7117 		 * Add a new receive buffer to the ring, unless of
   7118 		 * course the length is zero. Treat the latter as a
   7119 		 * failed mapping.
   7120 		 */
   7121 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7122 			/*
   7123 			 * Failed, throw away what we've done so
   7124 			 * far, and discard the rest of the packet.
   7125 			 */
   7126 			ifp->if_ierrors++;
   7127 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7128 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7129 			wm_init_rxdesc(rxq, i);
   7130 			if ((status & WRX_ST_EOP) == 0)
   7131 				rxq->rxq_discard = 1;
   7132 			if (rxq->rxq_head != NULL)
   7133 				m_freem(rxq->rxq_head);
   7134 			WM_RXCHAIN_RESET(rxq);
   7135 			DPRINTF(WM_DEBUG_RX,
   7136 			    ("%s: RX: Rx buffer allocation failed, "
   7137 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7138 			    rxq->rxq_discard ? " (discard)" : ""));
   7139 			continue;
   7140 		}
   7141 
   7142 		m->m_len = len;
   7143 		rxq->rxq_len += len;
   7144 		DPRINTF(WM_DEBUG_RX,
   7145 		    ("%s: RX: buffer at %p len %d\n",
   7146 		    device_xname(sc->sc_dev), m->m_data, len));
   7147 
   7148 		/* If this is not the end of the packet, keep looking. */
   7149 		if ((status & WRX_ST_EOP) == 0) {
   7150 			WM_RXCHAIN_LINK(rxq, m);
   7151 			DPRINTF(WM_DEBUG_RX,
   7152 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7153 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7154 			continue;
   7155 		}
   7156 
    7157 		/*
    7158 		 * Okay, we have the entire packet now.  The chip
    7159 		 * includes the FCS except on I350, I354 and I21[01]
    7160 		 * (not all chips can be configured to strip it), so
    7161 		 * we need to trim it.  We may also need to adjust the
    7162 		 * length of the previous mbuf in the chain if the
    7163 		 * current mbuf is too short.  Due to an erratum,
    7164 		 * the RCTL_SECRC bit in the RCTL register is always
    7165 		 * set on I350, so we don't trim it there.
    7166 		 */
   7167 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7168 		    && (sc->sc_type != WM_T_I210)
   7169 		    && (sc->sc_type != WM_T_I211)) {
   7170 			if (m->m_len < ETHER_CRC_LEN) {
   7171 				rxq->rxq_tail->m_len
   7172 				    -= (ETHER_CRC_LEN - m->m_len);
   7173 				m->m_len = 0;
   7174 			} else
   7175 				m->m_len -= ETHER_CRC_LEN;
   7176 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7177 		} else
   7178 			len = rxq->rxq_len;
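         		/*
         		 * E.g. if the 4-byte FCS straddles buffers and the
         		 * last mbuf holds only 2 of its bytes, that mbuf is
         		 * emptied and the previous one gives up the other 2;
         		 * either way the packet shrinks by ETHER_CRC_LEN.
         		 */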
   7179 
   7180 		WM_RXCHAIN_LINK(rxq, m);
   7181 
   7182 		*rxq->rxq_tailp = NULL;
   7183 		m = rxq->rxq_head;
   7184 
   7185 		WM_RXCHAIN_RESET(rxq);
   7186 
   7187 		DPRINTF(WM_DEBUG_RX,
   7188 		    ("%s: RX: have entire packet, len -> %d\n",
   7189 		    device_xname(sc->sc_dev), len));
   7190 
   7191 		/* If an error occurred, update stats and drop the packet. */
   7192 		if (errors &
   7193 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7194 			if (errors & WRX_ER_SE)
   7195 				log(LOG_WARNING, "%s: symbol error\n",
   7196 				    device_xname(sc->sc_dev));
   7197 			else if (errors & WRX_ER_SEQ)
   7198 				log(LOG_WARNING, "%s: receive sequence error\n",
   7199 				    device_xname(sc->sc_dev));
   7200 			else if (errors & WRX_ER_CE)
   7201 				log(LOG_WARNING, "%s: CRC error\n",
   7202 				    device_xname(sc->sc_dev));
   7203 			m_freem(m);
   7204 			continue;
   7205 		}
   7206 
   7207 		/* No errors.  Receive the packet. */
   7208 		m->m_pkthdr.rcvif = ifp;
   7209 		m->m_pkthdr.len = len;
   7210 
   7211 		/*
   7212 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7213 		 * for us.  Associate the tag with the packet.
   7214 		 */
    7215 		/* XXX should check for i350 and i354 */
   7216 		if ((status & WRX_ST_VP) != 0) {
   7217 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7218 		}
   7219 
   7220 		/* Set up checksum info for this packet. */
   7221 		if ((status & WRX_ST_IXSM) == 0) {
   7222 			if (status & WRX_ST_IPCS) {
   7223 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7224 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7225 				if (errors & WRX_ER_IPE)
   7226 					m->m_pkthdr.csum_flags |=
   7227 					    M_CSUM_IPv4_BAD;
   7228 			}
   7229 			if (status & WRX_ST_TCPCS) {
   7230 				/*
   7231 				 * Note: we don't know if this was TCP or UDP,
   7232 				 * so we just set both bits, and expect the
   7233 				 * upper layers to deal.
   7234 				 */
   7235 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7236 				m->m_pkthdr.csum_flags |=
   7237 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7238 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7239 				if (errors & WRX_ER_TCPE)
   7240 					m->m_pkthdr.csum_flags |=
   7241 					    M_CSUM_TCP_UDP_BAD;
   7242 			}
   7243 		}
   7244 
   7245 		ifp->if_ipackets++;
   7246 
   7247 		WM_RX_UNLOCK(rxq);
   7248 
   7249 		/* Pass this up to any BPF listeners. */
   7250 		bpf_mtap(ifp, m);
   7251 
   7252 		/* Pass it on. */
   7253 		if_percpuq_enqueue(sc->sc_ipq, m);
   7254 
   7255 		WM_RX_LOCK(rxq);
   7256 
   7257 		if (sc->sc_stopping)
   7258 			break;
   7259 	}
   7260 
   7261 	/* Update the receive pointer. */
   7262 	rxq->rxq_ptr = i;
   7263 	if (count != 0)
   7264 		rnd_add_uint32(&sc->rnd_source, count);
   7265 
   7266 	DPRINTF(WM_DEBUG_RX,
   7267 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7268 }
   7269 
   7270 /*
   7271  * wm_linkintr_gmii:
   7272  *
   7273  *	Helper; handle link interrupts for GMII.
   7274  */
   7275 static void
   7276 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7277 {
   7278 
   7279 	KASSERT(WM_CORE_LOCKED(sc));
   7280 
   7281 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7282 		__func__));
   7283 
   7284 	if (icr & ICR_LSC) {
   7285 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7286 
   7287 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7288 			wm_gig_downshift_workaround_ich8lan(sc);
   7289 
   7290 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7291 			device_xname(sc->sc_dev)));
   7292 		mii_pollstat(&sc->sc_mii);
   7293 		if (sc->sc_type == WM_T_82543) {
   7294 			int miistatus, active;
   7295 
   7296 			/*
   7297 			 * With 82543, we need to force speed and
   7298 			 * duplex on the MAC equal to what the PHY
   7299 			 * speed and duplex configuration is.
   7300 			 */
   7301 			miistatus = sc->sc_mii.mii_media_status;
   7302 
   7303 			if (miistatus & IFM_ACTIVE) {
   7304 				active = sc->sc_mii.mii_media_active;
   7305 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7306 				switch (IFM_SUBTYPE(active)) {
   7307 				case IFM_10_T:
   7308 					sc->sc_ctrl |= CTRL_SPEED_10;
   7309 					break;
   7310 				case IFM_100_TX:
   7311 					sc->sc_ctrl |= CTRL_SPEED_100;
   7312 					break;
   7313 				case IFM_1000_T:
   7314 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7315 					break;
   7316 				default:
   7317 					/*
   7318 					 * fiber?
   7319 					 * Shoud not enter here.
    7320 					 * Should not enter here.
   7321 					printf("unknown media (%x)\n", active);
   7322 					break;
   7323 				}
   7324 				if (active & IFM_FDX)
   7325 					sc->sc_ctrl |= CTRL_FD;
   7326 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7327 			}
   7328 		} else if ((sc->sc_type == WM_T_ICH8)
   7329 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7330 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7331 		} else if (sc->sc_type == WM_T_PCH) {
   7332 			wm_k1_gig_workaround_hv(sc,
   7333 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7334 		}
   7335 
   7336 		if ((sc->sc_phytype == WMPHY_82578)
   7337 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7338 			== IFM_1000_T)) {
   7339 
   7340 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7341 				delay(200*1000); /* XXX too big */
   7342 
   7343 				/* Link stall fix for link up */
   7344 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7345 				    HV_MUX_DATA_CTRL,
   7346 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7347 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7348 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7349 				    HV_MUX_DATA_CTRL,
   7350 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7351 			}
   7352 		}
   7353 	} else if (icr & ICR_RXSEQ) {
    7354 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   7355 			device_xname(sc->sc_dev)));
   7356 	}
   7357 }
   7358 
   7359 /*
   7360  * wm_linkintr_tbi:
   7361  *
   7362  *	Helper; handle link interrupts for TBI mode.
   7363  */
   7364 static void
   7365 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7366 {
   7367 	uint32_t status;
   7368 
   7369 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7370 		__func__));
   7371 
   7372 	status = CSR_READ(sc, WMREG_STATUS);
   7373 	if (icr & ICR_LSC) {
   7374 		if (status & STATUS_LU) {
   7375 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7376 			    device_xname(sc->sc_dev),
   7377 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7378 			/*
   7379 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7380 			 * so we should update sc->sc_ctrl
   7381 			 */
   7382 
   7383 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7384 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7385 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7386 			if (status & STATUS_FD)
   7387 				sc->sc_tctl |=
   7388 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7389 			else
   7390 				sc->sc_tctl |=
   7391 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7392 			if (sc->sc_ctrl & CTRL_TFCE)
   7393 				sc->sc_fcrtl |= FCRTL_XONE;
   7394 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7395 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7396 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7397 				      sc->sc_fcrtl);
   7398 			sc->sc_tbi_linkup = 1;
   7399 		} else {
   7400 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7401 			    device_xname(sc->sc_dev)));
   7402 			sc->sc_tbi_linkup = 0;
   7403 		}
   7404 		/* Update LED */
   7405 		wm_tbi_serdes_set_linkled(sc);
   7406 	} else if (icr & ICR_RXSEQ) {
   7407 		DPRINTF(WM_DEBUG_LINK,
   7408 		    ("%s: LINK: Receive sequence error\n",
   7409 		    device_xname(sc->sc_dev)));
   7410 	}
   7411 }
   7412 
   7413 /*
   7414  * wm_linkintr_serdes:
   7415  *
    7416  *	Helper; handle link interrupts for SERDES mode.
   7417  */
   7418 static void
   7419 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7420 {
   7421 	struct mii_data *mii = &sc->sc_mii;
   7422 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7423 	uint32_t pcs_adv, pcs_lpab, reg;
   7424 
   7425 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7426 		__func__));
   7427 
   7428 	if (icr & ICR_LSC) {
   7429 		/* Check PCS */
   7430 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7431 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7432 			mii->mii_media_status |= IFM_ACTIVE;
   7433 			sc->sc_tbi_linkup = 1;
   7434 		} else {
   7435 			mii->mii_media_status |= IFM_NONE;
   7436 			sc->sc_tbi_linkup = 0;
   7437 			wm_tbi_serdes_set_linkled(sc);
   7438 			return;
   7439 		}
   7440 		mii->mii_media_active |= IFM_1000_SX;
   7441 		if ((reg & PCS_LSTS_FDX) != 0)
   7442 			mii->mii_media_active |= IFM_FDX;
   7443 		else
   7444 			mii->mii_media_active |= IFM_HDX;
   7445 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7446 			/* Check flow */
   7447 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7448 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7449 				DPRINTF(WM_DEBUG_LINK,
   7450 				    ("XXX LINKOK but not ACOMP\n"));
   7451 				return;
   7452 			}
   7453 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7454 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7455 			DPRINTF(WM_DEBUG_LINK,
   7456 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7457 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7458 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7459 				mii->mii_media_active |= IFM_FLOW
   7460 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7461 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7462 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7463 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7464 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7465 				mii->mii_media_active |= IFM_FLOW
   7466 				    | IFM_ETH_TXPAUSE;
   7467 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7468 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7469 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7470 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7471 				mii->mii_media_active |= IFM_FLOW
   7472 				    | IFM_ETH_RXPAUSE;
   7473 		}
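         		/*
         		 * The chain above is the standard 802.3 pause
         		 * resolution: symmetric pause on both sides enables
         		 * flow control in both directions, while a symmetric
         		 * advertisement meeting an asymmetric one yields
         		 * TX-only or RX-only pause accordingly.
         		 */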
   7474 		/* Update LED */
   7475 		wm_tbi_serdes_set_linkled(sc);
   7476 	} else {
   7477 		DPRINTF(WM_DEBUG_LINK,
   7478 		    ("%s: LINK: Receive sequence error\n",
   7479 		    device_xname(sc->sc_dev)));
   7480 	}
   7481 }
   7482 
   7483 /*
   7484  * wm_linkintr:
   7485  *
   7486  *	Helper; handle link interrupts.
   7487  */
   7488 static void
   7489 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7490 {
   7491 
   7492 	KASSERT(WM_CORE_LOCKED(sc));
   7493 
   7494 	if (sc->sc_flags & WM_F_HAS_MII)
   7495 		wm_linkintr_gmii(sc, icr);
   7496 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7497 	    && (sc->sc_type >= WM_T_82575))
   7498 		wm_linkintr_serdes(sc, icr);
   7499 	else
   7500 		wm_linkintr_tbi(sc, icr);
   7501 }
   7502 
   7503 /*
   7504  * wm_intr_legacy:
   7505  *
   7506  *	Interrupt service routine for INTx and MSI.
   7507  */
   7508 static int
   7509 wm_intr_legacy(void *arg)
   7510 {
   7511 	struct wm_softc *sc = arg;
   7512 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7513 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7514 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7515 	uint32_t icr, rndval = 0;
   7516 	int handled = 0;
   7517 
   7518 	DPRINTF(WM_DEBUG_TX,
   7519 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7520 	while (1 /* CONSTCOND */) {
   7521 		icr = CSR_READ(sc, WMREG_ICR);
   7522 		if ((icr & sc->sc_icr) == 0)
   7523 			break;
   7524 		if (rndval == 0)
   7525 			rndval = icr;
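         		/*
         		 * ICR is read-to-clear, so each pass consumes one
         		 * snapshot of pending causes; the loop exits once no
         		 * cause we are interested in remains asserted.
         		 */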
   7526 
   7527 		WM_RX_LOCK(rxq);
   7528 
   7529 		if (sc->sc_stopping) {
   7530 			WM_RX_UNLOCK(rxq);
   7531 			break;
   7532 		}
   7533 
   7534 		handled = 1;
   7535 
   7536 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7537 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7538 			DPRINTF(WM_DEBUG_RX,
   7539 			    ("%s: RX: got Rx intr 0x%08x\n",
   7540 			    device_xname(sc->sc_dev),
   7541 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7542 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7543 		}
   7544 #endif
   7545 		wm_rxeof(rxq);
   7546 
   7547 		WM_RX_UNLOCK(rxq);
   7548 		WM_TX_LOCK(txq);
   7549 
   7550 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7551 		if (icr & ICR_TXDW) {
   7552 			DPRINTF(WM_DEBUG_TX,
   7553 			    ("%s: TX: got TXDW interrupt\n",
   7554 			    device_xname(sc->sc_dev)));
   7555 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7556 		}
   7557 #endif
   7558 		wm_txeof(sc);
   7559 
   7560 		WM_TX_UNLOCK(txq);
   7561 		WM_CORE_LOCK(sc);
   7562 
   7563 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7564 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7565 			wm_linkintr(sc, icr);
   7566 		}
   7567 
   7568 		WM_CORE_UNLOCK(sc);
   7569 
   7570 		if (icr & ICR_RXO) {
   7571 #if defined(WM_DEBUG)
   7572 			log(LOG_WARNING, "%s: Receive overrun\n",
   7573 			    device_xname(sc->sc_dev));
   7574 #endif /* defined(WM_DEBUG) */
   7575 		}
   7576 	}
   7577 
   7578 	rnd_add_uint32(&sc->rnd_source, rndval);
   7579 
   7580 	if (handled) {
   7581 		/* Try to get more packets going. */
   7582 		ifp->if_start(ifp);
   7583 	}
   7584 
   7585 	return handled;
   7586 }
   7587 
   7588 /*
   7589  * wm_txintr_msix:
   7590  *
   7591  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7592  */
   7593 static int
   7594 wm_txintr_msix(void *arg)
   7595 {
   7596 	struct wm_txqueue *txq = arg;
   7597 	struct wm_softc *sc = txq->txq_sc;
   7598 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7599 	int handled = 0;
   7600 
   7601 	DPRINTF(WM_DEBUG_TX,
   7602 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7603 
   7604 	if (sc->sc_type == WM_T_82574)
   7605 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
   7606 	else if (sc->sc_type == WM_T_82575)
   7607 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7608 	else
   7609 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
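         	/*
         	 * The queue's interrupt is masked while it is serviced
         	 * (IMC on 82574, EIMC elsewhere) and re-enabled through
         	 * IMS/EIMS on the way out, below.
         	 */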
   7610 
   7611 	WM_TX_LOCK(txq);
   7612 
   7613 	if (sc->sc_stopping)
   7614 		goto out;
   7615 
   7616 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7617 	handled = wm_txeof(sc);
   7618 
   7619 out:
   7620 	WM_TX_UNLOCK(txq);
   7621 
   7622 	if (sc->sc_type == WM_T_82574)
   7623 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
   7624 	else if (sc->sc_type == WM_T_82575)
   7625 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7626 	else
   7627 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7628 
   7629 	if (handled) {
   7630 		/* Try to get more packets going. */
   7631 		ifp->if_start(ifp);
   7632 	}
   7633 
   7634 	return handled;
   7635 }
   7636 
   7637 /*
   7638  * wm_rxintr_msix:
   7639  *
   7640  *	Interrupt service routine for RX interrupt for MSI-X.
   7641  */
   7642 static int
   7643 wm_rxintr_msix(void *arg)
   7644 {
   7645 	struct wm_rxqueue *rxq = arg;
   7646 	struct wm_softc *sc = rxq->rxq_sc;
   7647 
   7648 	DPRINTF(WM_DEBUG_RX,
   7649 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7650 
   7651 	if (sc->sc_type == WM_T_82574)
   7652 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
   7653 	else if (sc->sc_type == WM_T_82575)
   7654 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7655 	else
   7656 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7657 
   7658 	WM_RX_LOCK(rxq);
   7659 
   7660 	if (sc->sc_stopping)
   7661 		goto out;
   7662 
   7663 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7664 	wm_rxeof(rxq);
   7665 
   7666 out:
   7667 	WM_RX_UNLOCK(rxq);
   7668 
   7669 	if (sc->sc_type == WM_T_82574)
   7670 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7671 	else if (sc->sc_type == WM_T_82575)
   7672 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7673 	else
   7674 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7675 
   7676 	return 1;
   7677 }
   7678 
   7679 /*
   7680  * wm_linkintr_msix:
   7681  *
   7682  *	Interrupt service routine for link status change for MSI-X.
   7683  */
   7684 static int
   7685 wm_linkintr_msix(void *arg)
   7686 {
   7687 	struct wm_softc *sc = arg;
   7688 	uint32_t reg;
   7689 
   7690 	DPRINTF(WM_DEBUG_LINK,
   7691 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7692 
   7693 	reg = CSR_READ(sc, WMREG_ICR);
   7694 	WM_CORE_LOCK(sc);
   7695 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7696 		goto out;
   7697 
   7698 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7699 	wm_linkintr(sc, ICR_LSC);
   7700 
   7701 out:
   7702 	WM_CORE_UNLOCK(sc);
   7703 
   7704 	if (sc->sc_type == WM_T_82574)
   7705 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7706 	else if (sc->sc_type == WM_T_82575)
   7707 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7708 	else
   7709 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7710 
   7711 	return 1;
   7712 }
   7713 
   7714 /*
   7715  * Media related.
   7716  * GMII, SGMII, TBI (and SERDES)
   7717  */
   7718 
   7719 /* Common */
   7720 
   7721 /*
   7722  * wm_tbi_serdes_set_linkled:
   7723  *
   7724  *	Update the link LED on TBI and SERDES devices.
   7725  */
   7726 static void
   7727 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7728 {
   7729 
   7730 	if (sc->sc_tbi_linkup)
   7731 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7732 	else
   7733 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7734 
   7735 	/* 82540 or newer devices are active low */
   7736 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
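         	/*
         	 * The XOR inverts the bit only on the active-low parts, so
         	 * with the link up SWDPIN(0) is driven to 0 there, lighting
         	 * the LED.
         	 */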
   7737 
   7738 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7739 }
   7740 
   7741 /* GMII related */
   7742 
   7743 /*
   7744  * wm_gmii_reset:
   7745  *
   7746  *	Reset the PHY.
   7747  */
   7748 static void
   7749 wm_gmii_reset(struct wm_softc *sc)
   7750 {
   7751 	uint32_t reg;
   7752 	int rv;
   7753 
   7754 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7755 		device_xname(sc->sc_dev), __func__));
   7756 	/* get phy semaphore */
   7757 	switch (sc->sc_type) {
   7758 	case WM_T_82571:
   7759 	case WM_T_82572:
   7760 	case WM_T_82573:
   7761 	case WM_T_82574:
   7762 	case WM_T_82583:
   7763 		 /* XXX should get sw semaphore, too */
   7764 		rv = wm_get_swsm_semaphore(sc);
   7765 		break;
   7766 	case WM_T_82575:
   7767 	case WM_T_82576:
   7768 	case WM_T_82580:
   7769 	case WM_T_I350:
   7770 	case WM_T_I354:
   7771 	case WM_T_I210:
   7772 	case WM_T_I211:
   7773 	case WM_T_80003:
   7774 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7775 		break;
   7776 	case WM_T_ICH8:
   7777 	case WM_T_ICH9:
   7778 	case WM_T_ICH10:
   7779 	case WM_T_PCH:
   7780 	case WM_T_PCH2:
   7781 	case WM_T_PCH_LPT:
   7782 	case WM_T_PCH_SPT:
   7783 		rv = wm_get_swfwhw_semaphore(sc);
   7784 		break;
   7785 	default:
    7786 		/* nothing to do */
   7787 		rv = 0;
   7788 		break;
   7789 	}
   7790 	if (rv != 0) {
   7791 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7792 		    __func__);
   7793 		return;
   7794 	}
   7795 
   7796 	switch (sc->sc_type) {
   7797 	case WM_T_82542_2_0:
   7798 	case WM_T_82542_2_1:
   7799 		/* null */
   7800 		break;
   7801 	case WM_T_82543:
   7802 		/*
   7803 		 * With 82543, we need to force speed and duplex on the MAC
   7804 		 * equal to what the PHY speed and duplex configuration is.
   7805 		 * In addition, we need to perform a hardware reset on the PHY
   7806 		 * to take it out of reset.
   7807 		 */
   7808 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7809 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7810 
   7811 		/* The PHY reset pin is active-low. */
   7812 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7813 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7814 		    CTRL_EXT_SWDPIN(4));
   7815 		reg |= CTRL_EXT_SWDPIO(4);
   7816 
   7817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7818 		CSR_WRITE_FLUSH(sc);
   7819 		delay(10*1000);
   7820 
   7821 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7822 		CSR_WRITE_FLUSH(sc);
   7823 		delay(150);
   7824 #if 0
   7825 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7826 #endif
   7827 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
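         		/*
         		 * The sequence above drives SWDPIN(4) low as an
         		 * output for 10 ms to assert reset, releases it high,
         		 * lets it settle for 150 us, then waits the extra
         		 * 20 ms before the PHY ID can be read back.
         		 */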
   7828 		break;
   7829 	case WM_T_82544:	/* reset 10000us */
   7830 	case WM_T_82540:
   7831 	case WM_T_82545:
   7832 	case WM_T_82545_3:
   7833 	case WM_T_82546:
   7834 	case WM_T_82546_3:
   7835 	case WM_T_82541:
   7836 	case WM_T_82541_2:
   7837 	case WM_T_82547:
   7838 	case WM_T_82547_2:
   7839 	case WM_T_82571:	/* reset 100us */
   7840 	case WM_T_82572:
   7841 	case WM_T_82573:
   7842 	case WM_T_82574:
   7843 	case WM_T_82575:
   7844 	case WM_T_82576:
   7845 	case WM_T_82580:
   7846 	case WM_T_I350:
   7847 	case WM_T_I354:
   7848 	case WM_T_I210:
   7849 	case WM_T_I211:
   7850 	case WM_T_82583:
   7851 	case WM_T_80003:
   7852 		/* generic reset */
   7853 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7854 		CSR_WRITE_FLUSH(sc);
   7855 		delay(20000);
   7856 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7857 		CSR_WRITE_FLUSH(sc);
   7858 		delay(20000);
   7859 
   7860 		if ((sc->sc_type == WM_T_82541)
   7861 		    || (sc->sc_type == WM_T_82541_2)
   7862 		    || (sc->sc_type == WM_T_82547)
   7863 		    || (sc->sc_type == WM_T_82547_2)) {
    7864 			/* workarounds for igp are done in igp_reset() */
   7865 			/* XXX add code to set LED after phy reset */
   7866 		}
   7867 		break;
   7868 	case WM_T_ICH8:
   7869 	case WM_T_ICH9:
   7870 	case WM_T_ICH10:
   7871 	case WM_T_PCH:
   7872 	case WM_T_PCH2:
   7873 	case WM_T_PCH_LPT:
   7874 	case WM_T_PCH_SPT:
   7875 		/* generic reset */
   7876 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7877 		CSR_WRITE_FLUSH(sc);
   7878 		delay(100);
   7879 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7880 		CSR_WRITE_FLUSH(sc);
   7881 		delay(150);
   7882 		break;
   7883 	default:
   7884 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7885 		    __func__);
   7886 		break;
   7887 	}
   7888 
   7889 	/* release PHY semaphore */
   7890 	switch (sc->sc_type) {
   7891 	case WM_T_82571:
   7892 	case WM_T_82572:
   7893 	case WM_T_82573:
   7894 	case WM_T_82574:
   7895 	case WM_T_82583:
   7896 		 /* XXX should put sw semaphore, too */
   7897 		wm_put_swsm_semaphore(sc);
   7898 		break;
   7899 	case WM_T_82575:
   7900 	case WM_T_82576:
   7901 	case WM_T_82580:
   7902 	case WM_T_I350:
   7903 	case WM_T_I354:
   7904 	case WM_T_I210:
   7905 	case WM_T_I211:
   7906 	case WM_T_80003:
   7907 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7908 		break;
   7909 	case WM_T_ICH8:
   7910 	case WM_T_ICH9:
   7911 	case WM_T_ICH10:
   7912 	case WM_T_PCH:
   7913 	case WM_T_PCH2:
   7914 	case WM_T_PCH_LPT:
   7915 	case WM_T_PCH_SPT:
   7916 		wm_put_swfwhw_semaphore(sc);
   7917 		break;
   7918 	default:
   7919 		/* nothing to do */
   7920 		rv = 0;
   7921 		break;
   7922 	}
   7923 
   7924 	/* get_cfg_done */
   7925 	wm_get_cfg_done(sc);
   7926 
   7927 	/* extra setup */
   7928 	switch (sc->sc_type) {
   7929 	case WM_T_82542_2_0:
   7930 	case WM_T_82542_2_1:
   7931 	case WM_T_82543:
   7932 	case WM_T_82544:
   7933 	case WM_T_82540:
   7934 	case WM_T_82545:
   7935 	case WM_T_82545_3:
   7936 	case WM_T_82546:
   7937 	case WM_T_82546_3:
   7938 	case WM_T_82541_2:
   7939 	case WM_T_82547_2:
   7940 	case WM_T_82571:
   7941 	case WM_T_82572:
   7942 	case WM_T_82573:
   7943 	case WM_T_82575:
   7944 	case WM_T_82576:
   7945 	case WM_T_82580:
   7946 	case WM_T_I350:
   7947 	case WM_T_I354:
   7948 	case WM_T_I210:
   7949 	case WM_T_I211:
   7950 	case WM_T_80003:
   7951 		/* null */
   7952 		break;
   7953 	case WM_T_82574:
   7954 	case WM_T_82583:
   7955 		wm_lplu_d0_disable(sc);
   7956 		break;
   7957 	case WM_T_82541:
   7958 	case WM_T_82547:
    7959 		/* XXX Configure the LED after PHY reset */
   7960 		break;
   7961 	case WM_T_ICH8:
   7962 	case WM_T_ICH9:
   7963 	case WM_T_ICH10:
   7964 	case WM_T_PCH:
   7965 	case WM_T_PCH2:
   7966 	case WM_T_PCH_LPT:
   7967 	case WM_T_PCH_SPT:
    7968 		/* Allow time for h/w to get to a quiescent state after reset */
   7969 		delay(10*1000);
   7970 
   7971 		if (sc->sc_type == WM_T_PCH)
   7972 			wm_hv_phy_workaround_ich8lan(sc);
   7973 
   7974 		if (sc->sc_type == WM_T_PCH2)
   7975 			wm_lv_phy_workaround_ich8lan(sc);
   7976 
   7977 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7978 			/*
   7979 			 * dummy read to clear the phy wakeup bit after lcd
   7980 			 * reset
   7981 			 */
   7982 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7983 		}
   7984 
   7985 		/*
    7986 		 * XXX Configure the LCD with the extended configuration region
   7987 		 * in NVM
   7988 		 */
   7989 
   7990 		/* Disable D0 LPLU. */
   7991 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7992 			wm_lplu_d0_disable_pch(sc);
   7993 		else
   7994 			wm_lplu_d0_disable(sc);	/* ICH* */
   7995 		break;
   7996 	default:
   7997 		panic("%s: unknown type\n", __func__);
   7998 		break;
   7999 	}
   8000 }
   8001 
   8002 /*
   8003  * wm_get_phy_id_82575:
   8004  *
   8005  * Return PHY ID. Return -1 if it failed.
   8006  */
   8007 static int
   8008 wm_get_phy_id_82575(struct wm_softc *sc)
   8009 {
   8010 	uint32_t reg;
   8011 	int phyid = -1;
   8012 
   8013 	/* XXX */
   8014 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8015 		return -1;
   8016 
   8017 	if (wm_sgmii_uses_mdio(sc)) {
   8018 		switch (sc->sc_type) {
   8019 		case WM_T_82575:
   8020 		case WM_T_82576:
   8021 			reg = CSR_READ(sc, WMREG_MDIC);
   8022 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8023 			break;
   8024 		case WM_T_82580:
   8025 		case WM_T_I350:
   8026 		case WM_T_I354:
   8027 		case WM_T_I210:
   8028 		case WM_T_I211:
   8029 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8030 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8031 			break;
   8032 		default:
   8033 			return -1;
   8034 		}
   8035 	}
   8036 
   8037 	return phyid;
   8038 }
   8039 
   8040 
   8041 /*
   8042  * wm_gmii_mediainit:
   8043  *
   8044  *	Initialize media for use on 1000BASE-T devices.
   8045  */
   8046 static void
   8047 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8048 {
   8049 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8050 	struct mii_data *mii = &sc->sc_mii;
   8051 	uint32_t reg;
   8052 
   8053 	/* We have GMII. */
   8054 	sc->sc_flags |= WM_F_HAS_MII;
   8055 
   8056 	if (sc->sc_type == WM_T_80003)
   8057 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8058 	else
   8059 		sc->sc_tipg = TIPG_1000T_DFLT;
   8060 
   8061 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8062 	if ((sc->sc_type == WM_T_82580)
   8063 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8064 	    || (sc->sc_type == WM_T_I211)) {
   8065 		reg = CSR_READ(sc, WMREG_PHPM);
   8066 		reg &= ~PHPM_GO_LINK_D;
   8067 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8068 	}
   8069 
   8070 	/*
   8071 	 * Let the chip set speed/duplex on its own based on
   8072 	 * signals from the PHY.
   8073 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8074 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8075 	 */
   8076 	sc->sc_ctrl |= CTRL_SLU;
   8077 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8078 
   8079 	/* Initialize our media structures and probe the GMII. */
   8080 	mii->mii_ifp = ifp;
   8081 
   8082 	/*
   8083 	 * Determine the PHY access method.
   8084 	 *
   8085 	 *  For SGMII, use SGMII specific method.
   8086 	 *
   8087 	 *  For some devices, we can determine the PHY access method
   8088 	 * from sc_type.
   8089 	 *
    8090 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8091 	 * access method from sc_type alone, so use the PCI product ID for
    8092 	 * some devices.
    8093 	 * For other ICH8 variants, try the igp method first; if the PHY
    8094 	 * can't be detected that way, fall back to the bm method.
   8095 	 */
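
	/*
	 * Reading aid (a summary of the code below, not new logic): a
	 * product-ID match wins first; otherwise SGMII, then the
	 * type-based accessors are chosen.  PCH* parts are overridden
	 * with the _hv_ accessors afterwards, and if no PHY answers,
	 * the probe is retried in slow MDIO mode (PCH2/PCH_LPT) and
	 * finally with the _bm_ accessors.
	 */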
   8096 	switch (prodid) {
   8097 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8098 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8099 		/* 82577 */
   8100 		sc->sc_phytype = WMPHY_82577;
   8101 		break;
   8102 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8103 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8104 		/* 82578 */
   8105 		sc->sc_phytype = WMPHY_82578;
   8106 		break;
   8107 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8108 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8109 		/* 82579 */
   8110 		sc->sc_phytype = WMPHY_82579;
   8111 		break;
   8112 	case PCI_PRODUCT_INTEL_82801I_BM:
   8113 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8114 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8115 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8116 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8117 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8118 		/* 82567 */
   8119 		sc->sc_phytype = WMPHY_BM;
   8120 		mii->mii_readreg = wm_gmii_bm_readreg;
   8121 		mii->mii_writereg = wm_gmii_bm_writereg;
   8122 		break;
   8123 	default:
   8124 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8125 		    && !wm_sgmii_uses_mdio(sc)){
   8126 			/* SGMII */
   8127 			mii->mii_readreg = wm_sgmii_readreg;
   8128 			mii->mii_writereg = wm_sgmii_writereg;
   8129 		} else if (sc->sc_type >= WM_T_80003) {
   8130 			/* 80003 */
   8131 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8132 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8133 		} else if (sc->sc_type >= WM_T_I210) {
   8134 			/* I210 and I211 */
   8135 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8136 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8137 		} else if (sc->sc_type >= WM_T_82580) {
   8138 			/* 82580, I350 and I354 */
   8139 			sc->sc_phytype = WMPHY_82580;
   8140 			mii->mii_readreg = wm_gmii_82580_readreg;
   8141 			mii->mii_writereg = wm_gmii_82580_writereg;
   8142 		} else if (sc->sc_type >= WM_T_82544) {
    8143 			/* 82544, 8254[01567], 8257[123456] and 82583 */
   8144 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8145 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8146 		} else {
   8147 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8148 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8149 		}
   8150 		break;
   8151 	}
   8152 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8153 		/* All PCH* use _hv_ */
   8154 		mii->mii_readreg = wm_gmii_hv_readreg;
   8155 		mii->mii_writereg = wm_gmii_hv_writereg;
   8156 	}
   8157 	mii->mii_statchg = wm_gmii_statchg;
   8158 
   8159 	wm_gmii_reset(sc);
   8160 
   8161 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8162 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8163 	    wm_gmii_mediastatus);
   8164 
   8165 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8166 	    || (sc->sc_type == WM_T_82580)
   8167 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8168 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8169 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8170 			/* Attach only one port */
   8171 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8172 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8173 		} else {
   8174 			int i, id;
   8175 			uint32_t ctrl_ext;
   8176 
   8177 			id = wm_get_phy_id_82575(sc);
   8178 			if (id != -1) {
   8179 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8180 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8181 			}
   8182 			if ((id == -1)
   8183 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8184 				/* Power on sgmii phy if it is disabled */
   8185 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8186 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8187 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8188 				CSR_WRITE_FLUSH(sc);
   8189 				delay(300*1000); /* XXX too long */
   8190 
   8191 				/* from 1 to 8 */
   8192 				for (i = 1; i < 8; i++)
   8193 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8194 					    0xffffffff, i, MII_OFFSET_ANY,
   8195 					    MIIF_DOPAUSE);
   8196 
   8197 				/* restore previous sfp cage power state */
   8198 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8199 			}
   8200 		}
   8201 	} else {
   8202 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8203 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8204 	}
   8205 
   8206 	/*
   8207 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8208 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8209 	 */
   8210 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8211 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8212 		wm_set_mdio_slow_mode_hv(sc);
   8213 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8214 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8215 	}
   8216 
   8217 	/*
   8218 	 * (For ICH8 variants)
   8219 	 * If PHY detection failed, use BM's r/w function and retry.
   8220 	 */
   8221 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8222 		/* if failed, retry with *_bm_* */
   8223 		mii->mii_readreg = wm_gmii_bm_readreg;
   8224 		mii->mii_writereg = wm_gmii_bm_writereg;
   8225 
   8226 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8227 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8228 	}
   8229 
   8230 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8231 		/* No PHY was found */
   8232 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8233 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8234 		sc->sc_phytype = WMPHY_NONE;
   8235 	} else {
   8236 		/*
   8237 		 * PHY Found!
   8238 		 * Check PHY type.
   8239 		 */
   8240 		uint32_t model;
   8241 		struct mii_softc *child;
   8242 
   8243 		child = LIST_FIRST(&mii->mii_phys);
   8244 		model = child->mii_mpd_model;
   8245 		if (model == MII_MODEL_yyINTEL_I82566)
   8246 			sc->sc_phytype = WMPHY_IGP_3;
   8247 
   8248 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8249 	}
   8250 }
   8251 
   8252 /*
   8253  * wm_gmii_mediachange:	[ifmedia interface function]
   8254  *
   8255  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8256  */
   8257 static int
   8258 wm_gmii_mediachange(struct ifnet *ifp)
   8259 {
   8260 	struct wm_softc *sc = ifp->if_softc;
   8261 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8262 	int rc;
   8263 
   8264 	if ((ifp->if_flags & IFF_UP) == 0)
   8265 		return 0;
   8266 
   8267 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8268 	sc->sc_ctrl |= CTRL_SLU;
   8269 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8270 	    || (sc->sc_type > WM_T_82543)) {
   8271 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8272 	} else {
   8273 		sc->sc_ctrl &= ~CTRL_ASDE;
   8274 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8275 		if (ife->ifm_media & IFM_FDX)
   8276 			sc->sc_ctrl |= CTRL_FD;
   8277 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8278 		case IFM_10_T:
   8279 			sc->sc_ctrl |= CTRL_SPEED_10;
   8280 			break;
   8281 		case IFM_100_TX:
   8282 			sc->sc_ctrl |= CTRL_SPEED_100;
   8283 			break;
   8284 		case IFM_1000_T:
   8285 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8286 			break;
   8287 		default:
   8288 			panic("wm_gmii_mediachange: bad media 0x%x",
   8289 			    ife->ifm_media);
   8290 		}
   8291 	}
   8292 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8293 	if (sc->sc_type <= WM_T_82543)
   8294 		wm_gmii_reset(sc);
   8295 
   8296 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8297 		return 0;
   8298 	return rc;
   8299 }
   8300 
   8301 /*
   8302  * wm_gmii_mediastatus:	[ifmedia interface function]
   8303  *
   8304  *	Get the current interface media status on a 1000BASE-T device.
   8305  */
   8306 static void
   8307 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8308 {
   8309 	struct wm_softc *sc = ifp->if_softc;
   8310 
   8311 	ether_mediastatus(ifp, ifmr);
   8312 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8313 	    | sc->sc_flowflags;
   8314 }
   8315 
   8316 #define	MDI_IO		CTRL_SWDPIN(2)
   8317 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8318 #define	MDI_CLK		CTRL_SWDPIN(3)
   8319 
   8320 static void
   8321 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8322 {
   8323 	uint32_t i, v;
   8324 
   8325 	v = CSR_READ(sc, WMREG_CTRL);
   8326 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8327 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8328 
   8329 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8330 		if (data & i)
   8331 			v |= MDI_IO;
   8332 		else
   8333 			v &= ~MDI_IO;
   8334 		CSR_WRITE(sc, WMREG_CTRL, v);
   8335 		CSR_WRITE_FLUSH(sc);
   8336 		delay(10);
   8337 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8338 		CSR_WRITE_FLUSH(sc);
   8339 		delay(10);
   8340 		CSR_WRITE(sc, WMREG_CTRL, v);
   8341 		CSR_WRITE_FLUSH(sc);
   8342 		delay(10);
   8343 	}
   8344 }
   8345 
   8346 static uint32_t
   8347 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8348 {
   8349 	uint32_t v, i, data = 0;
   8350 
   8351 	v = CSR_READ(sc, WMREG_CTRL);
   8352 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8353 	v |= CTRL_SWDPIO(3);
   8354 
   8355 	CSR_WRITE(sc, WMREG_CTRL, v);
   8356 	CSR_WRITE_FLUSH(sc);
   8357 	delay(10);
   8358 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8359 	CSR_WRITE_FLUSH(sc);
   8360 	delay(10);
   8361 	CSR_WRITE(sc, WMREG_CTRL, v);
   8362 	CSR_WRITE_FLUSH(sc);
   8363 	delay(10);
   8364 
   8365 	for (i = 0; i < 16; i++) {
   8366 		data <<= 1;
   8367 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8368 		CSR_WRITE_FLUSH(sc);
   8369 		delay(10);
   8370 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8371 			data |= 1;
   8372 		CSR_WRITE(sc, WMREG_CTRL, v);
   8373 		CSR_WRITE_FLUSH(sc);
   8374 		delay(10);
   8375 	}
   8376 
   8377 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8378 	CSR_WRITE_FLUSH(sc);
   8379 	delay(10);
   8380 	CSR_WRITE(sc, WMREG_CTRL, v);
   8381 	CSR_WRITE_FLUSH(sc);
   8382 	delay(10);
   8383 
   8384 	return data;
   8385 }
   8386 
   8387 #undef MDI_IO
   8388 #undef MDI_DIR
   8389 #undef MDI_CLK
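
/*
 * Reading aid: the two helpers above bit-bang an IEEE 802.3 clause 22
 * management frame over the software-definable pins:
 *
 *	<32x1 preamble> <ST> <OP> <PHYAD[4:0]> <REGAD[4:0]> <TA> <DATA[15:0]>
 *
 * wm_i82543_mii_sendbits() shifts the bits out MSB first, pulsing
 * MDI_CLK around each one; wm_i82543_mii_recvbits() clocks the
 * turnaround cycles and the 16 data bits back in.
 */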
   8390 
   8391 /*
   8392  * wm_gmii_i82543_readreg:	[mii interface function]
   8393  *
   8394  *	Read a PHY register on the GMII (i82543 version).
   8395  */
   8396 static int
   8397 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8398 {
   8399 	struct wm_softc *sc = device_private(self);
   8400 	int rv;
   8401 
   8402 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8403 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8404 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8405 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8406 
   8407 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8408 	    device_xname(sc->sc_dev), phy, reg, rv));
   8409 
   8410 	return rv;
   8411 }
   8412 
   8413 /*
   8414  * wm_gmii_i82543_writereg:	[mii interface function]
   8415  *
   8416  *	Write a PHY register on the GMII (i82543 version).
   8417  */
   8418 static void
   8419 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8420 {
   8421 	struct wm_softc *sc = device_private(self);
   8422 
   8423 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8424 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8425 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8426 	    (MII_COMMAND_START << 30), 32);
   8427 }
   8428 
   8429 /*
   8430  * wm_gmii_i82544_readreg:	[mii interface function]
   8431  *
   8432  *	Read a PHY register on the GMII.
   8433  */
   8434 static int
   8435 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8436 {
   8437 	struct wm_softc *sc = device_private(self);
   8438 	uint32_t mdic = 0;
   8439 	int i, rv;
   8440 
   8441 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8442 	    MDIC_REGADD(reg));
   8443 
   8444 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8445 		mdic = CSR_READ(sc, WMREG_MDIC);
   8446 		if (mdic & MDIC_READY)
   8447 			break;
   8448 		delay(50);
   8449 	}
   8450 
   8451 	if ((mdic & MDIC_READY) == 0) {
   8452 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8453 		    device_xname(sc->sc_dev), phy, reg);
   8454 		rv = 0;
   8455 	} else if (mdic & MDIC_E) {
   8456 #if 0 /* This is normal if no PHY is present. */
   8457 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8458 		    device_xname(sc->sc_dev), phy, reg);
   8459 #endif
   8460 		rv = 0;
   8461 	} else {
   8462 		rv = MDIC_DATA(mdic);
   8463 		if (rv == 0xffff)
   8464 			rv = 0;
   8465 	}
   8466 
   8467 	return rv;
   8468 }
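
/*
 * Usage sketch (not compiled in; assumes an attached softc): read the
 * PHY's MII status register through the MDIC interface and test the
 * link bit.  MII_BMSR and BMSR_LINK come from <dev/mii/mii.h>.
 */
#if 0
	int bmsr = wm_gmii_i82544_readreg(sc->sc_dev, 1, MII_BMSR);
	if ((bmsr & BMSR_LINK) != 0)
		printf("%s: link up\n", device_xname(sc->sc_dev));
#endif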
   8469 
   8470 /*
   8471  * wm_gmii_i82544_writereg:	[mii interface function]
   8472  *
   8473  *	Write a PHY register on the GMII.
   8474  */
   8475 static void
   8476 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8477 {
   8478 	struct wm_softc *sc = device_private(self);
   8479 	uint32_t mdic = 0;
   8480 	int i;
   8481 
   8482 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8483 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8484 
   8485 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8486 		mdic = CSR_READ(sc, WMREG_MDIC);
   8487 		if (mdic & MDIC_READY)
   8488 			break;
   8489 		delay(50);
   8490 	}
   8491 
   8492 	if ((mdic & MDIC_READY) == 0)
   8493 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8494 		    device_xname(sc->sc_dev), phy, reg);
   8495 	else if (mdic & MDIC_E)
   8496 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8497 		    device_xname(sc->sc_dev), phy, reg);
   8498 }
   8499 
   8500 /*
   8501  * wm_gmii_i80003_readreg:	[mii interface function]
   8502  *
    8503  *	Read a PHY register on the Kumeran bus (80003).
    8504  * This could be handled by the PHY layer if we didn't have to lock the
    8505  * resource ...
   8506  */
   8507 static int
   8508 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8509 {
   8510 	struct wm_softc *sc = device_private(self);
   8511 	int sem;
   8512 	int rv;
   8513 
   8514 	if (phy != 1) /* only one PHY on kumeran bus */
   8515 		return 0;
   8516 
   8517 	sem = swfwphysem[sc->sc_funcid];
   8518 	if (wm_get_swfw_semaphore(sc, sem)) {
   8519 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8520 		    __func__);
   8521 		return 0;
   8522 	}
   8523 
   8524 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8525 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8526 		    reg >> GG82563_PAGE_SHIFT);
   8527 	} else {
   8528 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8529 		    reg >> GG82563_PAGE_SHIFT);
   8530 	}
    8531 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8532 	delay(200);
   8533 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8534 	delay(200);
   8535 
   8536 	wm_put_swfw_semaphore(sc, sem);
   8537 	return rv;
   8538 }
   8539 
   8540 /*
   8541  * wm_gmii_i80003_writereg:	[mii interface function]
   8542  *
    8543  *	Write a PHY register on the Kumeran bus (80003).
    8544  * This could be handled by the PHY layer if we didn't have to lock the
    8545  * resource ...
   8546  */
   8547 static void
   8548 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8549 {
   8550 	struct wm_softc *sc = device_private(self);
   8551 	int sem;
   8552 
   8553 	if (phy != 1) /* only one PHY on kumeran bus */
   8554 		return;
   8555 
   8556 	sem = swfwphysem[sc->sc_funcid];
   8557 	if (wm_get_swfw_semaphore(sc, sem)) {
   8558 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8559 		    __func__);
   8560 		return;
   8561 	}
   8562 
   8563 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8564 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8565 		    reg >> GG82563_PAGE_SHIFT);
   8566 	} else {
   8567 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8568 		    reg >> GG82563_PAGE_SHIFT);
   8569 	}
    8570 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8571 	delay(200);
   8572 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8573 	delay(200);
   8574 
   8575 	wm_put_swfw_semaphore(sc, sem);
   8576 }
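
/*
 * Reading aid for the GG82563 paging scheme above: the "reg" argument
 * packs the page into the bits above GG82563_PAGE_SHIFT, so reading
 * register 16 on page 2 would look like this (a sketch, not compiled
 * in):
 */
#if 0
	int val = wm_gmii_i80003_readreg(sc->sc_dev, 1,
	    (2 << GG82563_PAGE_SHIFT) | 16);
#endif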
   8577 
   8578 /*
   8579  * wm_gmii_bm_readreg:	[mii interface function]
   8580  *
    8581  *	Read a PHY register on the BM PHY (82567, ICH8/9/10 family).
    8582  * This could be handled by the PHY layer if we didn't have to lock the
    8583  * resource ...
   8584  */
   8585 static int
   8586 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8587 {
   8588 	struct wm_softc *sc = device_private(self);
   8589 	int sem;
   8590 	int rv;
   8591 
   8592 	sem = swfwphysem[sc->sc_funcid];
   8593 	if (wm_get_swfw_semaphore(sc, sem)) {
   8594 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8595 		    __func__);
   8596 		return 0;
   8597 	}
   8598 
   8599 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8600 		if (phy == 1)
   8601 			wm_gmii_i82544_writereg(self, phy,
   8602 			    MII_IGPHY_PAGE_SELECT, reg);
   8603 		else
   8604 			wm_gmii_i82544_writereg(self, phy,
   8605 			    GG82563_PHY_PAGE_SELECT,
   8606 			    reg >> GG82563_PAGE_SHIFT);
   8607 	}
   8608 
   8609 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8610 	wm_put_swfw_semaphore(sc, sem);
   8611 	return rv;
   8612 }
   8613 
   8614 /*
   8615  * wm_gmii_bm_writereg:	[mii interface function]
   8616  *
    8617  *	Write a PHY register on the BM PHY (82567, ICH8/9/10 family).
    8618  * This could be handled by the PHY layer if we didn't have to lock the
    8619  * resource ...
   8620  */
   8621 static void
   8622 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8623 {
   8624 	struct wm_softc *sc = device_private(self);
   8625 	int sem;
   8626 
   8627 	sem = swfwphysem[sc->sc_funcid];
   8628 	if (wm_get_swfw_semaphore(sc, sem)) {
   8629 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8630 		    __func__);
   8631 		return;
   8632 	}
   8633 
   8634 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8635 		if (phy == 1)
   8636 			wm_gmii_i82544_writereg(self, phy,
   8637 			    MII_IGPHY_PAGE_SELECT, reg);
   8638 		else
   8639 			wm_gmii_i82544_writereg(self, phy,
   8640 			    GG82563_PHY_PAGE_SELECT,
   8641 			    reg >> GG82563_PAGE_SHIFT);
   8642 	}
   8643 
   8644 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8645 	wm_put_swfw_semaphore(sc, sem);
   8646 }
   8647 
   8648 static void
   8649 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8650 {
   8651 	struct wm_softc *sc = device_private(self);
   8652 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8653 	uint16_t wuce;
   8654 
   8655 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8656 	if (sc->sc_type == WM_T_PCH) {
    8657 		/* XXX the e1000 driver does nothing here... why? */
   8658 	}
   8659 
   8660 	/* Set page 769 */
   8661 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8662 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8663 
   8664 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8665 
   8666 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8667 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8668 	    wuce | BM_WUC_ENABLE_BIT);
   8669 
   8670 	/* Select page 800 */
   8671 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8672 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8673 
   8674 	/* Write page 800 */
   8675 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8676 
   8677 	if (rd)
   8678 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8679 	else
   8680 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8681 
   8682 	/* Set page 769 */
   8683 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8684 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8685 
   8686 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8687 }
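
/*
 * Reading aid: the helper above implements the BM wakeup-register
 * sequence: select page 769 and set the enable bit in
 * BM_WUC_ENABLE_REG, select page 800, write the target register
 * number through the address opcode, move the data through the data
 * opcode, then restore page 769's enable bits.
 */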
   8688 
   8689 /*
   8690  * wm_gmii_hv_readreg:	[mii interface function]
   8691  *
    8692  *	Read a PHY register on the HV PHY (PCH family).
    8693  * This could be handled by the PHY layer if we didn't have to lock the
    8694  * resource ...
   8695  */
   8696 static int
   8697 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8698 {
   8699 	struct wm_softc *sc = device_private(self);
   8700 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8701 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8702 	uint16_t val;
   8703 	int rv;
   8704 
   8705 	if (wm_get_swfwhw_semaphore(sc)) {
   8706 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8707 		    __func__);
   8708 		return 0;
   8709 	}
   8710 
   8711 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8712 	if (sc->sc_phytype == WMPHY_82577) {
   8713 		/* XXX must write */
   8714 	}
   8715 
   8716 	/* Page 800 works differently than the rest so it has its own func */
    8717 	if (page == BM_WUC_PAGE) {
    8718 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    8719 		return val;
    8720 	}
    8721 
    8722 	/*
    8723 	 * Pages lower than 768 work differently than the rest, so they
    8724 	 * would need their own function; that is not implemented yet.
    8725 	 */
    8726 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8727 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8728 		return 0;
    8729 	}
   8730 
   8731 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8732 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8733 		    page << BME1000_PAGE_SHIFT);
   8734 	}
   8735 
   8736 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8737 	wm_put_swfwhw_semaphore(sc);
   8738 	return rv;
   8739 }
   8740 
   8741 /*
   8742  * wm_gmii_hv_writereg:	[mii interface function]
   8743  *
    8744  *	Write a PHY register on the HV PHY (PCH family).
    8745  * This could be handled by the PHY layer if we didn't have to lock the
    8746  * resource ...
   8747  */
   8748 static void
   8749 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8750 {
   8751 	struct wm_softc *sc = device_private(self);
   8752 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8753 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8754 
   8755 	if (wm_get_swfwhw_semaphore(sc)) {
   8756 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8757 		    __func__);
   8758 		return;
   8759 	}
   8760 
   8761 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8762 
   8763 	/* Page 800 works differently than the rest so it has its own func */
    8764 	if (page == BM_WUC_PAGE) {
    8765 		uint16_t tmp;
    8766 
    8767 		tmp = val;
    8768 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    8769 		return;
    8770 	}
    8771 
    8772 	/*
    8773 	 * Pages lower than 768 work differently than the rest, so they
    8774 	 * would need their own function; that is not implemented yet.
    8775 	 */
    8776 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8777 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8778 		return;
    8779 	}
   8780 
   8781 	/*
   8782 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8783 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8784 	 */
   8785 
   8786 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8787 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8788 		    page << BME1000_PAGE_SHIFT);
   8789 	}
   8790 
   8791 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8792 	wm_put_swfwhw_semaphore(sc);
   8793 }
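
/*
 * Reading aid: HV/BM register identifiers pack a page number and a
 * register number into one integer; BM_PHY_REG_PAGE() and
 * BM_PHY_REG_NUM() above split them apart again, and any register
 * above BME1000_MAX_MULTI_PAGE_REG needs an explicit page-select
 * write before the access.
 */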
   8794 
   8795 /*
   8796  * wm_gmii_82580_readreg:	[mii interface function]
   8797  *
   8798  *	Read a PHY register on the 82580 and I350.
   8799  * This could be handled by the PHY layer if we didn't have to lock the
    8800  * resource ...
   8801  */
   8802 static int
   8803 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8804 {
   8805 	struct wm_softc *sc = device_private(self);
   8806 	int sem;
   8807 	int rv;
   8808 
   8809 	sem = swfwphysem[sc->sc_funcid];
   8810 	if (wm_get_swfw_semaphore(sc, sem)) {
   8811 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8812 		    __func__);
   8813 		return 0;
   8814 	}
   8815 
   8816 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8817 
   8818 	wm_put_swfw_semaphore(sc, sem);
   8819 	return rv;
   8820 }
   8821 
   8822 /*
   8823  * wm_gmii_82580_writereg:	[mii interface function]
   8824  *
   8825  *	Write a PHY register on the 82580 and I350.
   8826  * This could be handled by the PHY layer if we didn't have to lock the
    8827  * resource ...
   8828  */
   8829 static void
   8830 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8831 {
   8832 	struct wm_softc *sc = device_private(self);
   8833 	int sem;
   8834 
   8835 	sem = swfwphysem[sc->sc_funcid];
   8836 	if (wm_get_swfw_semaphore(sc, sem)) {
   8837 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8838 		    __func__);
   8839 		return;
   8840 	}
   8841 
   8842 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8843 
   8844 	wm_put_swfw_semaphore(sc, sem);
   8845 }
   8846 
   8847 /*
   8848  * wm_gmii_gs40g_readreg:	[mii interface function]
   8849  *
    8850  *	Read a PHY register on the I210 and I211.
    8851  * This could be handled by the PHY layer if we didn't have to lock the
    8852  * resource ...
   8853  */
   8854 static int
   8855 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8856 {
   8857 	struct wm_softc *sc = device_private(self);
   8858 	int sem;
   8859 	int page, offset;
   8860 	int rv;
   8861 
   8862 	/* Acquire semaphore */
   8863 	sem = swfwphysem[sc->sc_funcid];
   8864 	if (wm_get_swfw_semaphore(sc, sem)) {
   8865 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8866 		    __func__);
   8867 		return 0;
   8868 	}
   8869 
   8870 	/* Page select */
   8871 	page = reg >> GS40G_PAGE_SHIFT;
   8872 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8873 
   8874 	/* Read reg */
   8875 	offset = reg & GS40G_OFFSET_MASK;
   8876 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8877 
   8878 	wm_put_swfw_semaphore(sc, sem);
   8879 	return rv;
   8880 }
   8881 
   8882 /*
   8883  * wm_gmii_gs40g_writereg:	[mii interface function]
   8884  *
   8885  *	Write a PHY register on the I210 and I211.
   8886  * This could be handled by the PHY layer if we didn't have to lock the
    8887  * resource ...
   8888  */
   8889 static void
   8890 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8891 {
   8892 	struct wm_softc *sc = device_private(self);
   8893 	int sem;
   8894 	int page, offset;
   8895 
   8896 	/* Acquire semaphore */
   8897 	sem = swfwphysem[sc->sc_funcid];
   8898 	if (wm_get_swfw_semaphore(sc, sem)) {
   8899 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8900 		    __func__);
   8901 		return;
   8902 	}
   8903 
   8904 	/* Page select */
   8905 	page = reg >> GS40G_PAGE_SHIFT;
   8906 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8907 
   8908 	/* Write reg */
   8909 	offset = reg & GS40G_OFFSET_MASK;
   8910 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8911 
   8912 	/* Release semaphore */
   8913 	wm_put_swfw_semaphore(sc, sem);
   8914 }
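
/*
 * Reading aid: like the GG82563, the GS40G PHY of the I210/I211 packs
 * the page into the upper bits of "reg", so reading register 16 on
 * page 0 would look like this (a sketch, not compiled in):
 */
#if 0
	int val = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
	    (0 << GS40G_PAGE_SHIFT) | 16);
#endif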
   8915 
   8916 /*
   8917  * wm_gmii_statchg:	[mii interface function]
   8918  *
   8919  *	Callback from MII layer when media changes.
   8920  */
   8921 static void
   8922 wm_gmii_statchg(struct ifnet *ifp)
   8923 {
   8924 	struct wm_softc *sc = ifp->if_softc;
   8925 	struct mii_data *mii = &sc->sc_mii;
   8926 
   8927 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8928 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8929 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8930 
   8931 	/*
   8932 	 * Get flow control negotiation result.
   8933 	 */
   8934 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8935 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8936 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8937 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8938 	}
   8939 
   8940 	if (sc->sc_flowflags & IFM_FLOW) {
   8941 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8942 			sc->sc_ctrl |= CTRL_TFCE;
   8943 			sc->sc_fcrtl |= FCRTL_XONE;
   8944 		}
   8945 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8946 			sc->sc_ctrl |= CTRL_RFCE;
   8947 	}
   8948 
   8949 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8950 		DPRINTF(WM_DEBUG_LINK,
   8951 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8952 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8953 	} else {
   8954 		DPRINTF(WM_DEBUG_LINK,
   8955 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8956 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8957 	}
   8958 
   8959 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8960 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8961 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8962 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8963 	if (sc->sc_type == WM_T_80003) {
   8964 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8965 		case IFM_1000_T:
   8966 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8967 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8968 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8969 			break;
   8970 		default:
   8971 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8972 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8973 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8974 			break;
   8975 		}
   8976 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8977 	}
   8978 }
   8979 
   8980 /*
   8981  * wm_kmrn_readreg:
   8982  *
   8983  *	Read a kumeran register
   8984  */
   8985 static int
   8986 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8987 {
   8988 	int rv;
   8989 
   8990 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8991 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8992 			aprint_error_dev(sc->sc_dev,
   8993 			    "%s: failed to get semaphore\n", __func__);
   8994 			return 0;
   8995 		}
   8996 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8997 		if (wm_get_swfwhw_semaphore(sc)) {
   8998 			aprint_error_dev(sc->sc_dev,
   8999 			    "%s: failed to get semaphore\n", __func__);
   9000 			return 0;
   9001 		}
   9002 	}
   9003 
   9004 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9005 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9006 	    KUMCTRLSTA_REN);
   9007 	CSR_WRITE_FLUSH(sc);
   9008 	delay(2);
   9009 
   9010 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9011 
   9012 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9013 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9014 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9015 		wm_put_swfwhw_semaphore(sc);
   9016 
   9017 	return rv;
   9018 }
   9019 
   9020 /*
   9021  * wm_kmrn_writereg:
   9022  *
   9023  *	Write a kumeran register
   9024  */
   9025 static void
   9026 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9027 {
   9028 
   9029 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9030 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9031 			aprint_error_dev(sc->sc_dev,
   9032 			    "%s: failed to get semaphore\n", __func__);
   9033 			return;
   9034 		}
   9035 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9036 		if (wm_get_swfwhw_semaphore(sc)) {
   9037 			aprint_error_dev(sc->sc_dev,
   9038 			    "%s: failed to get semaphore\n", __func__);
   9039 			return;
   9040 		}
   9041 	}
   9042 
   9043 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9044 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9045 	    (val & KUMCTRLSTA_MASK));
   9046 
   9047 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9048 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9049 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9050 		wm_put_swfwhw_semaphore(sc);
   9051 }
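
/*
 * Usage sketch (mirrors wm_gmii_statchg() above; not compiled in):
 * program the Kumeran half-duplex control for gigabit operation.
 */
#if 0
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif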
   9052 
   9053 /* SGMII related */
   9054 
   9055 /*
   9056  * wm_sgmii_uses_mdio
   9057  *
   9058  * Check whether the transaction is to the internal PHY or the external
   9059  * MDIO interface. Return true if it's MDIO.
   9060  */
   9061 static bool
   9062 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9063 {
   9064 	uint32_t reg;
   9065 	bool ismdio = false;
   9066 
   9067 	switch (sc->sc_type) {
   9068 	case WM_T_82575:
   9069 	case WM_T_82576:
   9070 		reg = CSR_READ(sc, WMREG_MDIC);
   9071 		ismdio = ((reg & MDIC_DEST) != 0);
   9072 		break;
   9073 	case WM_T_82580:
   9074 	case WM_T_I350:
   9075 	case WM_T_I354:
   9076 	case WM_T_I210:
   9077 	case WM_T_I211:
   9078 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9079 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9080 		break;
   9081 	default:
   9082 		break;
   9083 	}
   9084 
   9085 	return ismdio;
   9086 }
   9087 
   9088 /*
   9089  * wm_sgmii_readreg:	[mii interface function]
   9090  *
    9091  *	Read a PHY register on the SGMII (via the I2CCMD interface).
    9092  * This could be handled by the PHY layer if we didn't have to lock the
    9093  * resource ...
   9094  */
   9095 static int
   9096 wm_sgmii_readreg(device_t self, int phy, int reg)
   9097 {
   9098 	struct wm_softc *sc = device_private(self);
   9099 	uint32_t i2ccmd;
   9100 	int i, rv;
   9101 
   9102 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9103 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9104 		    __func__);
   9105 		return 0;
   9106 	}
   9107 
   9108 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9109 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9110 	    | I2CCMD_OPCODE_READ;
   9111 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9112 
   9113 	/* Poll the ready bit */
   9114 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9115 		delay(50);
   9116 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9117 		if (i2ccmd & I2CCMD_READY)
   9118 			break;
   9119 	}
   9120 	if ((i2ccmd & I2CCMD_READY) == 0)
   9121 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9122 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9123 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9124 
   9125 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9126 
   9127 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9128 	return rv;
   9129 }
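
/*
 * Reading aid: the I2CCMD data field is byte-swapped with respect to
 * MII register order, so a PHY value of 0x1234 shows up as 0x3412 in
 * the register; the shift-and-mask above and the val_swapped
 * computation in the write function below undo that swap.
 */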
   9130 
   9131 /*
   9132  * wm_sgmii_writereg:	[mii interface function]
   9133  *
   9134  *	Write a PHY register on the SGMII.
   9135  * This could be handled by the PHY layer if we didn't have to lock the
   9136  * ressource ...
   9137  */
   9138 static void
   9139 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9140 {
   9141 	struct wm_softc *sc = device_private(self);
   9142 	uint32_t i2ccmd;
   9143 	int i;
   9144 	int val_swapped;
   9145 
   9146 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9147 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9148 		    __func__);
   9149 		return;
   9150 	}
   9151 	/* Swap the data bytes for the I2C interface */
   9152 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9153 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9154 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9155 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9156 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9157 
   9158 	/* Poll the ready bit */
   9159 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9160 		delay(50);
   9161 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9162 		if (i2ccmd & I2CCMD_READY)
   9163 			break;
   9164 	}
   9165 	if ((i2ccmd & I2CCMD_READY) == 0)
   9166 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9167 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9168 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9169 
    9170 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9171 }
   9172 
   9173 /* TBI related */
   9174 
   9175 /*
   9176  * wm_tbi_mediainit:
   9177  *
   9178  *	Initialize media for use on 1000BASE-X devices.
   9179  */
   9180 static void
   9181 wm_tbi_mediainit(struct wm_softc *sc)
   9182 {
   9183 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9184 	const char *sep = "";
   9185 
   9186 	if (sc->sc_type < WM_T_82543)
   9187 		sc->sc_tipg = TIPG_WM_DFLT;
   9188 	else
   9189 		sc->sc_tipg = TIPG_LG_DFLT;
   9190 
   9191 	sc->sc_tbi_serdes_anegticks = 5;
   9192 
   9193 	/* Initialize our media structures */
   9194 	sc->sc_mii.mii_ifp = ifp;
   9195 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9196 
   9197 	if ((sc->sc_type >= WM_T_82575)
   9198 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9199 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9200 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9201 	else
   9202 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9203 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9204 
   9205 	/*
   9206 	 * SWD Pins:
   9207 	 *
   9208 	 *	0 = Link LED (output)
   9209 	 *	1 = Loss Of Signal (input)
   9210 	 */
   9211 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9212 
   9213 	/* XXX Perhaps this is only for TBI */
   9214 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9215 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9216 
   9217 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9218 		sc->sc_ctrl &= ~CTRL_LRST;
   9219 
   9220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9221 
   9222 #define	ADD(ss, mm, dd)							\
   9223 do {									\
   9224 	aprint_normal("%s%s", sep, ss);					\
   9225 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9226 	sep = ", ";							\
   9227 } while (/*CONSTCOND*/0)
   9228 
   9229 	aprint_normal_dev(sc->sc_dev, "");
   9230 
   9231 	/* Only 82545 is LX */
   9232 	if (sc->sc_type == WM_T_82545) {
   9233 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9234 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9235 	} else {
   9236 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9237 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9238 	}
   9239 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9240 	aprint_normal("\n");
   9241 
   9242 #undef ADD
   9243 
   9244 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9245 }
   9246 
   9247 /*
   9248  * wm_tbi_mediachange:	[ifmedia interface function]
   9249  *
   9250  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9251  */
   9252 static int
   9253 wm_tbi_mediachange(struct ifnet *ifp)
   9254 {
   9255 	struct wm_softc *sc = ifp->if_softc;
   9256 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9257 	uint32_t status;
   9258 	int i;
   9259 
   9260 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9261 		/* XXX need some work for >= 82571 and < 82575 */
   9262 		if (sc->sc_type < WM_T_82575)
   9263 			return 0;
   9264 	}
   9265 
   9266 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9267 	    || (sc->sc_type >= WM_T_82575))
   9268 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9269 
   9270 	sc->sc_ctrl &= ~CTRL_LRST;
   9271 	sc->sc_txcw = TXCW_ANE;
   9272 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9273 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9274 	else if (ife->ifm_media & IFM_FDX)
   9275 		sc->sc_txcw |= TXCW_FD;
   9276 	else
   9277 		sc->sc_txcw |= TXCW_HD;
   9278 
   9279 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9280 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9281 
   9282 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9283 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9284 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9285 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9286 	CSR_WRITE_FLUSH(sc);
   9287 	delay(1000);
   9288 
   9289 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9290 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9291 
   9292 	/*
    9293 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    9294 	 * if the optics detect a signal, and clear if they don't.
   9295 	 */
   9296 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9297 		/* Have signal; wait for the link to come up. */
   9298 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9299 			delay(10000);
   9300 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9301 				break;
   9302 		}
   9303 
   9304 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9305 			    device_xname(sc->sc_dev),i));
   9306 
   9307 		status = CSR_READ(sc, WMREG_STATUS);
   9308 		DPRINTF(WM_DEBUG_LINK,
   9309 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9310 			device_xname(sc->sc_dev),status, STATUS_LU));
   9311 		if (status & STATUS_LU) {
   9312 			/* Link is up. */
   9313 			DPRINTF(WM_DEBUG_LINK,
   9314 			    ("%s: LINK: set media -> link up %s\n",
   9315 			    device_xname(sc->sc_dev),
   9316 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9317 
   9318 			/*
    9319 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9320 			 * automatically, so we should re-read it into sc->sc_ctrl
   9321 			 */
   9322 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9323 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9324 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9325 			if (status & STATUS_FD)
   9326 				sc->sc_tctl |=
   9327 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9328 			else
   9329 				sc->sc_tctl |=
   9330 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9331 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9332 				sc->sc_fcrtl |= FCRTL_XONE;
   9333 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9334 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9335 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9336 				      sc->sc_fcrtl);
   9337 			sc->sc_tbi_linkup = 1;
   9338 		} else {
   9339 			if (i == WM_LINKUP_TIMEOUT)
   9340 				wm_check_for_link(sc);
   9341 			/* Link is down. */
   9342 			DPRINTF(WM_DEBUG_LINK,
   9343 			    ("%s: LINK: set media -> link down\n",
   9344 			    device_xname(sc->sc_dev)));
   9345 			sc->sc_tbi_linkup = 0;
   9346 		}
   9347 	} else {
   9348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9349 		    device_xname(sc->sc_dev)));
   9350 		sc->sc_tbi_linkup = 0;
   9351 	}
   9352 
   9353 	wm_tbi_serdes_set_linkled(sc);
   9354 
   9355 	return 0;
   9356 }
   9357 
   9358 /*
   9359  * wm_tbi_mediastatus:	[ifmedia interface function]
   9360  *
   9361  *	Get the current interface media status on a 1000BASE-X device.
   9362  */
   9363 static void
   9364 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9365 {
   9366 	struct wm_softc *sc = ifp->if_softc;
   9367 	uint32_t ctrl, status;
   9368 
   9369 	ifmr->ifm_status = IFM_AVALID;
   9370 	ifmr->ifm_active = IFM_ETHER;
   9371 
   9372 	status = CSR_READ(sc, WMREG_STATUS);
   9373 	if ((status & STATUS_LU) == 0) {
   9374 		ifmr->ifm_active |= IFM_NONE;
   9375 		return;
   9376 	}
   9377 
   9378 	ifmr->ifm_status |= IFM_ACTIVE;
   9379 	/* Only 82545 is LX */
   9380 	if (sc->sc_type == WM_T_82545)
   9381 		ifmr->ifm_active |= IFM_1000_LX;
   9382 	else
   9383 		ifmr->ifm_active |= IFM_1000_SX;
   9384 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9385 		ifmr->ifm_active |= IFM_FDX;
   9386 	else
   9387 		ifmr->ifm_active |= IFM_HDX;
   9388 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9389 	if (ctrl & CTRL_RFCE)
   9390 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9391 	if (ctrl & CTRL_TFCE)
   9392 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9393 }
   9394 
   9395 /* XXX TBI only */
   9396 static int
   9397 wm_check_for_link(struct wm_softc *sc)
   9398 {
   9399 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9400 	uint32_t rxcw;
   9401 	uint32_t ctrl;
   9402 	uint32_t status;
   9403 	uint32_t sig;
   9404 
   9405 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9406 		/* XXX need some work for >= 82571 */
   9407 		if (sc->sc_type >= WM_T_82571) {
   9408 			sc->sc_tbi_linkup = 1;
   9409 			return 0;
   9410 		}
   9411 	}
   9412 
   9413 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9414 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9415 	status = CSR_READ(sc, WMREG_STATUS);
   9416 
   9417 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9418 
   9419 	DPRINTF(WM_DEBUG_LINK,
   9420 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9421 		device_xname(sc->sc_dev), __func__,
   9422 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9423 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9424 
   9425 	/*
   9426 	 * SWDPIN   LU RXCW
   9427 	 *      0    0    0
   9428 	 *      0    0    1	(should not happen)
   9429 	 *      0    1    0	(should not happen)
   9430 	 *      0    1    1	(should not happen)
   9431 	 *      1    0    0	Disable autonego and force linkup
   9432 	 *      1    0    1	got /C/ but not linkup yet
   9433 	 *      1    1    0	(linkup)
   9434 	 *      1    1    1	If IFM_AUTO, back to autonego
   9435 	 *
   9436 	 */
   9437 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9438 	    && ((status & STATUS_LU) == 0)
   9439 	    && ((rxcw & RXCW_C) == 0)) {
   9440 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9441 			__func__));
   9442 		sc->sc_tbi_linkup = 0;
   9443 		/* Disable auto-negotiation in the TXCW register */
   9444 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9445 
   9446 		/*
   9447 		 * Force link-up and also force full-duplex.
   9448 		 *
    9449 		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
    9450 		 * automatically, so base sc->sc_ctrl on the value just read
   9451 		 */
   9452 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9453 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9454 	} else if (((status & STATUS_LU) != 0)
   9455 	    && ((rxcw & RXCW_C) != 0)
   9456 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9457 		sc->sc_tbi_linkup = 1;
   9458 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9459 			__func__));
   9460 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9461 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9462 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9463 	    && ((rxcw & RXCW_C) != 0)) {
   9464 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9465 	} else {
   9466 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9467 			status));
   9468 	}
   9469 
   9470 	return 0;
   9471 }
   9472 
   9473 /*
   9474  * wm_tbi_tick:
   9475  *
   9476  *	Check the link on TBI devices.
   9477  *	This function acts as mii_tick().
   9478  */
   9479 static void
   9480 wm_tbi_tick(struct wm_softc *sc)
   9481 {
   9482 	struct mii_data *mii = &sc->sc_mii;
   9483 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9484 	uint32_t status;
   9485 
   9486 	KASSERT(WM_CORE_LOCKED(sc));
   9487 
   9488 	status = CSR_READ(sc, WMREG_STATUS);
   9489 
   9490 	/* XXX is this needed? */
   9491 	(void)CSR_READ(sc, WMREG_RXCW);
   9492 	(void)CSR_READ(sc, WMREG_CTRL);
   9493 
   9494 	/* set link status */
   9495 	if ((status & STATUS_LU) == 0) {
   9496 		DPRINTF(WM_DEBUG_LINK,
   9497 		    ("%s: LINK: checklink -> down\n",
   9498 			device_xname(sc->sc_dev)));
   9499 		sc->sc_tbi_linkup = 0;
   9500 	} else if (sc->sc_tbi_linkup == 0) {
   9501 		DPRINTF(WM_DEBUG_LINK,
   9502 		    ("%s: LINK: checklink -> up %s\n",
   9503 			device_xname(sc->sc_dev),
   9504 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9505 		sc->sc_tbi_linkup = 1;
   9506 		sc->sc_tbi_serdes_ticks = 0;
   9507 	}
   9508 
   9509 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9510 		goto setled;
   9511 
   9512 	if ((status & STATUS_LU) == 0) {
   9513 		sc->sc_tbi_linkup = 0;
   9514 		/* If the timer expired, retry autonegotiation */
   9515 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9516 		    && (++sc->sc_tbi_serdes_ticks
   9517 			>= sc->sc_tbi_serdes_anegticks)) {
   9518 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9519 			sc->sc_tbi_serdes_ticks = 0;
   9520 			/*
   9521 			 * Reset the link, and let autonegotiation do
   9522 			 * its thing
   9523 			 */
   9524 			sc->sc_ctrl |= CTRL_LRST;
   9525 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9526 			CSR_WRITE_FLUSH(sc);
   9527 			delay(1000);
   9528 			sc->sc_ctrl &= ~CTRL_LRST;
   9529 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9530 			CSR_WRITE_FLUSH(sc);
   9531 			delay(1000);
   9532 			CSR_WRITE(sc, WMREG_TXCW,
   9533 			    sc->sc_txcw & ~TXCW_ANE);
   9534 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9535 		}
   9536 	}
   9537 
   9538 setled:
   9539 	wm_tbi_serdes_set_linkled(sc);
   9540 }
   9541 
   9542 /* SERDES related */
   9543 static void
   9544 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9545 {
   9546 	uint32_t reg;
   9547 
   9548 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9549 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9550 		return;
   9551 
   9552 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9553 	reg |= PCS_CFG_PCS_EN;
   9554 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9555 
   9556 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9557 	reg &= ~CTRL_EXT_SWDPIN(3);
   9558 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9559 	CSR_WRITE_FLUSH(sc);
   9560 }
   9561 
   9562 static int
   9563 wm_serdes_mediachange(struct ifnet *ifp)
   9564 {
   9565 	struct wm_softc *sc = ifp->if_softc;
   9566 	bool pcs_autoneg = true; /* XXX */
   9567 	uint32_t ctrl_ext, pcs_lctl, reg;
   9568 
   9569 	/* XXX Currently, this function is not called on 8257[12] */
   9570 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9571 	    || (sc->sc_type >= WM_T_82575))
   9572 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9573 
   9574 	wm_serdes_power_up_link_82575(sc);
   9575 
   9576 	sc->sc_ctrl |= CTRL_SLU;
   9577 
   9578 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9579 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9580 
   9581 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9582 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9583 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9584 	case CTRL_EXT_LINK_MODE_SGMII:
   9585 		pcs_autoneg = true;
   9586 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9587 		break;
   9588 	case CTRL_EXT_LINK_MODE_1000KX:
   9589 		pcs_autoneg = false;
   9590 		/* FALLTHROUGH */
   9591 	default:
   9592 		if ((sc->sc_type == WM_T_82575)
   9593 		    || (sc->sc_type == WM_T_82576)) {
   9594 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9595 				pcs_autoneg = false;
   9596 		}
   9597 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9598 		    | CTRL_FRCFDX;
   9599 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9600 	}
   9601 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9602 
   9603 	if (pcs_autoneg) {
   9604 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9605 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9606 
   9607 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9608 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9609 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9610 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9611 	} else
   9612 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9613 
   9614 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9615 
   9617 	return 0;
   9618 }
   9619 
   9620 static void
   9621 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9622 {
   9623 	struct wm_softc *sc = ifp->if_softc;
   9624 	struct mii_data *mii = &sc->sc_mii;
   9625 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9626 	uint32_t pcs_adv, pcs_lpab, reg;
   9627 
   9628 	ifmr->ifm_status = IFM_AVALID;
   9629 	ifmr->ifm_active = IFM_ETHER;
   9630 
   9631 	/* Check PCS */
   9632 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9633 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9634 		ifmr->ifm_active |= IFM_NONE;
   9635 		sc->sc_tbi_linkup = 0;
   9636 		goto setled;
   9637 	}
   9638 
   9639 	sc->sc_tbi_linkup = 1;
   9640 	ifmr->ifm_status |= IFM_ACTIVE;
   9641 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9642 	if ((reg & PCS_LSTS_FDX) != 0)
   9643 		ifmr->ifm_active |= IFM_FDX;
   9644 	else
   9645 		ifmr->ifm_active |= IFM_HDX;
   9646 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9647 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9648 		/* Check flow */
   9649 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9650 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9651 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9652 			goto setled;
   9653 		}
   9654 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9655 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9656 		DPRINTF(WM_DEBUG_LINK,
   9657 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9658 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9659 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9660 			mii->mii_media_active |= IFM_FLOW
   9661 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9662 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9663 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9664 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9665 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9666 			mii->mii_media_active |= IFM_FLOW
   9667 			    | IFM_ETH_TXPAUSE;
   9668 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9669 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9670 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9671 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9672 			mii->mii_media_active |= IFM_FLOW
   9673 			    | IFM_ETH_RXPAUSE;
   9674 		} else {
   9675 		}
   9676 	}
   9677 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9678 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9679 setled:
   9680 	wm_tbi_serdes_set_linkled(sc);
   9681 }
   9682 
   9683 /*
   9684  * wm_serdes_tick:
   9685  *
   9686  *	Check the link on serdes devices.
   9687  */
   9688 static void
   9689 wm_serdes_tick(struct wm_softc *sc)
   9690 {
   9691 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9692 	struct mii_data *mii = &sc->sc_mii;
   9693 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9694 	uint32_t reg;
   9695 
   9696 	KASSERT(WM_CORE_LOCKED(sc));
   9697 
   9698 	mii->mii_media_status = IFM_AVALID;
   9699 	mii->mii_media_active = IFM_ETHER;
   9700 
   9701 	/* Check PCS */
   9702 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9703 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9704 		mii->mii_media_status |= IFM_ACTIVE;
   9705 		sc->sc_tbi_linkup = 1;
   9706 		sc->sc_tbi_serdes_ticks = 0;
   9707 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9708 		if ((reg & PCS_LSTS_FDX) != 0)
   9709 			mii->mii_media_active |= IFM_FDX;
   9710 		else
   9711 			mii->mii_media_active |= IFM_HDX;
   9712 	} else {
    9713 		mii->mii_media_active |= IFM_NONE;
    9714 		sc->sc_tbi_linkup = 0;
    9715 		/* If the timer expired, retry autonegotiation */
   9716 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9717 		    && (++sc->sc_tbi_serdes_ticks
   9718 			>= sc->sc_tbi_serdes_anegticks)) {
   9719 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9720 			sc->sc_tbi_serdes_ticks = 0;
   9721 			/* XXX */
   9722 			wm_serdes_mediachange(ifp);
   9723 		}
   9724 	}
   9725 
   9726 	wm_tbi_serdes_set_linkled(sc);
   9727 }
   9728 
   9729 /* SFP related */
   9730 
   9731 static int
   9732 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9733 {
   9734 	uint32_t i2ccmd;
   9735 	int i;
   9736 
   9737 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9738 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9739 
   9740 	/* Poll the ready bit */
   9741 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9742 		delay(50);
   9743 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9744 		if (i2ccmd & I2CCMD_READY)
   9745 			break;
   9746 	}
   9747 	if ((i2ccmd & I2CCMD_READY) == 0)
   9748 		return -1;
   9749 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9750 		return -1;
   9751 
   9752 	*data = i2ccmd & 0x00ff;
   9753 
   9754 	return 0;
   9755 }
   9756 
   9757 static uint32_t
   9758 wm_sfp_get_media_type(struct wm_softc *sc)
   9759 {
   9760 	uint32_t ctrl_ext;
   9761 	uint8_t val = 0;
   9762 	int timeout = 3;
   9763 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9764 	int rv = -1;
   9765 
   9766 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9767 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9768 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9769 	CSR_WRITE_FLUSH(sc);
   9770 
   9771 	/* Read SFP module data */
   9772 	while (timeout) {
   9773 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9774 		if (rv == 0)
   9775 			break;
   9776 		delay(100*1000); /* XXX too big */
   9777 		timeout--;
   9778 	}
   9779 	if (rv != 0)
   9780 		goto out;
   9781 	switch (val) {
   9782 	case SFF_SFP_ID_SFF:
   9783 		aprint_normal_dev(sc->sc_dev,
   9784 		    "Module/Connector soldered to board\n");
   9785 		break;
   9786 	case SFF_SFP_ID_SFP:
   9787 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9788 		break;
   9789 	case SFF_SFP_ID_UNKNOWN:
   9790 		goto out;
   9791 	default:
   9792 		break;
   9793 	}
   9794 
   9795 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9796 	if (rv != 0) {
   9797 		goto out;
   9798 	}
   9799 
   9800 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9801 		mediatype = WM_MEDIATYPE_SERDES;
    9802 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    9803 		sc->sc_flags |= WM_F_SGMII;
    9804 		mediatype = WM_MEDIATYPE_COPPER;
    9805 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9806 		sc->sc_flags |= WM_F_SGMII;
   9807 		mediatype = WM_MEDIATYPE_SERDES;
   9808 	}
   9809 
   9810 out:
   9811 	/* Restore I2C interface setting */
   9812 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9813 
   9814 	return mediatype;
   9815 }
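/*
 * SFF_SFP_ID_OFF and SFF_SFP_ETH_FLAGS_OFF above index the module's
 * SFF-8472 ID EEPROM: the identifier byte names the module/connector
 * type and the Ethernet compliance-code byte carries the
 * 1000BASE-SX/LX/T and 100BASE-FX bits the checks above key on.
 */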
   9816 /*
   9817  * NVM related.
   9818  * Microwire, SPI (w/wo EERD) and Flash.
   9819  */
   9820 
    9821 /* Both SPI and Microwire */
   9822 
   9823 /*
   9824  * wm_eeprom_sendbits:
   9825  *
   9826  *	Send a series of bits to the EEPROM.
   9827  */
   9828 static void
   9829 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9830 {
   9831 	uint32_t reg;
   9832 	int x;
   9833 
   9834 	reg = CSR_READ(sc, WMREG_EECD);
   9835 
   9836 	for (x = nbits; x > 0; x--) {
   9837 		if (bits & (1U << (x - 1)))
   9838 			reg |= EECD_DI;
   9839 		else
   9840 			reg &= ~EECD_DI;
   9841 		CSR_WRITE(sc, WMREG_EECD, reg);
   9842 		CSR_WRITE_FLUSH(sc);
   9843 		delay(2);
   9844 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9845 		CSR_WRITE_FLUSH(sc);
   9846 		delay(2);
   9847 		CSR_WRITE(sc, WMREG_EECD, reg);
   9848 		CSR_WRITE_FLUSH(sc);
   9849 		delay(2);
   9850 	}
   9851 }
   9852 
   9853 /*
   9854  * wm_eeprom_recvbits:
   9855  *
   9856  *	Receive a series of bits from the EEPROM.
   9857  */
   9858 static void
   9859 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9860 {
   9861 	uint32_t reg, val;
   9862 	int x;
   9863 
   9864 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9865 
   9866 	val = 0;
   9867 	for (x = nbits; x > 0; x--) {
   9868 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9869 		CSR_WRITE_FLUSH(sc);
   9870 		delay(2);
   9871 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9872 			val |= (1U << (x - 1));
   9873 		CSR_WRITE(sc, WMREG_EECD, reg);
   9874 		CSR_WRITE_FLUSH(sc);
   9875 		delay(2);
   9876 	}
   9877 	*valp = val;
   9878 }
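/*
 * Worked example: wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) clocks
 * the opcode out MSB first, one SK pulse per bit (for the usual
 * Microwire READ opcode 110b that is 1, 1, 0), and
 * wm_eeprom_recvbits() mirrors this by sampling DO after each rising
 * SK edge.
 */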
   9879 
   9880 /* Microwire */
   9881 
   9882 /*
   9883  * wm_nvm_read_uwire:
   9884  *
   9885  *	Read a word from the EEPROM using the MicroWire protocol.
   9886  */
   9887 static int
   9888 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9889 {
   9890 	uint32_t reg, val;
   9891 	int i;
   9892 
   9893 	for (i = 0; i < wordcnt; i++) {
   9894 		/* Clear SK and DI. */
   9895 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9896 		CSR_WRITE(sc, WMREG_EECD, reg);
   9897 
   9898 		/*
   9899 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9900 		 * and Xen.
   9901 		 *
    9902 		 * We use this workaround only for the 82540 because
    9903 		 * qemu's e1000 acts as an 82540.
   9904 		 */
   9905 		if (sc->sc_type == WM_T_82540) {
   9906 			reg |= EECD_SK;
   9907 			CSR_WRITE(sc, WMREG_EECD, reg);
   9908 			reg &= ~EECD_SK;
   9909 			CSR_WRITE(sc, WMREG_EECD, reg);
   9910 			CSR_WRITE_FLUSH(sc);
   9911 			delay(2);
   9912 		}
   9913 		/* XXX: end of workaround */
   9914 
   9915 		/* Set CHIP SELECT. */
   9916 		reg |= EECD_CS;
   9917 		CSR_WRITE(sc, WMREG_EECD, reg);
   9918 		CSR_WRITE_FLUSH(sc);
   9919 		delay(2);
   9920 
   9921 		/* Shift in the READ command. */
   9922 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9923 
   9924 		/* Shift in address. */
   9925 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9926 
   9927 		/* Shift out the data. */
   9928 		wm_eeprom_recvbits(sc, &val, 16);
   9929 		data[i] = val & 0xffff;
   9930 
   9931 		/* Clear CHIP SELECT. */
   9932 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9933 		CSR_WRITE(sc, WMREG_EECD, reg);
   9934 		CSR_WRITE_FLUSH(sc);
   9935 		delay(2);
   9936 	}
   9937 
   9938 	return 0;
   9939 }
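/*
 * Typical use goes through wm_nvm_read() below, e.g. fetching the
 * three station-address words (a sketch; error handling elided):
 *
 *	uint16_t ea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, ea) == 0)
 *		... ea[0..2] hold the Ethernet address words ...
 */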
   9940 
   9941 /* SPI */
   9942 
   9943 /*
   9944  * Set SPI and FLASH related information from the EECD register.
   9945  * For 82541 and 82547, the word size is taken from EEPROM.
   9946  */
   9947 static int
   9948 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9949 {
   9950 	int size;
   9951 	uint32_t reg;
   9952 	uint16_t data;
   9953 
   9954 	reg = CSR_READ(sc, WMREG_EECD);
   9955 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9956 
   9957 	/* Read the size of NVM from EECD by default */
   9958 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9959 	switch (sc->sc_type) {
   9960 	case WM_T_82541:
   9961 	case WM_T_82541_2:
   9962 	case WM_T_82547:
   9963 	case WM_T_82547_2:
    9964 		/* Set a dummy word size so the EEPROM read below works */
   9965 		sc->sc_nvm_wordsize = 64;
   9966 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9967 		reg = data;
   9968 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9969 		if (size == 0)
   9970 			size = 6; /* 64 word size */
   9971 		else
   9972 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9973 		break;
   9974 	case WM_T_80003:
   9975 	case WM_T_82571:
   9976 	case WM_T_82572:
   9977 	case WM_T_82573: /* SPI case */
   9978 	case WM_T_82574: /* SPI case */
   9979 	case WM_T_82583: /* SPI case */
   9980 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9981 		if (size > 14)
   9982 			size = 14;
   9983 		break;
   9984 	case WM_T_82575:
   9985 	case WM_T_82576:
   9986 	case WM_T_82580:
   9987 	case WM_T_I350:
   9988 	case WM_T_I354:
   9989 	case WM_T_I210:
   9990 	case WM_T_I211:
   9991 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9992 		if (size > 15)
   9993 			size = 15;
   9994 		break;
   9995 	default:
   9996 		aprint_error_dev(sc->sc_dev,
    9997 		    "%s: unknown device (%d)\n", __func__, sc->sc_type);
    9998 		return -1;
   10000 	}
   10001 
   10002 	sc->sc_nvm_wordsize = 1 << size;
   10003 
   10004 	return 0;
   10005 }
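/*
 * Worked example: an EECD size field of 1 on an 82571 gives
 * size = 1 + NVM_WORD_SIZE_BASE_SHIFT, so sc_nvm_wordsize becomes
 * 1 << size words; with the customary base shift of 6 that is
 * 1 << 7 = 128 words (256 bytes).
 */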
   10006 
   10007 /*
   10008  * wm_nvm_ready_spi:
   10009  *
   10010  *	Wait for a SPI EEPROM to be ready for commands.
   10011  */
   10012 static int
   10013 wm_nvm_ready_spi(struct wm_softc *sc)
   10014 {
   10015 	uint32_t val;
   10016 	int usec;
   10017 
   10018 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10019 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10020 		wm_eeprom_recvbits(sc, &val, 8);
   10021 		if ((val & SPI_SR_RDY) == 0)
   10022 			break;
   10023 	}
   10024 	if (usec >= SPI_MAX_RETRIES) {
    10025 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10026 		return 1;
   10027 	}
   10028 	return 0;
   10029 }
   10030 
   10031 /*
   10032  * wm_nvm_read_spi:
   10033  *
    10034  *	Read a word from the EEPROM using the SPI protocol.
   10035  */
   10036 static int
   10037 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10038 {
   10039 	uint32_t reg, val;
   10040 	int i;
   10041 	uint8_t opc;
   10042 
   10043 	/* Clear SK and CS. */
   10044 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10045 	CSR_WRITE(sc, WMREG_EECD, reg);
   10046 	CSR_WRITE_FLUSH(sc);
   10047 	delay(2);
   10048 
   10049 	if (wm_nvm_ready_spi(sc))
   10050 		return 1;
   10051 
   10052 	/* Toggle CS to flush commands. */
   10053 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10054 	CSR_WRITE_FLUSH(sc);
   10055 	delay(2);
   10056 	CSR_WRITE(sc, WMREG_EECD, reg);
   10057 	CSR_WRITE_FLUSH(sc);
   10058 	delay(2);
   10059 
   10060 	opc = SPI_OPC_READ;
   10061 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10062 		opc |= SPI_OPC_A8;
   10063 
   10064 	wm_eeprom_sendbits(sc, opc, 8);
   10065 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10066 
   10067 	for (i = 0; i < wordcnt; i++) {
   10068 		wm_eeprom_recvbits(sc, &val, 16);
   10069 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10070 	}
   10071 
   10072 	/* Raise CS and clear SK. */
   10073 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10074 	CSR_WRITE(sc, WMREG_EECD, reg);
   10075 	CSR_WRITE_FLUSH(sc);
   10076 	delay(2);
   10077 
   10078 	return 0;
   10079 }
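/*
 * The SPI part shifts each 16-bit word out high byte first, so the
 * raw value arrives byte-swapped relative to host order; the swap
 * above restores it.  E.g. a raw 0x3412 from the wire is stored as
 * data[i] == 0x1234.
 */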
   10080 
    10081 /* Access via the EERD register */
   10082 
   10083 static int
   10084 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10085 {
   10086 	uint32_t attempts = 100000;
   10087 	uint32_t i, reg = 0;
   10088 	int32_t done = -1;
   10089 
   10090 	for (i = 0; i < attempts; i++) {
   10091 		reg = CSR_READ(sc, rw);
   10092 
   10093 		if (reg & EERD_DONE) {
   10094 			done = 0;
   10095 			break;
   10096 		}
   10097 		delay(5);
   10098 	}
   10099 
   10100 	return done;
   10101 }
   10102 
   10103 static int
   10104 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10105     uint16_t *data)
   10106 {
   10107 	int i, eerd = 0;
   10108 	int error = 0;
   10109 
   10110 	for (i = 0; i < wordcnt; i++) {
   10111 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10112 
   10113 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10114 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10115 		if (error != 0)
   10116 			break;
   10117 
   10118 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10119 	}
   10120 
   10121 	return error;
   10122 }
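/*
 * The EERD path is purely register-driven: write the word address
 * plus EERD_START, poll for EERD_DONE, then pull the 16-bit result
 * out of the data field.  At 100000 polls of 5us each, a single word
 * read gives up after roughly half a second.
 */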
   10123 
   10124 /* Flash */
   10125 
   10126 static int
   10127 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10128 {
   10129 	uint32_t eecd;
   10130 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10131 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10132 	uint8_t sig_byte = 0;
   10133 
   10134 	switch (sc->sc_type) {
   10135 	case WM_T_PCH_SPT:
   10136 		/*
   10137 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10138 		 * sector valid bits from the NVM.
   10139 		 */
   10140 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10141 		if ((*bank == 0) || (*bank == 1)) {
    10142 			aprint_error_dev(sc->sc_dev,
    10143 			    "%s: no valid NVM bank present\n", __func__);
   10145 			return -1;
   10146 		} else {
   10147 			*bank = *bank - 2;
   10148 			return 0;
   10149 		}
   10150 	case WM_T_ICH8:
   10151 	case WM_T_ICH9:
   10152 		eecd = CSR_READ(sc, WMREG_EECD);
   10153 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10154 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10155 			return 0;
   10156 		}
   10157 		/* FALLTHROUGH */
   10158 	default:
   10159 		/* Default to 0 */
   10160 		*bank = 0;
   10161 
   10162 		/* Check bank 0 */
   10163 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10164 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10165 			*bank = 0;
   10166 			return 0;
   10167 		}
   10168 
   10169 		/* Check bank 1 */
   10170 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10171 		    &sig_byte);
   10172 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10173 			*bank = 1;
   10174 			return 0;
   10175 		}
   10176 	}
   10177 
   10178 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10179 		device_xname(sc->sc_dev)));
   10180 	return -1;
   10181 }
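/*
 * The signature probed above is the high byte of flash word
 * ICH_NVM_SIG_WORD in each bank (hence the "* 2 + 1" byte offset); a
 * bank is valid when that byte, masked with ICH_NVM_VALID_SIG_MASK,
 * equals ICH_NVM_SIG_VALUE.  Bank 1 starts sc_ich8_flash_bank_size
 * words (twice that in bytes) past bank 0.
 */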
   10182 
   10183 /******************************************************************************
   10184  * This function does initial flash setup so that a new read/write/erase cycle
   10185  * can be started.
   10186  *
   10187  * sc - The pointer to the hw structure
   10188  ****************************************************************************/
   10189 static int32_t
   10190 wm_ich8_cycle_init(struct wm_softc *sc)
   10191 {
   10192 	uint16_t hsfsts;
   10193 	int32_t error = 1;
   10194 	int32_t i     = 0;
   10195 
   10196 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10197 
    10198 	/* Check the Flash Descriptor Valid bit in HW status */
   10199 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10200 		return error;
   10201 	}
   10202 
    10203 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   10205 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10206 
   10207 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10208 
    10209 	/*
    10210 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10211 	 * check against before starting a new cycle, or the FDONE bit
    10212 	 * should be set by hardware after reset so that it can indicate
    10213 	 * whether a cycle is in progress or has completed.  We should
    10214 	 * also have a software semaphore guarding FDONE or the
    10215 	 * in-progress bit so that two threads cannot start a cycle at
    10216 	 * the same time.
    10217 	 */
   10219 
   10220 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10221 		/*
   10222 		 * There is no cycle running at present, so we can start a
   10223 		 * cycle
   10224 		 */
   10225 
   10226 		/* Begin by setting Flash Cycle Done. */
   10227 		hsfsts |= HSFSTS_DONE;
   10228 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10229 		error = 0;
   10230 	} else {
   10231 		/*
    10232 		 * Otherwise, poll for some time so the current cycle has a
   10233 		 * chance to end before giving up.
   10234 		 */
   10235 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10236 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10237 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10238 				error = 0;
   10239 				break;
   10240 			}
   10241 			delay(1);
   10242 		}
   10243 		if (error == 0) {
    10244 			/*
    10245 			 * The previous cycle finished before we timed out;
    10246 			 * now set the Flash Cycle Done bit.
    10247 			 */
   10248 			hsfsts |= HSFSTS_DONE;
   10249 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10250 		}
   10251 	}
   10252 	return error;
   10253 }
   10254 
   10255 /******************************************************************************
   10256  * This function starts a flash cycle and waits for its completion
   10257  *
   10258  * sc - The pointer to the hw structure
   10259  ****************************************************************************/
   10260 static int32_t
   10261 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10262 {
   10263 	uint16_t hsflctl;
   10264 	uint16_t hsfsts;
   10265 	int32_t error = 1;
   10266 	uint32_t i = 0;
   10267 
   10268 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10269 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10270 	hsflctl |= HSFCTL_GO;
   10271 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10272 
   10273 	/* Wait till FDONE bit is set to 1 */
   10274 	do {
   10275 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10276 		if (hsfsts & HSFSTS_DONE)
   10277 			break;
   10278 		delay(1);
   10279 		i++;
   10280 	} while (i < timeout);
    10281 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10282 		error = 0;
   10283 
   10284 	return error;
   10285 }
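/*
 * A complete flash read is therefore: wm_ich8_cycle_init() to claim
 * the cycle machinery, program HSFCTL with the byte count and
 * ICH_CYCLE_READ, write the linear address to FADDR, then
 * wm_ich8_flash_cycle() to set HSFCTL_GO and wait for FDONE.  That is
 * exactly the sequence wm_read_ich8_data() below implements.
 */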
   10286 
   10287 /******************************************************************************
   10288  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10289  *
   10290  * sc - The pointer to the hw structure
   10291  * index - The index of the byte or word to read.
    10292  * size - Size of data to read: 1=byte, 2=word, 4=dword
   10293  * data - Pointer to the word to store the value read.
   10294  *****************************************************************************/
   10295 static int32_t
   10296 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10297     uint32_t size, uint32_t *data)
   10298 {
   10299 	uint16_t hsfsts;
   10300 	uint16_t hsflctl;
   10301 	uint32_t flash_linear_address;
   10302 	uint32_t flash_data = 0;
   10303 	int32_t error = 1;
   10304 	int32_t count = 0;
   10305 
    10306 	if (size < 1 || size > 4 || data == NULL ||
   10307 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10308 		return error;
   10309 
   10310 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10311 	    sc->sc_ich8_flash_base;
   10312 
   10313 	do {
   10314 		delay(1);
   10315 		/* Steps */
   10316 		error = wm_ich8_cycle_init(sc);
   10317 		if (error)
   10318 			break;
   10319 
   10320 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10321 		/* Byte count field encodes size - 1 (0=byte, 1=word, 3=dword). */
   10322 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10323 		    & HSFCTL_BCOUNT_MASK;
   10324 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10325 		if (sc->sc_type == WM_T_PCH_SPT) {
   10326 			/*
    10327 			 * In SPT, this register is in LAN memory space, not
   10328 			 * flash. Therefore, only 32 bit access is supported.
   10329 			 */
   10330 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10331 			    (uint32_t)hsflctl);
   10332 		} else
   10333 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10334 
   10335 		/*
   10336 		 * Write the last 24 bits of index into Flash Linear address
   10337 		 * field in Flash Address
   10338 		 */
   10339 		/* TODO: TBD maybe check the index against the size of flash */
   10340 
   10341 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10342 
   10343 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10344 
    10345 		/*
    10346 		 * If FCERR is set, clear it and retry the whole sequence
    10347 		 * a few more times; otherwise read the Flash Data0
    10348 		 * register, which returns the data least significant
    10349 		 * byte first.
    10350 		 */
   10351 		if (error == 0) {
   10352 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10353 			if (size == 1)
   10354 				*data = (uint8_t)(flash_data & 0x000000FF);
   10355 			else if (size == 2)
   10356 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10357 			else if (size == 4)
   10358 				*data = (uint32_t)flash_data;
   10359 			break;
   10360 		} else {
   10361 			/*
   10362 			 * If we've gotten here, then things are probably
   10363 			 * completely hosed, but if the error condition is
   10364 			 * detected, it won't hurt to give it another try...
   10365 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10366 			 */
   10367 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10368 			if (hsfsts & HSFSTS_ERR) {
   10369 				/* Repeat for some time before giving up. */
   10370 				continue;
   10371 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10372 				break;
   10373 		}
   10374 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10375 
   10376 	return error;
   10377 }
   10378 
   10379 /******************************************************************************
   10380  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10381  *
   10382  * sc - pointer to wm_hw structure
   10383  * index - The index of the byte to read.
   10384  * data - Pointer to a byte to store the value read.
   10385  *****************************************************************************/
   10386 static int32_t
   10387 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10388 {
   10389 	int32_t status;
   10390 	uint32_t word = 0;
   10391 
   10392 	status = wm_read_ich8_data(sc, index, 1, &word);
   10393 	if (status == 0)
   10394 		*data = (uint8_t)word;
   10395 	else
   10396 		*data = 0;
   10397 
   10398 	return status;
   10399 }
   10400 
   10401 /******************************************************************************
   10402  * Reads a word from the NVM using the ICH8 flash access registers.
   10403  *
   10404  * sc - pointer to wm_hw structure
   10405  * index - The starting byte index of the word to read.
   10406  * data - Pointer to a word to store the value read.
   10407  *****************************************************************************/
   10408 static int32_t
   10409 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10410 {
   10411 	int32_t status;
   10412 	uint32_t word = 0;
   10413 
   10414 	status = wm_read_ich8_data(sc, index, 2, &word);
   10415 	if (status == 0)
   10416 		*data = (uint16_t)word;
   10417 	else
   10418 		*data = 0;
   10419 
   10420 	return status;
   10421 }
   10422 
   10423 /******************************************************************************
   10424  * Reads a dword from the NVM using the ICH8 flash access registers.
   10425  *
   10426  * sc - pointer to wm_hw structure
   10427  * index - The starting byte index of the word to read.
   10428  * data - Pointer to a word to store the value read.
   10429  *****************************************************************************/
   10430 static int32_t
   10431 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10432 {
   10433 	int32_t status;
   10434 
   10435 	status = wm_read_ich8_data(sc, index, 4, data);
   10436 	return status;
   10437 }
   10438 
   10439 /******************************************************************************
   10440  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10441  * register.
   10442  *
   10443  * sc - Struct containing variables accessed by shared code
   10444  * offset - offset of word in the EEPROM to read
   10445  * data - word read from the EEPROM
   10446  * words - number of words to read
   10447  *****************************************************************************/
   10448 static int
   10449 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10450 {
   10451 	int32_t  error = 0;
   10452 	uint32_t flash_bank = 0;
   10453 	uint32_t act_offset = 0;
   10454 	uint32_t bank_offset = 0;
   10455 	uint16_t word = 0;
   10456 	uint16_t i = 0;
   10457 
   10458 	/*
   10459 	 * We need to know which is the valid flash bank.  In the event
   10460 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10461 	 * managing flash_bank.  So it cannot be trusted and needs
   10462 	 * to be updated with each read.
   10463 	 */
   10464 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10465 	if (error) {
   10466 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10467 			device_xname(sc->sc_dev)));
   10468 		flash_bank = 0;
   10469 	}
   10470 
   10471 	/*
   10472 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10473 	 * size
   10474 	 */
   10475 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10476 
   10477 	error = wm_get_swfwhw_semaphore(sc);
   10478 	if (error) {
   10479 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10480 		    __func__);
   10481 		return error;
   10482 	}
   10483 
   10484 	for (i = 0; i < words; i++) {
   10485 		/* The NVM part needs a byte offset, hence * 2 */
   10486 		act_offset = bank_offset + ((offset + i) * 2);
   10487 		error = wm_read_ich8_word(sc, act_offset, &word);
   10488 		if (error) {
   10489 			aprint_error_dev(sc->sc_dev,
   10490 			    "%s: failed to read NVM\n", __func__);
   10491 			break;
   10492 		}
   10493 		data[i] = word;
   10494 	}
   10495 
   10496 	wm_put_swfwhw_semaphore(sc);
   10497 	return error;
   10498 }
   10499 
   10500 /******************************************************************************
   10501  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10502  * register.
   10503  *
   10504  * sc - Struct containing variables accessed by shared code
   10505  * offset - offset of word in the EEPROM to read
   10506  * data - word read from the EEPROM
   10507  * words - number of words to read
   10508  *****************************************************************************/
   10509 static int
   10510 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10511 {
   10512 	int32_t  error = 0;
   10513 	uint32_t flash_bank = 0;
   10514 	uint32_t act_offset = 0;
   10515 	uint32_t bank_offset = 0;
   10516 	uint32_t dword = 0;
   10517 	uint16_t i = 0;
   10518 
   10519 	/*
   10520 	 * We need to know which is the valid flash bank.  In the event
   10521 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10522 	 * managing flash_bank.  So it cannot be trusted and needs
   10523 	 * to be updated with each read.
   10524 	 */
   10525 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10526 	if (error) {
   10527 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10528 			device_xname(sc->sc_dev)));
   10529 		flash_bank = 0;
   10530 	}
   10531 
   10532 	/*
   10533 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10534 	 * size
   10535 	 */
   10536 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10537 
   10538 	error = wm_get_swfwhw_semaphore(sc);
   10539 	if (error) {
   10540 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10541 		    __func__);
   10542 		return error;
   10543 	}
   10544 
   10545 	for (i = 0; i < words; i++) {
   10546 		/* The NVM part needs a byte offset, hence * 2 */
   10547 		act_offset = bank_offset + ((offset + i) * 2);
   10548 		/* but we must read dword aligned, so mask ... */
   10549 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10550 		if (error) {
   10551 			aprint_error_dev(sc->sc_dev,
   10552 			    "%s: failed to read NVM\n", __func__);
   10553 			break;
   10554 		}
   10555 		/* ... and pick out low or high word */
   10556 		if ((act_offset & 0x2) == 0)
   10557 			data[i] = (uint16_t)(dword & 0xFFFF);
   10558 		else
   10559 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10560 	}
   10561 
   10562 	wm_put_swfwhw_semaphore(sc);
   10563 	return error;
   10564 }
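/*
 * Worked example of the alignment handling above: reading word 3 of
 * bank 0 gives act_offset = 6; 6 & ~0x3 == 4, so the dword at byte
 * offset 4 is fetched, and because (6 & 0x2) != 0 its high 16 bits
 * are returned.
 */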
   10565 
   10566 /* iNVM */
   10567 
   10568 static int
   10569 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10570 {
    10571 	int32_t  rv = -1;	/* return an error unless the word is found */
   10572 	uint32_t invm_dword;
   10573 	uint16_t i;
   10574 	uint8_t record_type, word_address;
   10575 
   10576 	for (i = 0; i < INVM_SIZE; i++) {
   10577 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10578 		/* Get record type */
   10579 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10580 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10581 			break;
   10582 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10583 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10584 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10585 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10586 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10587 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10588 			if (word_address == address) {
   10589 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10590 				rv = 0;
   10591 				break;
   10592 			}
   10593 		}
   10594 	}
   10595 
   10596 	return rv;
   10597 }
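/*
 * iNVM is a flat array of 32-bit autoload records: the walk above
 * skips over the CSR- and RSA-key-sized records and stops at the
 * first uninitialized slot, so a lookup costs at most INVM_SIZE
 * register reads.
 */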
   10598 
   10599 static int
   10600 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10601 {
   10602 	int rv = 0;
   10603 	int i;
   10604 
   10605 	for (i = 0; i < words; i++) {
   10606 		switch (offset + i) {
   10607 		case NVM_OFF_MACADDR:
   10608 		case NVM_OFF_MACADDR1:
   10609 		case NVM_OFF_MACADDR2:
   10610 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10611 			if (rv != 0) {
   10612 				data[i] = 0xffff;
   10613 				rv = -1;
   10614 			}
   10615 			break;
   10616 		case NVM_OFF_CFG2:
   10617 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10618 			if (rv != 0) {
   10619 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10620 				rv = 0;
   10621 			}
   10622 			break;
   10623 		case NVM_OFF_CFG4:
   10624 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10625 			if (rv != 0) {
   10626 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10627 				rv = 0;
   10628 			}
   10629 			break;
   10630 		case NVM_OFF_LED_1_CFG:
   10631 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10632 			if (rv != 0) {
   10633 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10634 				rv = 0;
   10635 			}
   10636 			break;
   10637 		case NVM_OFF_LED_0_2_CFG:
   10638 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10639 			if (rv != 0) {
   10640 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10641 				rv = 0;
   10642 			}
   10643 			break;
   10644 		case NVM_OFF_ID_LED_SETTINGS:
   10645 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10646 			if (rv != 0) {
   10647 				*data = ID_LED_RESERVED_FFFF;
   10648 				rv = 0;
   10649 			}
   10650 			break;
   10651 		default:
   10652 			DPRINTF(WM_DEBUG_NVM,
   10653 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10654 			*data = NVM_RESERVED_WORD;
   10655 			break;
   10656 		}
   10657 	}
   10658 
   10659 	return rv;
   10660 }
   10661 
    10662 /* Locking, NVM type detection, checksum validation, version check and read */
   10663 
   10664 /*
   10665  * wm_nvm_acquire:
   10666  *
   10667  *	Perform the EEPROM handshake required on some chips.
   10668  */
   10669 static int
   10670 wm_nvm_acquire(struct wm_softc *sc)
   10671 {
   10672 	uint32_t reg;
   10673 	int x;
   10674 	int ret = 0;
   10675 
    10676 	/* Nothing to do; flash access is locked in the read functions */
   10677 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10678 		return 0;
   10679 
   10680 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10681 		ret = wm_get_swfwhw_semaphore(sc);
   10682 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10683 		/* This will also do wm_get_swsm_semaphore() if needed */
   10684 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10685 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10686 		ret = wm_get_swsm_semaphore(sc);
   10687 	}
   10688 
   10689 	if (ret) {
   10690 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10691 			__func__);
   10692 		return 1;
   10693 	}
   10694 
   10695 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10696 		reg = CSR_READ(sc, WMREG_EECD);
   10697 
   10698 		/* Request EEPROM access. */
   10699 		reg |= EECD_EE_REQ;
   10700 		CSR_WRITE(sc, WMREG_EECD, reg);
   10701 
   10702 		/* ..and wait for it to be granted. */
   10703 		for (x = 0; x < 1000; x++) {
   10704 			reg = CSR_READ(sc, WMREG_EECD);
   10705 			if (reg & EECD_EE_GNT)
   10706 				break;
   10707 			delay(5);
   10708 		}
   10709 		if ((reg & EECD_EE_GNT) == 0) {
   10710 			aprint_error_dev(sc->sc_dev,
   10711 			    "could not acquire EEPROM GNT\n");
   10712 			reg &= ~EECD_EE_REQ;
   10713 			CSR_WRITE(sc, WMREG_EECD, reg);
   10714 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10715 				wm_put_swfwhw_semaphore(sc);
   10716 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10717 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10718 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10719 				wm_put_swsm_semaphore(sc);
   10720 			return 1;
   10721 		}
   10722 	}
   10723 
   10724 	return 0;
   10725 }
   10726 
   10727 /*
   10728  * wm_nvm_release:
   10729  *
   10730  *	Release the EEPROM mutex.
   10731  */
   10732 static void
   10733 wm_nvm_release(struct wm_softc *sc)
   10734 {
   10735 	uint32_t reg;
   10736 
    10737 	/* Nothing to do; flash access is locked in the read functions */
   10738 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10739 		return;
   10740 
   10741 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10742 		reg = CSR_READ(sc, WMREG_EECD);
   10743 		reg &= ~EECD_EE_REQ;
   10744 		CSR_WRITE(sc, WMREG_EECD, reg);
   10745 	}
   10746 
   10747 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10748 		wm_put_swfwhw_semaphore(sc);
   10749 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10750 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10751 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10752 		wm_put_swsm_semaphore(sc);
   10753 }
   10754 
   10755 static int
   10756 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10757 {
   10758 	uint32_t eecd = 0;
   10759 
   10760 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10761 	    || sc->sc_type == WM_T_82583) {
   10762 		eecd = CSR_READ(sc, WMREG_EECD);
   10763 
   10764 		/* Isolate bits 15 & 16 */
   10765 		eecd = ((eecd >> 15) & 0x03);
   10766 
   10767 		/* If both bits are set, device is Flash type */
   10768 		if (eecd == 0x03)
   10769 			return 0;
   10770 	}
   10771 	return 1;
   10772 }
   10773 
   10774 static int
   10775 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10776 {
   10777 	uint32_t eec;
   10778 
   10779 	eec = CSR_READ(sc, WMREG_EEC);
   10780 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10781 		return 1;
   10782 
   10783 	return 0;
   10784 }
   10785 
   10786 /*
   10787  * wm_nvm_validate_checksum
   10788  *
   10789  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10790  */
   10791 static int
   10792 wm_nvm_validate_checksum(struct wm_softc *sc)
   10793 {
   10794 	uint16_t checksum;
   10795 	uint16_t eeprom_data;
   10796 #ifdef WM_DEBUG
   10797 	uint16_t csum_wordaddr, valid_checksum;
   10798 #endif
   10799 	int i;
   10800 
   10801 	checksum = 0;
   10802 
   10803 	/* Don't check for I211 */
   10804 	if (sc->sc_type == WM_T_I211)
   10805 		return 0;
   10806 
   10807 #ifdef WM_DEBUG
   10808 	if (sc->sc_type == WM_T_PCH_LPT) {
   10809 		csum_wordaddr = NVM_OFF_COMPAT;
   10810 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10811 	} else {
   10812 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10813 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10814 	}
   10815 
   10816 	/* Dump EEPROM image for debug */
   10817 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10818 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10819 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10820 		/* XXX PCH_SPT? */
   10821 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10822 		if ((eeprom_data & valid_checksum) == 0) {
   10823 			DPRINTF(WM_DEBUG_NVM,
    10824 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   10825 				device_xname(sc->sc_dev), eeprom_data,
   10826 				    valid_checksum));
   10827 		}
   10828 	}
   10829 
   10830 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10831 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10832 		for (i = 0; i < NVM_SIZE; i++) {
   10833 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10834 				printf("XXXX ");
   10835 			else
   10836 				printf("%04hx ", eeprom_data);
   10837 			if (i % 8 == 7)
   10838 				printf("\n");
   10839 		}
   10840 	}
   10841 
   10842 #endif /* WM_DEBUG */
   10843 
   10844 	for (i = 0; i < NVM_SIZE; i++) {
   10845 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10846 			return 1;
   10847 		checksum += eeprom_data;
   10848 	}
   10849 
   10850 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10851 #ifdef WM_DEBUG
   10852 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10853 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10854 #endif
   10855 	}
   10856 
   10857 	return 0;
   10858 }
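/*
 * The stored checksum word is chosen by the vendor so that the 16-bit
 * sum of all NVM_SIZE words, checksum word included, equals the
 * NVM_CHECKSUM constant.  Note that a mismatch is only reported under
 * WM_DEBUG and the function still returns 0, so attach does not fail
 * on boards that ship with a bad checksum.
 */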
   10859 
   10860 static void
   10861 wm_nvm_version_invm(struct wm_softc *sc)
   10862 {
   10863 	uint32_t dword;
   10864 
   10865 	/*
    10866 	 * Linux's code to decode the version is very strange, so we
    10867 	 * don't follow that algorithm; we just use word 61 as the
    10868 	 * documentation describes.  Perhaps it's not perfect, though...
   10869 	 *
   10870 	 * Example:
   10871 	 *
   10872 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10873 	 */
   10874 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10875 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10876 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10877 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10878 }
   10879 
   10880 static void
   10881 wm_nvm_version(struct wm_softc *sc)
   10882 {
   10883 	uint16_t major, minor, build, patch;
   10884 	uint16_t uid0, uid1;
   10885 	uint16_t nvm_data;
   10886 	uint16_t off;
   10887 	bool check_version = false;
   10888 	bool check_optionrom = false;
   10889 	bool have_build = false;
   10890 
   10891 	/*
   10892 	 * Version format:
   10893 	 *
   10894 	 * XYYZ
   10895 	 * X0YZ
   10896 	 * X0YY
   10897 	 *
   10898 	 * Example:
   10899 	 *
   10900 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10901 	 *	82571	0x50a6	5.10.6?
   10902 	 *	82572	0x506a	5.6.10?
   10903 	 *	82572EI	0x5069	5.6.9?
   10904 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10905 	 *		0x2013	2.1.3?
    10906 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10907 	 */
   10908 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10909 	switch (sc->sc_type) {
   10910 	case WM_T_82571:
   10911 	case WM_T_82572:
   10912 	case WM_T_82574:
   10913 	case WM_T_82583:
   10914 		check_version = true;
   10915 		check_optionrom = true;
   10916 		have_build = true;
   10917 		break;
   10918 	case WM_T_82575:
   10919 	case WM_T_82576:
   10920 	case WM_T_82580:
   10921 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10922 			check_version = true;
   10923 		break;
   10924 	case WM_T_I211:
   10925 		wm_nvm_version_invm(sc);
   10926 		goto printver;
   10927 	case WM_T_I210:
   10928 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10929 			wm_nvm_version_invm(sc);
   10930 			goto printver;
   10931 		}
   10932 		/* FALLTHROUGH */
   10933 	case WM_T_I350:
   10934 	case WM_T_I354:
   10935 		check_version = true;
   10936 		check_optionrom = true;
   10937 		break;
   10938 	default:
   10939 		return;
   10940 	}
   10941 	if (check_version) {
   10942 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10943 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10944 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10945 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10946 			build = nvm_data & NVM_BUILD_MASK;
   10947 			have_build = true;
   10948 		} else
   10949 			minor = nvm_data & 0x00ff;
   10950 
   10951 		/* Decimal */
   10952 		minor = (minor / 16) * 10 + (minor % 16);
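		/*
		 * The raw minor field is BCD-like: e.g. NVM word 0x50a2
		 * yields major 5 and raw minor 0x0a, which the conversion
		 * above maps to decimal 10 -> version 5.10 (build 2), as in
		 * the table at the top of this function.
		 */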
   10953 		sc->sc_nvm_ver_major = major;
   10954 		sc->sc_nvm_ver_minor = minor;
   10955 
   10956 printver:
   10957 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10958 		    sc->sc_nvm_ver_minor);
   10959 		if (have_build) {
   10960 			sc->sc_nvm_ver_build = build;
   10961 			aprint_verbose(".%d", build);
   10962 		}
   10963 	}
   10964 	if (check_optionrom) {
   10965 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10966 		/* Option ROM Version */
   10967 		if ((off != 0x0000) && (off != 0xffff)) {
   10968 			off += NVM_COMBO_VER_OFF;
   10969 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10970 			wm_nvm_read(sc, off, 1, &uid0);
   10971 			if ((uid0 != 0) && (uid0 != 0xffff)
   10972 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10973 				/* 16bits */
   10974 				major = uid0 >> 8;
   10975 				build = (uid0 << 8) | (uid1 >> 8);
   10976 				patch = uid1 & 0x00ff;
   10977 				aprint_verbose(", option ROM Version %d.%d.%d",
   10978 				    major, build, patch);
   10979 			}
   10980 		}
   10981 	}
   10982 
   10983 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10984 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10985 }
   10986 
   10987 /*
   10988  * wm_nvm_read:
   10989  *
   10990  *	Read data from the serial EEPROM.
   10991  */
   10992 static int
   10993 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10994 {
   10995 	int rv;
   10996 
   10997 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10998 		return 1;
   10999 
   11000 	if (wm_nvm_acquire(sc))
   11001 		return 1;
   11002 
   11003 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11004 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11005 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11006 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11007 	else if (sc->sc_type == WM_T_PCH_SPT)
   11008 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11009 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11010 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11011 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11012 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11013 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11014 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11015 	else
   11016 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11017 
   11018 	wm_nvm_release(sc);
   11019 	return rv;
   11020 }
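/*
 * wm_nvm_read() is the single entry point the rest of the driver
 * uses: the acquire/release pair brackets every access, and the
 * ICH/SPT flash backends additionally take the SWFWHW semaphore
 * around the actual flash cycles.
 */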
   11021 
   11022 /*
   11023  * Hardware semaphores.
    11024  * Very complex...
   11025  */
   11026 
   11027 static int
   11028 wm_get_swsm_semaphore(struct wm_softc *sc)
   11029 {
   11030 	int32_t timeout;
   11031 	uint32_t swsm;
   11032 
   11033 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11034 		/* Get the SW semaphore. */
   11035 		timeout = sc->sc_nvm_wordsize + 1;
   11036 		while (timeout) {
   11037 			swsm = CSR_READ(sc, WMREG_SWSM);
   11038 
   11039 			if ((swsm & SWSM_SMBI) == 0)
   11040 				break;
   11041 
   11042 			delay(50);
   11043 			timeout--;
   11044 		}
   11045 
   11046 		if (timeout == 0) {
   11047 			aprint_error_dev(sc->sc_dev,
   11048 			    "could not acquire SWSM SMBI\n");
   11049 			return 1;
   11050 		}
   11051 	}
   11052 
   11053 	/* Get the FW semaphore. */
   11054 	timeout = sc->sc_nvm_wordsize + 1;
   11055 	while (timeout) {
   11056 		swsm = CSR_READ(sc, WMREG_SWSM);
   11057 		swsm |= SWSM_SWESMBI;
   11058 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11059 		/* If we managed to set the bit we got the semaphore. */
   11060 		swsm = CSR_READ(sc, WMREG_SWSM);
   11061 		if (swsm & SWSM_SWESMBI)
   11062 			break;
   11063 
   11064 		delay(50);
   11065 		timeout--;
   11066 	}
   11067 
   11068 	if (timeout == 0) {
   11069 		aprint_error_dev(sc->sc_dev,
   11070 		    "could not acquire SWSM SWESMBI\n");
   11071 		/* Release semaphores */
   11072 		wm_put_swsm_semaphore(sc);
   11073 		return 1;
   11074 	}
   11075 	return 0;
   11076 }
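/*
 * The SWSM handshake is two-staged: SMBI serializes software agents,
 * then SWESMBI arbitrates software against firmware.  The SWESMBI
 * write only sticks if firmware is not holding the semaphore, which
 * is why the register is read back after each write above.
 */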
   11077 
   11078 static void
   11079 wm_put_swsm_semaphore(struct wm_softc *sc)
   11080 {
   11081 	uint32_t swsm;
   11082 
   11083 	swsm = CSR_READ(sc, WMREG_SWSM);
   11084 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11085 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11086 }
   11087 
   11088 static int
   11089 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11090 {
   11091 	uint32_t swfw_sync;
   11092 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11093 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11094 	int timeout;
   11095 
   11096 	for (timeout = 0; timeout < 200; timeout++) {
   11097 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11098 			if (wm_get_swsm_semaphore(sc)) {
   11099 				aprint_error_dev(sc->sc_dev,
   11100 				    "%s: failed to get semaphore\n",
   11101 				    __func__);
   11102 				return 1;
   11103 			}
   11104 		}
   11105 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11106 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11107 			swfw_sync |= swmask;
   11108 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11109 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11110 				wm_put_swsm_semaphore(sc);
   11111 			return 0;
   11112 		}
   11113 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11114 			wm_put_swsm_semaphore(sc);
   11115 		delay(5000);
   11116 	}
   11117 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11118 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11119 	return 1;
   11120 }
   11121 
   11122 static void
   11123 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11124 {
   11125 	uint32_t swfw_sync;
   11126 
   11127 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11128 		while (wm_get_swsm_semaphore(sc) != 0)
   11129 			continue;
   11130 	}
   11131 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11132 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11133 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11134 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11135 		wm_put_swsm_semaphore(sc);
   11136 }
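/*
 * Typical pairing (a sketch; SWFW_EEP_SM is the mask the NVM code
 * above uses, other units have their own masks):
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		... access the shared resource ...
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 */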
   11137 
   11138 static int
   11139 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11140 {
   11141 	uint32_t ext_ctrl;
    11142 	int timeout;
   11143 
   11144 	for (timeout = 0; timeout < 200; timeout++) {
   11145 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11146 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11147 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11148 
   11149 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11150 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11151 			return 0;
   11152 		delay(5000);
   11153 	}
   11154 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11155 	    device_xname(sc->sc_dev), ext_ctrl);
   11156 	return 1;
   11157 }
   11158 
   11159 static void
   11160 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11161 {
   11162 	uint32_t ext_ctrl;
   11163 
   11164 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11165 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11166 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11167 }
   11168 
   11169 static int
   11170 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11171 {
   11172 	int i = 0;
   11173 	uint32_t reg;
   11174 
   11175 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11176 	do {
   11177 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11178 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11179 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11180 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11181 			break;
   11182 		delay(2*1000);
   11183 		i++;
   11184 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11185 
   11186 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11187 		wm_put_hw_semaphore_82573(sc);
   11188 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11189 		    device_xname(sc->sc_dev));
   11190 		return -1;
   11191 	}
   11192 
   11193 	return 0;
   11194 }
   11195 
   11196 static void
   11197 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11198 {
   11199 	uint32_t reg;
   11200 
   11201 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11202 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11203 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11204 }
   11205 
   11206 /*
   11207  * Management mode and power management related subroutines.
   11208  * BMC, AMT, suspend/resume and EEE.
   11209  */
   11210 
   11211 #ifdef WM_WOL
   11212 static int
   11213 wm_check_mng_mode(struct wm_softc *sc)
   11214 {
   11215 	int rv;
   11216 
   11217 	switch (sc->sc_type) {
   11218 	case WM_T_ICH8:
   11219 	case WM_T_ICH9:
   11220 	case WM_T_ICH10:
   11221 	case WM_T_PCH:
   11222 	case WM_T_PCH2:
   11223 	case WM_T_PCH_LPT:
   11224 	case WM_T_PCH_SPT:
   11225 		rv = wm_check_mng_mode_ich8lan(sc);
   11226 		break;
   11227 	case WM_T_82574:
   11228 	case WM_T_82583:
   11229 		rv = wm_check_mng_mode_82574(sc);
   11230 		break;
   11231 	case WM_T_82571:
   11232 	case WM_T_82572:
   11233 	case WM_T_82573:
   11234 	case WM_T_80003:
   11235 		rv = wm_check_mng_mode_generic(sc);
   11236 		break;
   11237 	default:
    11238 		/* nothing to do */
   11239 		rv = 0;
   11240 		break;
   11241 	}
   11242 
   11243 	return rv;
   11244 }
   11245 
   11246 static int
   11247 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11248 {
   11249 	uint32_t fwsm;
   11250 
   11251 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11252 
   11253 	if (((fwsm & FWSM_FW_VALID) != 0)
   11254 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11255 		return 1;
   11256 
   11257 	return 0;
   11258 }
   11259 
   11260 static int
   11261 wm_check_mng_mode_82574(struct wm_softc *sc)
   11262 {
   11263 	uint16_t data;
   11264 
   11265 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11266 
   11267 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11268 		return 1;
   11269 
   11270 	return 0;
   11271 }
   11272 
   11273 static int
   11274 wm_check_mng_mode_generic(struct wm_softc *sc)
   11275 {
   11276 	uint32_t fwsm;
   11277 
   11278 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11279 
   11280 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11281 		return 1;
   11282 
   11283 	return 0;
   11284 }
   11285 #endif /* WM_WOL */
   11286 
   11287 static int
   11288 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11289 {
   11290 	uint32_t manc, fwsm, factps;
   11291 
   11292 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11293 		return 0;
   11294 
   11295 	manc = CSR_READ(sc, WMREG_MANC);
   11296 
   11297 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11298 		device_xname(sc->sc_dev), manc));
   11299 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11300 		return 0;
   11301 
   11302 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11303 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11304 		factps = CSR_READ(sc, WMREG_FACTPS);
   11305 		if (((factps & FACTPS_MNGCG) == 0)
   11306 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11307 			return 1;
   11308 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11309 		uint16_t data;
   11310 
   11311 		factps = CSR_READ(sc, WMREG_FACTPS);
   11312 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11313 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11314 			device_xname(sc->sc_dev), factps, data));
   11315 		if (((factps & FACTPS_MNGCG) == 0)
   11316 		    && ((data & NVM_CFG2_MNGM_MASK)
   11317 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11318 			return 1;
   11319 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11320 	    && ((manc & MANC_ASF_EN) == 0))
   11321 		return 1;
   11322 
   11323 	return 0;
   11324 }
   11325 
   11326 static bool
   11327 wm_phy_resetisblocked(struct wm_softc *sc)
   11328 {
   11329 	bool blocked = false;
   11330 	uint32_t reg;
   11331 	int i = 0;
   11332 
   11333 	switch (sc->sc_type) {
   11334 	case WM_T_ICH8:
   11335 	case WM_T_ICH9:
   11336 	case WM_T_ICH10:
   11337 	case WM_T_PCH:
   11338 	case WM_T_PCH2:
   11339 	case WM_T_PCH_LPT:
   11340 	case WM_T_PCH_SPT:
   11341 		do {
   11342 			reg = CSR_READ(sc, WMREG_FWSM);
   11343 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11344 				blocked = true;
   11345 				delay(10*1000);
   11346 				continue;
   11347 			}
   11348 			blocked = false;
   11349 		} while (blocked && (i++ < 10));
    11350 		return blocked;
   11352 	case WM_T_82571:
   11353 	case WM_T_82572:
   11354 	case WM_T_82573:
   11355 	case WM_T_82574:
   11356 	case WM_T_82583:
   11357 	case WM_T_80003:
   11358 		reg = CSR_READ(sc, WMREG_MANC);
   11359 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11360 			return true;
   11361 		else
    11362 			return false;
   11364 	default:
   11365 		/* no problem */
   11366 		break;
   11367 	}
   11368 
   11369 	return false;
   11370 }
   11371 
   11372 static void
   11373 wm_get_hw_control(struct wm_softc *sc)
   11374 {
   11375 	uint32_t reg;
   11376 
   11377 	switch (sc->sc_type) {
   11378 	case WM_T_82573:
   11379 		reg = CSR_READ(sc, WMREG_SWSM);
   11380 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11381 		break;
   11382 	case WM_T_82571:
   11383 	case WM_T_82572:
   11384 	case WM_T_82574:
   11385 	case WM_T_82583:
   11386 	case WM_T_80003:
   11387 	case WM_T_ICH8:
   11388 	case WM_T_ICH9:
   11389 	case WM_T_ICH10:
   11390 	case WM_T_PCH:
   11391 	case WM_T_PCH2:
   11392 	case WM_T_PCH_LPT:
   11393 	case WM_T_PCH_SPT:
   11394 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11395 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11396 		break;
   11397 	default:
   11398 		break;
   11399 	}
   11400 }
   11401 
   11402 static void
   11403 wm_release_hw_control(struct wm_softc *sc)
   11404 {
   11405 	uint32_t reg;
   11406 
   11407 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11408 		return;
   11409 
   11410 	if (sc->sc_type == WM_T_82573) {
   11411 		reg = CSR_READ(sc, WMREG_SWSM);
    11412 		reg &= ~SWSM_DRV_LOAD;
    11413 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11414 	} else {
   11415 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11416 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11417 	}
   11418 }
   11419 
   11420 static void
   11421 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11422 {
   11423 	uint32_t reg;
   11424 
   11425 	if (sc->sc_type < WM_T_PCH2)
   11426 		return;
   11427 
   11428 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11429 
   11430 	if (gate)
   11431 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11432 	else
   11433 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11434 
   11435 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11436 }
   11437 
   11438 static void
   11439 wm_smbustopci(struct wm_softc *sc)
   11440 {
   11441 	uint32_t fwsm, reg;
   11442 
   11443 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11444 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11445 
   11446 	/* Acquire semaphore */
   11447 	wm_get_swfwhw_semaphore(sc);
   11448 
   11449 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11450 	if (((fwsm & FWSM_FW_VALID) == 0)
    11451 	    && (wm_phy_resetisblocked(sc) == false)) {
   11452 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11453 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11454 			reg |= CTRL_EXT_FORCE_SMBUS;
   11455 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11456 			CSR_WRITE_FLUSH(sc);
   11457 			delay(50*1000);
   11458 		}
   11459 
   11460 		/* Toggle LANPHYPC */
   11461 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11462 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11463 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11464 		CSR_WRITE_FLUSH(sc);
   11465 		delay(10);
   11466 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11467 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11468 		CSR_WRITE_FLUSH(sc);
   11469 		delay(50*1000);
   11470 
   11471 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11472 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11473 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11474 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11475 		}
   11476 	}
   11477 
   11478 	/* Release semaphore */
   11479 	wm_put_swfwhw_semaphore(sc);
   11480 
   11481 	/*
   11482 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11483 	 */
   11484 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11485 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11486 }
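/*
 * Toggling CTRL_LANPHYPC_OVERRIDE/CTRL_LANPHYPC_VALUE above
 * power-cycles the PHY so that it comes back up in PCIe mode rather
 * than SMBus mode; on PCH_LPT and newer, CTRL_EXT_FORCE_SMBUS must
 * then be cleared again or MDIO traffic would stay routed over SMBus.
 */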
   11487 
   11488 static void
   11489 wm_init_manageability(struct wm_softc *sc)
   11490 {
   11491 
   11492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11493 		device_xname(sc->sc_dev), __func__));
   11494 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11495 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11496 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11497 
   11498 		/* Disable hardware interception of ARP */
   11499 		manc &= ~MANC_ARP_EN;
   11500 
   11501 		/* Enable receiving management packets to the host */
   11502 		if (sc->sc_type >= WM_T_82571) {
   11503 			manc |= MANC_EN_MNG2HOST;
    11504 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11505 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11506 		}
   11507 
   11508 		CSR_WRITE(sc, WMREG_MANC, manc);
   11509 	}
   11510 }
   11511 
   11512 static void
   11513 wm_release_manageability(struct wm_softc *sc)
   11514 {
   11515 
   11516 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11517 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11518 
   11519 		manc |= MANC_ARP_EN;
   11520 		if (sc->sc_type >= WM_T_82571)
   11521 			manc &= ~MANC_EN_MNG2HOST;
   11522 
   11523 		CSR_WRITE(sc, WMREG_MANC, manc);
   11524 	}
   11525 }
   11526 
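          /*
           * wm_get_wakeup:
           *
           *	Work out which manageability features this chip has (AMT,
           *	ASF firmware, management pass-through) and record them in
           *	sc_flags.
           */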
   11527 static void
   11528 wm_get_wakeup(struct wm_softc *sc)
   11529 {
   11530 
   11531 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11532 	switch (sc->sc_type) {
   11533 	case WM_T_82573:
   11534 	case WM_T_82583:
   11535 		sc->sc_flags |= WM_F_HAS_AMT;
   11536 		/* FALLTHROUGH */
   11537 	case WM_T_80003:
   11538 	case WM_T_82541:
   11539 	case WM_T_82547:
   11540 	case WM_T_82571:
   11541 	case WM_T_82572:
   11542 	case WM_T_82574:
   11543 	case WM_T_82575:
   11544 	case WM_T_82576:
   11545 	case WM_T_82580:
   11546 	case WM_T_I350:
   11547 	case WM_T_I354:
   11548 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11549 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11550 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11551 		break;
   11552 	case WM_T_ICH8:
   11553 	case WM_T_ICH9:
   11554 	case WM_T_ICH10:
   11555 	case WM_T_PCH:
   11556 	case WM_T_PCH2:
   11557 	case WM_T_PCH_LPT:
   11558 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11559 		sc->sc_flags |= WM_F_HAS_AMT;
   11560 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11561 		break;
   11562 	default:
   11563 		break;
   11564 	}
   11565 
   11566 	/* 1: HAS_MANAGE */
   11567 	if (wm_enable_mng_pass_thru(sc) != 0)
   11568 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11569 
   11570 #ifdef WM_DEBUG
   11571 	printf("\n");
   11572 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11573 		printf("HAS_AMT,");
   11574 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11575 		printf("ARC_SUBSYS_VALID,");
   11576 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11577 		printf("ASF_FIRMWARE_PRES,");
   11578 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11579 		printf("HAS_MANAGE,");
   11580 	printf("\n");
   11581 #endif
   11582 	/*
    11583 	 * Note that the WOL flag is set after the EEPROM-related
    11584 	 * reset code has run.
   11585 	 */
   11586 }
   11587 
   11588 #ifdef WM_WOL
   11589 /* WOL in the newer chipset interfaces (pchlan) */
   11590 static void
   11591 wm_enable_phy_wakeup(struct wm_softc *sc)
   11592 {
   11593 #if 0
   11594 	uint16_t preg;
   11595 
   11596 	/* Copy MAC RARs to PHY RARs */
   11597 
   11598 	/* Copy MAC MTA to PHY MTA */
   11599 
   11600 	/* Configure PHY Rx Control register */
   11601 
   11602 	/* Enable PHY wakeup in MAC register */
   11603 
   11604 	/* Configure and enable PHY wakeup in PHY registers */
   11605 
   11606 	/* Activate PHY wakeup */
   11607 
   11608 	/* XXX */
   11609 #endif
   11610 }
   11611 
   11612 /* Power down workaround on D3 */
   11613 static void
   11614 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11615 {
   11616 	uint32_t reg;
   11617 	int i;
   11618 
   11619 	for (i = 0; i < 2; i++) {
   11620 		/* Disable link */
   11621 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11622 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11623 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11624 
   11625 		/*
   11626 		 * Call gig speed drop workaround on Gig disable before
   11627 		 * accessing any PHY registers
   11628 		 */
   11629 		if (sc->sc_type == WM_T_ICH8)
   11630 			wm_gig_downshift_workaround_ich8lan(sc);
   11631 
   11632 		/* Write VR power-down enable */
   11633 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11634 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11635 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11636 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11637 
   11638 		/* Read it back and test */
   11639 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11640 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11641 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11642 			break;
   11643 
   11644 		/* Issue PHY reset and repeat at most one more time */
   11645 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11646 	}
   11647 }
   11648 
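          /*
           * wm_enable_wakeup:
           *
           *	Arm the chip for wake-on-LAN: advertise the wakeup
           *	capability, apply the ICH/PCH workarounds, program the
           *	wakeup filters (magic packet) and enable PME in the PCI
           *	power management registers.
           */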
   11649 static void
   11650 wm_enable_wakeup(struct wm_softc *sc)
   11651 {
   11652 	uint32_t reg, pmreg;
   11653 	pcireg_t pmode;
   11654 
   11655 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11656 		&pmreg, NULL) == 0)
   11657 		return;
   11658 
   11659 	/* Advertise the wakeup capability */
   11660 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11661 	    | CTRL_SWDPIN(3));
   11662 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11663 
   11664 	/* ICH workaround */
   11665 	switch (sc->sc_type) {
   11666 	case WM_T_ICH8:
   11667 	case WM_T_ICH9:
   11668 	case WM_T_ICH10:
   11669 	case WM_T_PCH:
   11670 	case WM_T_PCH2:
   11671 	case WM_T_PCH_LPT:
   11672 	case WM_T_PCH_SPT:
   11673 		/* Disable gig during WOL */
   11674 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11675 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11676 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11677 		if (sc->sc_type == WM_T_PCH)
   11678 			wm_gmii_reset(sc);
   11679 
   11680 		/* Power down workaround */
   11681 		if (sc->sc_phytype == WMPHY_82577) {
   11682 			struct mii_softc *child;
   11683 
   11684 			/* Assume that the PHY is copper */
   11685 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    11686 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   11687 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11688 				    (768 << 5) | 25, 0x0444); /* magic num */
   11689 		}
   11690 		break;
   11691 	default:
   11692 		break;
   11693 	}
   11694 
   11695 	/* Keep the laser running on fiber adapters */
   11696 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11697 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11698 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11699 		reg |= CTRL_EXT_SWDPIN(3);
   11700 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11701 	}
   11702 
   11703 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11704 #if 0	/* for the multicast packet */
   11705 	reg |= WUFC_MC;
   11706 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11707 #endif
   11708 
   11709 	if (sc->sc_type == WM_T_PCH) {
   11710 		wm_enable_phy_wakeup(sc);
   11711 	} else {
   11712 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11713 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11714 	}
   11715 
   11716 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11717 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11718 		|| (sc->sc_type == WM_T_PCH2))
   11719 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11720 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11721 
   11722 	/* Request PME */
   11723 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11724 #if 0
   11725 	/* Disable WOL */
   11726 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11727 #else
   11728 	/* For WOL */
   11729 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11730 #endif
   11731 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11732 }
   11733 #endif /* WM_WOL */
   11734 
   11735 /* LPLU */
   11736 
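          /*
           * wm_lplu_d0_disable:
           *
           *	Disable D0 Low Power Link Up (LPLU) so the link can run at
           *	full speed while the device is in the D0 power state.
           */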
   11737 static void
   11738 wm_lplu_d0_disable(struct wm_softc *sc)
   11739 {
   11740 	uint32_t reg;
   11741 
   11742 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11743 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11744 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11745 }
   11746 
   11747 static void
   11748 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11749 {
   11750 	uint32_t reg;
   11751 
   11752 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11753 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11754 	reg |= HV_OEM_BITS_ANEGNOW;
   11755 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11756 }
   11757 
   11758 /* EEE */
   11759 
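          /*
           * wm_set_eee_i350:
           *
           *	Enable or disable Energy Efficient Ethernet (IEEE 802.3az)
           *	on I350 class devices according to the WM_F_EEE flag:
           *	advertise EEE at 100M/1G and enable LPI in the transmit
           *	and receive paths.
           */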
   11760 static void
   11761 wm_set_eee_i350(struct wm_softc *sc)
   11762 {
   11763 	uint32_t ipcnfg, eeer;
   11764 
   11765 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11766 	eeer = CSR_READ(sc, WMREG_EEER);
   11767 
   11768 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11769 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11770 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11771 		    | EEER_LPI_FC);
   11772 	} else {
   11773 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11774 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11775 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11776 		    | EEER_LPI_FC);
   11777 	}
   11778 
   11779 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11780 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11781 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11782 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11783 }
   11784 
   11785 /*
   11786  * Workarounds (mainly PHY related).
   11787  * Basically, PHY's workarounds are in the PHY drivers.
   11788  */
   11789 
   11790 /* Work-around for 82566 Kumeran PCS lock loss */
   11791 static void
   11792 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11793 {
   11794 #if 0
   11795 	int miistatus, active, i;
   11796 	int reg;
   11797 
   11798 	miistatus = sc->sc_mii.mii_media_status;
   11799 
   11800 	/* If the link is not up, do nothing */
   11801 	if ((miistatus & IFM_ACTIVE) == 0)
   11802 		return;
   11803 
   11804 	active = sc->sc_mii.mii_media_active;
   11805 
   11806 	/* Nothing to do if the link is other than 1Gbps */
   11807 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11808 		return;
   11809 
   11810 	for (i = 0; i < 10; i++) {
   11811 		/* read twice */
   11812 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11813 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11814 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11815 			goto out;	/* GOOD! */
   11816 
   11817 		/* Reset the PHY */
   11818 		wm_gmii_reset(sc);
   11819 		delay(5*1000);
   11820 	}
   11821 
   11822 	/* Disable GigE link negotiation */
   11823 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11824 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11825 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11826 
   11827 	/*
   11828 	 * Call gig speed drop workaround on Gig disable before accessing
   11829 	 * any PHY registers.
   11830 	 */
   11831 	wm_gig_downshift_workaround_ich8lan(sc);
   11832 
   11833 out:
   11834 	return;
   11835 #endif
   11836 }
   11837 
   11838 /* WOL from S5 stops working */
   11839 static void
   11840 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11841 {
   11842 	uint16_t kmrn_reg;
   11843 
   11844 	/* Only for igp3 */
   11845 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11846 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11847 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11848 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11849 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11850 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11851 	}
   11852 }
   11853 
   11854 /*
   11855  * Workaround for pch's PHYs
   11856  * XXX should be moved to new PHY driver?
   11857  */
   11858 static void
   11859 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11860 {
   11861 	if (sc->sc_phytype == WMPHY_82577)
   11862 		wm_set_mdio_slow_mode_hv(sc);
   11863 
   11864 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11865 
   11866 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11867 
   11868 	/* 82578 */
   11869 	if (sc->sc_phytype == WMPHY_82578) {
   11870 		/* PCH rev. < 3 */
   11871 		if (sc->sc_rev < 3) {
   11872 			/* XXX 6 bit shift? Why? Is it page2? */
   11873 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11874 			    0x66c0);
   11875 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11876 			    0xffff);
   11877 		}
   11878 
   11879 		/* XXX phy rev. < 2 */
   11880 	}
   11881 
   11882 	/* Select page 0 */
   11883 
   11884 	/* XXX acquire semaphore */
   11885 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11886 	/* XXX release semaphore */
   11887 
   11888 	/*
   11889 	 * Configure the K1 Si workaround during phy reset assuming there is
   11890 	 * link so that it disables K1 if link is in 1Gbps.
   11891 	 */
   11892 	wm_k1_gig_workaround_hv(sc, 1);
   11893 }
   11894 
   11895 static void
   11896 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11897 {
   11898 
   11899 	wm_set_mdio_slow_mode_hv(sc);
   11900 }
   11901 
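          /*
           * wm_k1_gig_workaround_hv:
           *
           *	Workaround for the K1 power state on PCH PHYs: keep K1
           *	disabled while the link is up (1Gbps), and program the
           *	KMRN diag register with the link stall fix matching the
           *	current link state.
           */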
   11902 static void
   11903 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11904 {
   11905 	int k1_enable = sc->sc_nvm_k1_enabled;
   11906 
   11907 	/* XXX acquire semaphore */
   11908 
   11909 	if (link) {
   11910 		k1_enable = 0;
   11911 
   11912 		/* Link stall fix for link up */
   11913 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11914 	} else {
   11915 		/* Link stall fix for link down */
   11916 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11917 	}
   11918 
   11919 	wm_configure_k1_ich8lan(sc, k1_enable);
   11920 
   11921 	/* XXX release semaphore */
   11922 }
   11923 
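          /*
           * wm_set_mdio_slow_mode_hv:
           *
           *	Put the PHY's MDIO interface into slow mode via the KMRN
           *	mode control register.
           */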
   11924 static void
   11925 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11926 {
   11927 	uint32_t reg;
   11928 
   11929 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11930 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11931 	    reg | HV_KMRN_MDIO_SLOW);
   11932 }
   11933 
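          /*
           * wm_configure_k1_ich8lan:
           *
           *	Enable or disable the K1 power state in the KMRN K1
           *	config register, briefly forcing the MAC speed with
           *	SPD_BYPS while the change takes effect, then restoring
           *	CTRL and CTRL_EXT.
           */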
   11934 static void
   11935 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11936 {
   11937 	uint32_t ctrl, ctrl_ext, tmp;
   11938 	uint16_t kmrn_reg;
   11939 
   11940 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11941 
   11942 	if (k1_enable)
   11943 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11944 	else
   11945 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11946 
   11947 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11948 
   11949 	delay(20);
   11950 
   11951 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11952 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11953 
   11954 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11955 	tmp |= CTRL_FRCSPD;
   11956 
   11957 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11958 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11959 	CSR_WRITE_FLUSH(sc);
   11960 	delay(20);
   11961 
   11962 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11963 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11964 	CSR_WRITE_FLUSH(sc);
   11965 	delay(20);
   11966 }
   11967 
    11968 /* Special case - for the 82575 we need to do manual init ... */
   11969 static void
   11970 wm_reset_init_script_82575(struct wm_softc *sc)
   11971 {
   11972 	/*
    11973 	 * Remark: this is untested code - we have no board without an EEPROM.
    11974 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   11975 	 */
   11976 
   11977 	/* SerDes configuration via SERDESCTRL */
   11978 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11979 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11980 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11981 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11982 
   11983 	/* CCM configuration via CCMCTL register */
   11984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11985 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11986 
   11987 	/* PCIe lanes configuration */
   11988 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11989 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11990 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11991 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11992 
   11993 	/* PCIe PLL Configuration */
   11994 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11995 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   11996 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   11997 }
   11998 
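          /*
           * wm_reset_mdicnfg_82580:
           *
           *	Set the external/shared MDIO bits in MDICNFG from the
           *	per-function CFG3 word in the NVM.  Only needed when
           *	SGMII is in use.
           */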
   11999 static void
   12000 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12001 {
   12002 	uint32_t reg;
   12003 	uint16_t nvmword;
   12004 	int rv;
   12005 
   12006 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12007 		return;
   12008 
   12009 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12010 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12011 	if (rv != 0) {
   12012 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12013 		    __func__);
   12014 		return;
   12015 	}
   12016 
   12017 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12018 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12019 		reg |= MDICNFG_DEST;
   12020 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12021 		reg |= MDICNFG_COM_MDIO;
   12022 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12023 }
   12024 
   12025 /*
   12026  * I210 Errata 25 and I211 Errata 10
   12027  * Slow System Clock.
   12028  */
   12029 static void
   12030 wm_pll_workaround_i210(struct wm_softc *sc)
   12031 {
   12032 	uint32_t mdicnfg, wuc;
   12033 	uint32_t reg;
   12034 	pcireg_t pcireg;
   12035 	uint32_t pmreg;
   12036 	uint16_t nvmword, tmp_nvmword;
   12037 	int phyval;
   12038 	bool wa_done = false;
   12039 	int i;
   12040 
   12041 	/* Save WUC and MDICNFG registers */
   12042 	wuc = CSR_READ(sc, WMREG_WUC);
   12043 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12044 
   12045 	reg = mdicnfg & ~MDICNFG_DEST;
   12046 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12047 
   12048 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12049 		nvmword = INVM_DEFAULT_AL;
   12050 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12051 
   12052 	/* Get Power Management cap offset */
   12053 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12054 		&pmreg, NULL) == 0)
   12055 		return;
   12056 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12057 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12058 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12059 
   12060 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12061 			break; /* OK */
   12062 		}
   12063 
   12064 		wa_done = true;
   12065 		/* Directly reset the internal PHY */
   12066 		reg = CSR_READ(sc, WMREG_CTRL);
   12067 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12068 
   12069 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12070 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12072 
   12073 		CSR_WRITE(sc, WMREG_WUC, 0);
   12074 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12075 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12076 
   12077 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12078 		    pmreg + PCI_PMCSR);
   12079 		pcireg |= PCI_PMCSR_STATE_D3;
   12080 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12081 		    pmreg + PCI_PMCSR, pcireg);
   12082 		delay(1000);
   12083 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12084 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12085 		    pmreg + PCI_PMCSR, pcireg);
   12086 
   12087 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12088 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12089 
   12090 		/* Restore WUC register */
   12091 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12092 	}
   12093 
   12094 	/* Restore MDICNFG setting */
   12095 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12096 	if (wa_done)
   12097 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12098 }
   12099