/*	$NetBSD: if_wm.c,v 1.407 2016/05/20 08:17:14 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- Tx multiqueue improvement (refine the queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.407 2016/05/20 08:17:14 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
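
/*
 * Example (illustrative only, not driver code): WM_DEBUG_* bits are OR'ed
 * into wm_debug to select which DPRINTF() categories fire.  Note that the
 * second argument must carry its own parentheses, since the macro expands
 * to "printf y".  A minimal sketch:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n", device_xname(sc->sc_dev)));
#endif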

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
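
/*
 * Example (illustrative only): because txq_num and txq_ndesc must be
 * powers of two, the WM_NEXT*() macros above wrap a ring index with a
 * cheap AND mask instead of a modulo.  A minimal sketch with a
 * 256-entry ring:
 */
#if 0
	int idx = 255;

	idx = (idx + 1) & (256 - 1);	/* (255 + 1) & 0xff == 0: wrapped */
#endif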

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
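
/*
 * Back-of-the-envelope check for the comment above (illustrative, not
 * compiled; assumes a ~9k jumbo frame): spread over 2k (MCLBYTES)
 * clusters, one jumbo frame needs ceil(9018 / 2048) = 5 Rx buffers, so
 * a 256-descriptor ring holds roughly 256 / 5 ~= 51 jumbo packets,
 * which is where the "room for 50" figure comes from.
 */
#if 0
	unsigned bufs_per_jumbo = (9018 + MCLBYTES - 1) / MCLBYTES; /* 5 */
	unsigned jumbo_capacity = WM_NRXDESC / bufs_per_jumbo;	    /* 51 */
#endif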

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	/* XXX which event counter is required? */
};
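
/*
 * Example (illustrative sketch, not driver code): txq_interq is a pcq(9),
 * a lockless producer/consumer queue.  A sender that cannot take the Tx
 * queue can stash its mbuf with pcq_put(), and the queue owner drains the
 * backlog later with pcq_get():
 */
#if 0
	pcq_t *interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);

	if (!pcq_put(interq, m))		/* producer: queue full */
		m_freem(m);

	while ((m = pcq_get(interq)) != NULL) {	/* consumer */
		/* ... enqueue m onto the hardware ring ... */
	}
#endif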

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_TRYLOCK(_txq)	((_txq)->txq_lock == NULL || mutex_tryenter((_txq)->txq_lock))
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
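
/*
 * Example (illustrative only): the macros above degrade to no-ops when
 * the lock pointer is NULL (the non-MPSAFE case), so callers can use one
 * pattern unconditionally:
 */
#if 0
	WM_TX_LOCK(txq);
	if (!sc->sc_stopping)
		wm_start_locked(ifp);
	WM_TX_UNLOCK(txq);
#endif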

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
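
/*
 * Example (illustrative sketch): building a multi-buffer jumbo packet
 * with the chain macros above.  rxq_tailp always points at the slot for
 * the next mbuf, so each link appends in O(1):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
#endif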

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
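
/*
 * Example (illustrative only): writes may be posted on the bus, so a
 * read of STATUS is used to force them out to the hardware.  A typical
 * write-then-flush sequence:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);			/* read STATUS to flush */
#endif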

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
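
/*
 * Example (illustrative sketch; the register macro names are assumed
 * here, see if_wmreg.h for the real ones): the _LO/_HI macros split a
 * descriptor ring's bus address for the TDBAL/TDBAH-style 64-bit base
 * register pairs.  On platforms with a 32-bit bus_addr_t the high half
 * compiles to 0:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif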

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};
   1341 
   1342 #ifdef WM_EVENT_COUNTERS
   1343 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1344 #endif /* WM_EVENT_COUNTERS */
   1345 
   1346 
   1347 /*
   1348  * Register read/write functions.
   1349  * Other than CSR_{READ|WRITE}().
   1350  */
   1351 
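         /*
          * The chip's I/O BAR exposes an indirect register window: the
          * target register offset is written at I/O offset 0 (IOADDR) and
          * the value is then read or written through I/O offset 4 (IODATA).
          */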
   1352 #if 0 /* Not currently used */
   1353 static inline uint32_t
   1354 wm_io_read(struct wm_softc *sc, int reg)
   1355 {
   1356 
   1357 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1358 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1359 }
   1360 #endif
   1361 
   1362 static inline void
   1363 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1364 {
   1365 
   1366 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1367 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1368 }
   1369 
   1370 static inline void
   1371 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1372     uint32_t data)
   1373 {
   1374 	uint32_t regval;
   1375 	int i;
   1376 
   1377 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1378 
   1379 	CSR_WRITE(sc, reg, regval);
   1380 
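         	/* Poll until the controller sets the READY bit or we time out. */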
   1381 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1382 		delay(5);
   1383 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1384 			break;
   1385 	}
   1386 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1387 		aprint_error("%s: WARNING:"
   1388 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1389 		    device_xname(sc->sc_dev), reg);
   1390 	}
   1391 }
   1392 
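         /*
          * Split a DMA bus address into the low and high 32-bit halves of a
          * little-endian descriptor address; e.g. 0x123456789 yields wa_low
          * 0x23456789 and wa_high 0x1.  The high half is only used when bus
          * addresses are 64-bit.
          */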
   1393 static inline void
   1394 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1395 {
   1396 	wa->wa_low = htole32(v & 0xffffffffU);
   1397 	if (sizeof(bus_addr_t) == 8)
   1398 		wa->wa_high = htole32((uint64_t) v >> 32);
   1399 	else
   1400 		wa->wa_high = 0;
   1401 }
   1402 
   1403 /*
   1404  * Descriptor sync/init functions.
   1405  */
   1406 static inline void
   1407 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1408 {
   1409 	struct wm_softc *sc = txq->txq_sc;
   1410 
   1411 	/* If it will wrap around, sync to the end of the ring. */
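         	/* e.g. 256 descriptors, start = 250, num = 10: sync 250-255, then 0-3 */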
   1412 	if ((start + num) > WM_NTXDESC(txq)) {
   1413 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1414 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1415 		    (WM_NTXDESC(txq) - start), ops);
   1416 		num -= (WM_NTXDESC(txq) - start);
   1417 		start = 0;
   1418 	}
   1419 
   1420 	/* Now sync whatever is left. */
   1421 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1422 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1423 }
   1424 
   1425 static inline void
   1426 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1427 {
   1428 	struct wm_softc *sc = rxq->rxq_sc;
   1429 
   1430 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1431 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1432 }
   1433 
   1434 static inline void
   1435 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1436 {
   1437 	struct wm_softc *sc = rxq->rxq_sc;
   1438 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1439 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1440 	struct mbuf *m = rxs->rxs_mbuf;
   1441 
   1442 	/*
   1443 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1444 	 * so that the payload after the Ethernet header is aligned
   1445 	 * to a 4-byte boundary.
    1446 	 *
   1447 	 * XXX BRAINDAMAGE ALERT!
   1448 	 * The stupid chip uses the same size for every buffer, which
   1449 	 * is set in the Receive Control register.  We are using the 2K
   1450 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1451 	 * reason, we can't "scoot" packets longer than the standard
   1452 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1453 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1454 	 * the upper layer copy the headers.
   1455 	 */
   1456 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1457 
   1458 	wm_set_dma_addr(&rxd->wrx_addr,
   1459 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1460 	rxd->wrx_len = 0;
   1461 	rxd->wrx_cksum = 0;
   1462 	rxd->wrx_status = 0;
   1463 	rxd->wrx_errors = 0;
   1464 	rxd->wrx_special = 0;
   1465 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1466 
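         	/* Hand the descriptor to the chip by advancing the RX tail (RDT). */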
   1467 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1468 }
   1469 
   1470 /*
   1471  * Device driver interface functions and commonly used functions.
   1472  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1473  */
   1474 
   1475 /* Lookup supported device table */
   1476 static const struct wm_product *
   1477 wm_lookup(const struct pci_attach_args *pa)
   1478 {
   1479 	const struct wm_product *wmp;
   1480 
   1481 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1482 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1483 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1484 			return wmp;
   1485 	}
   1486 	return NULL;
   1487 }
   1488 
   1489 /* The match function (ca_match) */
   1490 static int
   1491 wm_match(device_t parent, cfdata_t cf, void *aux)
   1492 {
   1493 	struct pci_attach_args *pa = aux;
   1494 
   1495 	if (wm_lookup(pa) != NULL)
   1496 		return 1;
   1497 
   1498 	return 0;
   1499 }
   1500 
   1501 /* The attach function (ca_attach) */
   1502 static void
   1503 wm_attach(device_t parent, device_t self, void *aux)
   1504 {
   1505 	struct wm_softc *sc = device_private(self);
   1506 	struct pci_attach_args *pa = aux;
   1507 	prop_dictionary_t dict;
   1508 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1509 	pci_chipset_tag_t pc = pa->pa_pc;
   1510 	int counts[PCI_INTR_TYPE_SIZE];
   1511 	pci_intr_type_t max_type;
   1512 	const char *eetype, *xname;
   1513 	bus_space_tag_t memt;
   1514 	bus_space_handle_t memh;
   1515 	bus_size_t memsize;
   1516 	int memh_valid;
   1517 	int i, error;
   1518 	const struct wm_product *wmp;
   1519 	prop_data_t ea;
   1520 	prop_number_t pn;
   1521 	uint8_t enaddr[ETHER_ADDR_LEN];
   1522 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1523 	pcireg_t preg, memtype;
   1524 	uint16_t eeprom_data, apme_mask;
   1525 	bool force_clear_smbi;
   1526 	uint32_t link_mode;
   1527 	uint32_t reg;
   1528 
   1529 	sc->sc_dev = self;
   1530 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1531 	sc->sc_stopping = false;
   1532 
   1533 	wmp = wm_lookup(pa);
   1534 #ifdef DIAGNOSTIC
   1535 	if (wmp == NULL) {
   1536 		printf("\n");
   1537 		panic("wm_attach: impossible");
   1538 	}
   1539 #endif
   1540 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1541 
   1542 	sc->sc_pc = pa->pa_pc;
   1543 	sc->sc_pcitag = pa->pa_tag;
   1544 
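         	/* Prefer the 64-bit DMA tag when the bus supports it. */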
   1545 	if (pci_dma64_available(pa))
   1546 		sc->sc_dmat = pa->pa_dmat64;
   1547 	else
   1548 		sc->sc_dmat = pa->pa_dmat;
   1549 
   1550 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1551 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1552 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1553 
   1554 	sc->sc_type = wmp->wmp_type;
   1555 	if (sc->sc_type < WM_T_82543) {
   1556 		if (sc->sc_rev < 2) {
   1557 			aprint_error_dev(sc->sc_dev,
   1558 			    "i82542 must be at least rev. 2\n");
   1559 			return;
   1560 		}
   1561 		if (sc->sc_rev < 3)
   1562 			sc->sc_type = WM_T_82542_2_0;
   1563 	}
   1564 
   1565 	/*
   1566 	 * Disable MSI for Errata:
   1567 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1568 	 *
   1569 	 *  82544: Errata 25
   1570 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1571 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1572 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1573 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1574 	 *
   1575 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1576 	 *
   1577 	 *  82571 & 82572: Errata 63
   1578 	 */
   1579 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1580 	    || (sc->sc_type == WM_T_82572))
   1581 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1582 
   1583 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1584 	    || (sc->sc_type == WM_T_82580)
   1585 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1586 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1587 		sc->sc_flags |= WM_F_NEWQUEUE;
   1588 
   1589 	/* Set device properties (mactype) */
   1590 	dict = device_properties(sc->sc_dev);
   1591 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1592 
   1593 	/*
    1594 	 * Map the device.  All devices support memory-mapped access,
   1595 	 * and it is really required for normal operation.
   1596 	 */
   1597 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1598 	switch (memtype) {
   1599 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1600 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1601 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1602 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1603 		break;
   1604 	default:
   1605 		memh_valid = 0;
   1606 		break;
   1607 	}
   1608 
   1609 	if (memh_valid) {
   1610 		sc->sc_st = memt;
   1611 		sc->sc_sh = memh;
   1612 		sc->sc_ss = memsize;
   1613 	} else {
   1614 		aprint_error_dev(sc->sc_dev,
   1615 		    "unable to map device registers\n");
   1616 		return;
   1617 	}
   1618 
   1619 	/*
   1620 	 * In addition, i82544 and later support I/O mapped indirect
   1621 	 * register access.  It is not desirable (nor supported in
   1622 	 * this driver) to use it for normal operation, though it is
   1623 	 * required to work around bugs in some chip versions.
   1624 	 */
   1625 	if (sc->sc_type >= WM_T_82544) {
   1626 		/* First we have to find the I/O BAR. */
   1627 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1628 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1629 			if (memtype == PCI_MAPREG_TYPE_IO)
   1630 				break;
   1631 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1632 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1633 				i += 4;	/* skip high bits, too */
   1634 		}
   1635 		if (i < PCI_MAPREG_END) {
   1636 			/*
    1637 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1638 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1639 			 * That's no problem, because those newer chips don't
    1640 			 * have this bug.
   1641 			 *
   1642 			 * The i8254x doesn't apparently respond when the
   1643 			 * I/O BAR is 0, which looks somewhat like it's not
   1644 			 * been configured.
   1645 			 */
   1646 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1647 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1648 				aprint_error_dev(sc->sc_dev,
   1649 				    "WARNING: I/O BAR at zero.\n");
   1650 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1651 					0, &sc->sc_iot, &sc->sc_ioh,
   1652 					NULL, &sc->sc_ios) == 0) {
   1653 				sc->sc_flags |= WM_F_IOH_VALID;
   1654 			} else {
   1655 				aprint_error_dev(sc->sc_dev,
   1656 				    "WARNING: unable to map I/O space\n");
   1657 			}
   1658 		}
   1659 
   1660 	}
   1661 
   1662 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1663 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1664 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1665 	if (sc->sc_type < WM_T_82542_2_1)
   1666 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1667 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1668 
   1669 	/* power up chip */
   1670 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1671 	    NULL)) && error != EOPNOTSUPP) {
   1672 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1673 		return;
   1674 	}
   1675 
   1676 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1677 
   1678 	/* Allocation settings */
   1679 	max_type = PCI_INTR_TYPE_MSIX;
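         	/* For MSI-X: one vector per queue, plus one for link status. */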
   1680 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1681 	counts[PCI_INTR_TYPE_MSI] = 1;
   1682 	counts[PCI_INTR_TYPE_INTX] = 1;
   1683 
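         	/* Fall back from MSI-X to MSI, and from MSI to INTx, on failure. */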
   1684 alloc_retry:
   1685 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1686 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1687 		return;
   1688 	}
   1689 
   1690 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1691 		error = wm_setup_msix(sc);
   1692 		if (error) {
   1693 			pci_intr_release(pc, sc->sc_intrs,
   1694 			    counts[PCI_INTR_TYPE_MSIX]);
   1695 
   1696 			/* Setup for MSI: Disable MSI-X */
   1697 			max_type = PCI_INTR_TYPE_MSI;
   1698 			counts[PCI_INTR_TYPE_MSI] = 1;
   1699 			counts[PCI_INTR_TYPE_INTX] = 1;
   1700 			goto alloc_retry;
   1701 		}
    1702 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1703 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1704 		error = wm_setup_legacy(sc);
   1705 		if (error) {
   1706 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1707 			    counts[PCI_INTR_TYPE_MSI]);
   1708 
   1709 			/* The next try is for INTx: Disable MSI */
   1710 			max_type = PCI_INTR_TYPE_INTX;
   1711 			counts[PCI_INTR_TYPE_INTX] = 1;
   1712 			goto alloc_retry;
   1713 		}
   1714 	} else {
   1715 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1716 		error = wm_setup_legacy(sc);
   1717 		if (error) {
   1718 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1719 			    counts[PCI_INTR_TYPE_INTX]);
   1720 			return;
   1721 		}
   1722 	}
   1723 
   1724 	/*
   1725 	 * Check the function ID (unit number of the chip).
   1726 	 */
   1727 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1728 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1729 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1730 	    || (sc->sc_type == WM_T_82580)
   1731 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1732 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1733 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1734 	else
   1735 		sc->sc_funcid = 0;
   1736 
   1737 	/*
   1738 	 * Determine a few things about the bus we're connected to.
   1739 	 */
   1740 	if (sc->sc_type < WM_T_82543) {
   1741 		/* We don't really know the bus characteristics here. */
   1742 		sc->sc_bus_speed = 33;
   1743 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1744 		/*
    1745 		 * CSA (Communication Streaming Architecture) is about as fast
    1746 		 * as a 32-bit 66MHz PCI bus.
   1747 		 */
   1748 		sc->sc_flags |= WM_F_CSA;
   1749 		sc->sc_bus_speed = 66;
   1750 		aprint_verbose_dev(sc->sc_dev,
   1751 		    "Communication Streaming Architecture\n");
   1752 		if (sc->sc_type == WM_T_82547) {
   1753 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1754 			callout_setfunc(&sc->sc_txfifo_ch,
   1755 					wm_82547_txfifo_stall, sc);
   1756 			aprint_verbose_dev(sc->sc_dev,
   1757 			    "using 82547 Tx FIFO stall work-around\n");
   1758 		}
   1759 	} else if (sc->sc_type >= WM_T_82571) {
   1760 		sc->sc_flags |= WM_F_PCIE;
   1761 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1762 		    && (sc->sc_type != WM_T_ICH10)
   1763 		    && (sc->sc_type != WM_T_PCH)
   1764 		    && (sc->sc_type != WM_T_PCH2)
   1765 		    && (sc->sc_type != WM_T_PCH_LPT)
   1766 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1767 			/* ICH* and PCH* have no PCIe capability registers */
   1768 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1769 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1770 				NULL) == 0)
   1771 				aprint_error_dev(sc->sc_dev,
   1772 				    "unable to find PCIe capability\n");
   1773 		}
   1774 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1775 	} else {
   1776 		reg = CSR_READ(sc, WMREG_STATUS);
   1777 		if (reg & STATUS_BUS64)
   1778 			sc->sc_flags |= WM_F_BUS64;
   1779 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1780 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1781 
   1782 			sc->sc_flags |= WM_F_PCIX;
   1783 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1784 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1785 				aprint_error_dev(sc->sc_dev,
   1786 				    "unable to find PCIX capability\n");
   1787 			else if (sc->sc_type != WM_T_82545_3 &&
   1788 				 sc->sc_type != WM_T_82546_3) {
   1789 				/*
   1790 				 * Work around a problem caused by the BIOS
   1791 				 * setting the max memory read byte count
   1792 				 * incorrectly.
   1793 				 */
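         				/* Both fields encode the byte count as 512 << n. */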
   1794 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1795 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1796 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1797 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1798 
   1799 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1800 				    PCIX_CMD_BYTECNT_SHIFT;
   1801 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1802 				    PCIX_STATUS_MAXB_SHIFT;
   1803 				if (bytecnt > maxb) {
   1804 					aprint_verbose_dev(sc->sc_dev,
   1805 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1806 					    512 << bytecnt, 512 << maxb);
   1807 					pcix_cmd = (pcix_cmd &
   1808 					    ~PCIX_CMD_BYTECNT_MASK) |
   1809 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1810 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1811 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1812 					    pcix_cmd);
   1813 				}
   1814 			}
   1815 		}
   1816 		/*
   1817 		 * The quad port adapter is special; it has a PCIX-PCIX
   1818 		 * bridge on the board, and can run the secondary bus at
   1819 		 * a higher speed.
   1820 		 */
   1821 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1822 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1823 								      : 66;
   1824 		} else if (sc->sc_flags & WM_F_PCIX) {
   1825 			switch (reg & STATUS_PCIXSPD_MASK) {
   1826 			case STATUS_PCIXSPD_50_66:
   1827 				sc->sc_bus_speed = 66;
   1828 				break;
   1829 			case STATUS_PCIXSPD_66_100:
   1830 				sc->sc_bus_speed = 100;
   1831 				break;
   1832 			case STATUS_PCIXSPD_100_133:
   1833 				sc->sc_bus_speed = 133;
   1834 				break;
   1835 			default:
   1836 				aprint_error_dev(sc->sc_dev,
   1837 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1838 				    reg & STATUS_PCIXSPD_MASK);
   1839 				sc->sc_bus_speed = 66;
   1840 				break;
   1841 			}
   1842 		} else
   1843 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1844 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1845 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1846 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1847 	}
   1848 
   1849 	/* clear interesting stat counters */
   1850 	CSR_READ(sc, WMREG_COLC);
   1851 	CSR_READ(sc, WMREG_RXERRC);
   1852 
    1853 	/* Move PHY control from SMBus to PCIe */
   1854 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1855 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1856 		wm_smbustopci(sc);
   1857 
   1858 	/* Reset the chip to a known state. */
   1859 	wm_reset(sc);
   1860 
   1861 	/* Get some information about the EEPROM. */
   1862 	switch (sc->sc_type) {
   1863 	case WM_T_82542_2_0:
   1864 	case WM_T_82542_2_1:
   1865 	case WM_T_82543:
   1866 	case WM_T_82544:
   1867 		/* Microwire */
   1868 		sc->sc_nvm_wordsize = 64;
   1869 		sc->sc_nvm_addrbits = 6;
   1870 		break;
   1871 	case WM_T_82540:
   1872 	case WM_T_82545:
   1873 	case WM_T_82545_3:
   1874 	case WM_T_82546:
   1875 	case WM_T_82546_3:
   1876 		/* Microwire */
   1877 		reg = CSR_READ(sc, WMREG_EECD);
   1878 		if (reg & EECD_EE_SIZE) {
   1879 			sc->sc_nvm_wordsize = 256;
   1880 			sc->sc_nvm_addrbits = 8;
   1881 		} else {
   1882 			sc->sc_nvm_wordsize = 64;
   1883 			sc->sc_nvm_addrbits = 6;
   1884 		}
   1885 		sc->sc_flags |= WM_F_LOCK_EECD;
   1886 		break;
   1887 	case WM_T_82541:
   1888 	case WM_T_82541_2:
   1889 	case WM_T_82547:
   1890 	case WM_T_82547_2:
   1891 		sc->sc_flags |= WM_F_LOCK_EECD;
   1892 		reg = CSR_READ(sc, WMREG_EECD);
   1893 		if (reg & EECD_EE_TYPE) {
   1894 			/* SPI */
   1895 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1896 			wm_nvm_set_addrbits_size_eecd(sc);
   1897 		} else {
   1898 			/* Microwire */
   1899 			if ((reg & EECD_EE_ABITS) != 0) {
   1900 				sc->sc_nvm_wordsize = 256;
   1901 				sc->sc_nvm_addrbits = 8;
   1902 			} else {
   1903 				sc->sc_nvm_wordsize = 64;
   1904 				sc->sc_nvm_addrbits = 6;
   1905 			}
   1906 		}
   1907 		break;
   1908 	case WM_T_82571:
   1909 	case WM_T_82572:
   1910 		/* SPI */
   1911 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1912 		wm_nvm_set_addrbits_size_eecd(sc);
   1913 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1914 		break;
   1915 	case WM_T_82573:
   1916 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1917 		/* FALLTHROUGH */
   1918 	case WM_T_82574:
   1919 	case WM_T_82583:
   1920 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1921 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1922 			sc->sc_nvm_wordsize = 2048;
   1923 		} else {
   1924 			/* SPI */
   1925 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1926 			wm_nvm_set_addrbits_size_eecd(sc);
   1927 		}
   1928 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1929 		break;
   1930 	case WM_T_82575:
   1931 	case WM_T_82576:
   1932 	case WM_T_82580:
   1933 	case WM_T_I350:
   1934 	case WM_T_I354:
   1935 	case WM_T_80003:
   1936 		/* SPI */
   1937 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1938 		wm_nvm_set_addrbits_size_eecd(sc);
   1939 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1940 		    | WM_F_LOCK_SWSM;
   1941 		break;
   1942 	case WM_T_ICH8:
   1943 	case WM_T_ICH9:
   1944 	case WM_T_ICH10:
   1945 	case WM_T_PCH:
   1946 	case WM_T_PCH2:
   1947 	case WM_T_PCH_LPT:
   1948 		/* FLASH */
   1949 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1950 		sc->sc_nvm_wordsize = 2048;
   1951 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1952 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1953 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1954 			aprint_error_dev(sc->sc_dev,
   1955 			    "can't map FLASH registers\n");
   1956 			goto out;
   1957 		}
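         		/*
         		 * GFPREG describes the NVM region of the flash: the low
         		 * field is its first sector and the field at bit 16 its
         		 * last, in units of ICH_FLASH_SECTOR_SIZE.  The region
         		 * holds two banks, so the final division yields the
         		 * per-bank size in 16-bit words.
         		 */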
   1958 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1959 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1960 		    ICH_FLASH_SECTOR_SIZE;
   1961 		sc->sc_ich8_flash_bank_size =
   1962 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1963 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1964 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1965 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1966 		sc->sc_flashreg_offset = 0;
   1967 		break;
   1968 	case WM_T_PCH_SPT:
   1969 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1970 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1971 		sc->sc_flasht = sc->sc_st;
   1972 		sc->sc_flashh = sc->sc_sh;
   1973 		sc->sc_ich8_flash_base = 0;
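         		/* NVM size in bytes: (STRAP bits [5:1] + 1) * NVM_SIZE_MULTIPLIER */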
   1974 		sc->sc_nvm_wordsize =
   1975 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1976 			* NVM_SIZE_MULTIPLIER;
    1977 		/* That's the size in bytes; we want it in words */
   1978 		sc->sc_nvm_wordsize /= 2;
   1979 		/* assume 2 banks */
   1980 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   1981 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1982 		break;
   1983 	case WM_T_I210:
   1984 	case WM_T_I211:
   1985 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1986 			wm_nvm_set_addrbits_size_eecd(sc);
   1987 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1988 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1989 		} else {
   1990 			sc->sc_nvm_wordsize = INVM_SIZE;
   1991 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1992 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1993 		}
   1994 		break;
   1995 	default:
   1996 		break;
   1997 	}
   1998 
   1999 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2000 	switch (sc->sc_type) {
   2001 	case WM_T_82571:
   2002 	case WM_T_82572:
   2003 		reg = CSR_READ(sc, WMREG_SWSM2);
   2004 		if ((reg & SWSM2_LOCK) == 0) {
   2005 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2006 			force_clear_smbi = true;
   2007 		} else
   2008 			force_clear_smbi = false;
   2009 		break;
   2010 	case WM_T_82573:
   2011 	case WM_T_82574:
   2012 	case WM_T_82583:
   2013 		force_clear_smbi = true;
   2014 		break;
   2015 	default:
   2016 		force_clear_smbi = false;
   2017 		break;
   2018 	}
   2019 	if (force_clear_smbi) {
   2020 		reg = CSR_READ(sc, WMREG_SWSM);
   2021 		if ((reg & SWSM_SMBI) != 0)
   2022 			aprint_error_dev(sc->sc_dev,
   2023 			    "Please update the Bootagent\n");
   2024 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2025 	}
   2026 
   2027 	/*
    2028 	 * Defer printing the EEPROM type until after verifying the checksum.
   2029 	 * This allows the EEPROM type to be printed correctly in the case
   2030 	 * that no EEPROM is attached.
   2031 	 */
   2032 	/*
   2033 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2034 	 * this for later, so we can fail future reads from the EEPROM.
   2035 	 */
   2036 	if (wm_nvm_validate_checksum(sc)) {
   2037 		/*
    2038 		 * Validate a second time, because some PCIe parts fail the
    2039 		 * first check while the link is in a sleep state.
   2040 		 */
   2041 		if (wm_nvm_validate_checksum(sc))
   2042 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2043 	}
   2044 
   2045 	/* Set device properties (macflags) */
   2046 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2047 
   2048 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2049 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2050 	else {
   2051 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2052 		    sc->sc_nvm_wordsize);
   2053 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2054 			aprint_verbose("iNVM");
   2055 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2056 			aprint_verbose("FLASH(HW)");
   2057 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2058 			aprint_verbose("FLASH");
   2059 		else {
   2060 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2061 				eetype = "SPI";
   2062 			else
   2063 				eetype = "MicroWire";
   2064 			aprint_verbose("(%d address bits) %s EEPROM",
   2065 			    sc->sc_nvm_addrbits, eetype);
   2066 		}
   2067 	}
   2068 	wm_nvm_version(sc);
   2069 	aprint_verbose("\n");
   2070 
   2071 	/* Check for I21[01] PLL workaround */
   2072 	if (sc->sc_type == WM_T_I210)
   2073 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2074 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2075 		/* NVM image release 3.25 has a workaround */
   2076 		if ((sc->sc_nvm_ver_major < 3)
   2077 		    || ((sc->sc_nvm_ver_major == 3)
   2078 			&& (sc->sc_nvm_ver_minor < 25))) {
   2079 			aprint_verbose_dev(sc->sc_dev,
   2080 			    "ROM image version %d.%d is older than 3.25\n",
   2081 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2082 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2083 		}
   2084 	}
   2085 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2086 		wm_pll_workaround_i210(sc);
   2087 
   2088 	wm_get_wakeup(sc);
   2089 	switch (sc->sc_type) {
   2090 	case WM_T_82571:
   2091 	case WM_T_82572:
   2092 	case WM_T_82573:
   2093 	case WM_T_82574:
   2094 	case WM_T_82583:
   2095 	case WM_T_80003:
   2096 	case WM_T_ICH8:
   2097 	case WM_T_ICH9:
   2098 	case WM_T_ICH10:
   2099 	case WM_T_PCH:
   2100 	case WM_T_PCH2:
   2101 	case WM_T_PCH_LPT:
   2102 	case WM_T_PCH_SPT:
   2103 		/* Non-AMT based hardware can now take control from firmware */
   2104 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2105 			wm_get_hw_control(sc);
   2106 		break;
   2107 	default:
   2108 		break;
   2109 	}
   2110 
   2111 	/*
    2112 	 * Read the Ethernet address from the EEPROM, unless it was
    2113 	 * already found in the device properties.
   2114 	 */
   2115 	ea = prop_dictionary_get(dict, "mac-address");
   2116 	if (ea != NULL) {
   2117 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2118 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2119 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2120 	} else {
   2121 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2122 			aprint_error_dev(sc->sc_dev,
   2123 			    "unable to read Ethernet address\n");
   2124 			goto out;
   2125 		}
   2126 	}
   2127 
   2128 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2129 	    ether_sprintf(enaddr));
   2130 
   2131 	/*
   2132 	 * Read the config info from the EEPROM, and set up various
   2133 	 * bits in the control registers based on their contents.
   2134 	 */
   2135 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2136 	if (pn != NULL) {
   2137 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2138 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2139 	} else {
   2140 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2141 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2142 			goto out;
   2143 		}
   2144 	}
   2145 
   2146 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2147 	if (pn != NULL) {
   2148 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2149 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2150 	} else {
   2151 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2152 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2153 			goto out;
   2154 		}
   2155 	}
   2156 
   2157 	/* check for WM_F_WOL */
   2158 	switch (sc->sc_type) {
   2159 	case WM_T_82542_2_0:
   2160 	case WM_T_82542_2_1:
   2161 	case WM_T_82543:
   2162 		/* dummy? */
   2163 		eeprom_data = 0;
   2164 		apme_mask = NVM_CFG3_APME;
   2165 		break;
   2166 	case WM_T_82544:
   2167 		apme_mask = NVM_CFG2_82544_APM_EN;
   2168 		eeprom_data = cfg2;
   2169 		break;
   2170 	case WM_T_82546:
   2171 	case WM_T_82546_3:
   2172 	case WM_T_82571:
   2173 	case WM_T_82572:
   2174 	case WM_T_82573:
   2175 	case WM_T_82574:
   2176 	case WM_T_82583:
   2177 	case WM_T_80003:
   2178 	default:
   2179 		apme_mask = NVM_CFG3_APME;
   2180 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2181 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2182 		break;
   2183 	case WM_T_82575:
   2184 	case WM_T_82576:
   2185 	case WM_T_82580:
   2186 	case WM_T_I350:
   2187 	case WM_T_I354: /* XXX ok? */
   2188 	case WM_T_ICH8:
   2189 	case WM_T_ICH9:
   2190 	case WM_T_ICH10:
   2191 	case WM_T_PCH:
   2192 	case WM_T_PCH2:
   2193 	case WM_T_PCH_LPT:
   2194 	case WM_T_PCH_SPT:
   2195 		/* XXX The funcid should be checked on some devices */
   2196 		apme_mask = WUC_APME;
   2197 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2198 		break;
   2199 	}
   2200 
    2201 	/* Set the WM_F_WOL flag if the APME bit read above is set */
   2202 	if ((eeprom_data & apme_mask) != 0)
   2203 		sc->sc_flags |= WM_F_WOL;
   2204 #ifdef WM_DEBUG
   2205 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2206 		printf("WOL\n");
   2207 #endif
   2208 
   2209 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2210 		/* Check NVM for autonegotiation */
   2211 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2212 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2213 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2214 		}
   2215 	}
   2216 
   2217 	/*
    2218 	 * XXX need special handling for some multiple-port cards
    2219 	 * to disable a particular port.
   2220 	 */
   2221 
   2222 	if (sc->sc_type >= WM_T_82544) {
   2223 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2224 		if (pn != NULL) {
   2225 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2226 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2227 		} else {
   2228 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2229 				aprint_error_dev(sc->sc_dev,
   2230 				    "unable to read SWDPIN\n");
   2231 				goto out;
   2232 			}
   2233 		}
   2234 	}
   2235 
   2236 	if (cfg1 & NVM_CFG1_ILOS)
   2237 		sc->sc_ctrl |= CTRL_ILOS;
   2238 
   2239 	/*
   2240 	 * XXX
    2241 	 * This code isn't correct because pins 2 and 3 are located
    2242 	 * at different positions on newer chips. Check all datasheets.
    2243 	 *
    2244 	 * Until this problem is resolved, only apply this to chips up to the 82580.
   2245 	 */
   2246 	if (sc->sc_type <= WM_T_82580) {
   2247 		if (sc->sc_type >= WM_T_82544) {
   2248 			sc->sc_ctrl |=
   2249 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2250 			    CTRL_SWDPIO_SHIFT;
   2251 			sc->sc_ctrl |=
   2252 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2253 			    CTRL_SWDPINS_SHIFT;
   2254 		} else {
   2255 			sc->sc_ctrl |=
   2256 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2257 			    CTRL_SWDPIO_SHIFT;
   2258 		}
   2259 	}
   2260 
   2261 	/* XXX For other than 82580? */
   2262 	if (sc->sc_type == WM_T_82580) {
   2263 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2264 		if (nvmword & __BIT(13))
   2265 			sc->sc_ctrl |= CTRL_ILOS;
   2266 	}
   2267 
   2268 #if 0
   2269 	if (sc->sc_type >= WM_T_82544) {
   2270 		if (cfg1 & NVM_CFG1_IPS0)
   2271 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2272 		if (cfg1 & NVM_CFG1_IPS1)
   2273 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2274 		sc->sc_ctrl_ext |=
   2275 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2276 		    CTRL_EXT_SWDPIO_SHIFT;
   2277 		sc->sc_ctrl_ext |=
   2278 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2279 		    CTRL_EXT_SWDPINS_SHIFT;
   2280 	} else {
   2281 		sc->sc_ctrl_ext |=
   2282 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2283 		    CTRL_EXT_SWDPIO_SHIFT;
   2284 	}
   2285 #endif
   2286 
   2287 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2288 #if 0
   2289 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2290 #endif
   2291 
   2292 	if (sc->sc_type == WM_T_PCH) {
   2293 		uint16_t val;
   2294 
   2295 		/* Save the NVM K1 bit setting */
   2296 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2297 
   2298 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2299 			sc->sc_nvm_k1_enabled = 1;
   2300 		else
   2301 			sc->sc_nvm_k1_enabled = 0;
   2302 	}
   2303 
   2304 	/*
    2305 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize
    2306 	 * the media structures accordingly.
   2307 	 */
   2308 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2309 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2310 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2311 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2312 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2313 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2314 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2315 	} else if (sc->sc_type < WM_T_82543 ||
   2316 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2317 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2318 			aprint_error_dev(sc->sc_dev,
   2319 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2320 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2321 		}
   2322 		wm_tbi_mediainit(sc);
   2323 	} else {
   2324 		switch (sc->sc_type) {
   2325 		case WM_T_82575:
   2326 		case WM_T_82576:
   2327 		case WM_T_82580:
   2328 		case WM_T_I350:
   2329 		case WM_T_I354:
   2330 		case WM_T_I210:
   2331 		case WM_T_I211:
   2332 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2333 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2334 			switch (link_mode) {
   2335 			case CTRL_EXT_LINK_MODE_1000KX:
   2336 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2337 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2338 				break;
   2339 			case CTRL_EXT_LINK_MODE_SGMII:
   2340 				if (wm_sgmii_uses_mdio(sc)) {
   2341 					aprint_verbose_dev(sc->sc_dev,
   2342 					    "SGMII(MDIO)\n");
   2343 					sc->sc_flags |= WM_F_SGMII;
   2344 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2345 					break;
   2346 				}
   2347 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2348 				/*FALLTHROUGH*/
   2349 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2350 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2351 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2352 					if (link_mode
   2353 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2354 						sc->sc_mediatype
   2355 						    = WM_MEDIATYPE_COPPER;
   2356 						sc->sc_flags |= WM_F_SGMII;
   2357 					} else {
   2358 						sc->sc_mediatype
   2359 						    = WM_MEDIATYPE_SERDES;
   2360 						aprint_verbose_dev(sc->sc_dev,
   2361 						    "SERDES\n");
   2362 					}
   2363 					break;
   2364 				}
   2365 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2366 					aprint_verbose_dev(sc->sc_dev,
   2367 					    "SERDES\n");
   2368 
   2369 				/* Change current link mode setting */
   2370 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2371 				switch (sc->sc_mediatype) {
   2372 				case WM_MEDIATYPE_COPPER:
   2373 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2374 					break;
   2375 				case WM_MEDIATYPE_SERDES:
   2376 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2377 					break;
   2378 				default:
   2379 					break;
   2380 				}
   2381 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2382 				break;
   2383 			case CTRL_EXT_LINK_MODE_GMII:
   2384 			default:
   2385 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2386 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2387 				break;
   2388 			}
   2389 
    2391 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2392 				reg |= CTRL_EXT_I2C_ENA;
    2393 			else
    2394 				reg &= ~CTRL_EXT_I2C_ENA;
   2395 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2396 
   2397 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2398 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2399 			else
   2400 				wm_tbi_mediainit(sc);
   2401 			break;
   2402 		default:
   2403 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2404 				aprint_error_dev(sc->sc_dev,
   2405 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2406 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2407 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2408 		}
   2409 	}
   2410 
   2411 	ifp = &sc->sc_ethercom.ec_if;
   2412 	xname = device_xname(sc->sc_dev);
   2413 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2414 	ifp->if_softc = sc;
   2415 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2416 	ifp->if_ioctl = wm_ioctl;
   2417 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2418 		ifp->if_start = wm_nq_start;
   2419 		if (sc->sc_nqueues > 1)
   2420 			ifp->if_transmit = wm_nq_transmit;
   2421 	} else
   2422 		ifp->if_start = wm_start;
   2423 	ifp->if_watchdog = wm_watchdog;
   2424 	ifp->if_init = wm_init;
   2425 	ifp->if_stop = wm_stop;
   2426 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2427 	IFQ_SET_READY(&ifp->if_snd);
   2428 
   2429 	/* Check for jumbo frame */
   2430 	switch (sc->sc_type) {
   2431 	case WM_T_82573:
   2432 		/* XXX limited to 9234 if ASPM is disabled */
   2433 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2434 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2435 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2436 		break;
   2437 	case WM_T_82571:
   2438 	case WM_T_82572:
   2439 	case WM_T_82574:
   2440 	case WM_T_82575:
   2441 	case WM_T_82576:
   2442 	case WM_T_82580:
   2443 	case WM_T_I350:
    2444 	case WM_T_I354: /* XXX ok? */
   2445 	case WM_T_I210:
   2446 	case WM_T_I211:
   2447 	case WM_T_80003:
   2448 	case WM_T_ICH9:
   2449 	case WM_T_ICH10:
   2450 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2451 	case WM_T_PCH_LPT:
   2452 	case WM_T_PCH_SPT:
   2453 		/* XXX limited to 9234 */
   2454 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2455 		break;
   2456 	case WM_T_PCH:
   2457 		/* XXX limited to 4096 */
   2458 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2459 		break;
   2460 	case WM_T_82542_2_0:
   2461 	case WM_T_82542_2_1:
   2462 	case WM_T_82583:
   2463 	case WM_T_ICH8:
   2464 		/* No support for jumbo frame */
   2465 		break;
   2466 	default:
   2467 		/* ETHER_MAX_LEN_JUMBO */
   2468 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2469 		break;
   2470 	}
   2471 
    2472 	/* If we're an i82543 or greater, we can support VLANs. */
   2473 	if (sc->sc_type >= WM_T_82543)
   2474 		sc->sc_ethercom.ec_capabilities |=
   2475 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2476 
   2477 	/*
    2478 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2479 	 * on i82543 and later.
   2480 	 */
   2481 	if (sc->sc_type >= WM_T_82543) {
   2482 		ifp->if_capabilities |=
   2483 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2484 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2485 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2486 		    IFCAP_CSUM_TCPv6_Tx |
   2487 		    IFCAP_CSUM_UDPv6_Tx;
   2488 	}
   2489 
   2490 	/*
   2491 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2492 	 *
   2493 	 *	82541GI (8086:1076) ... no
   2494 	 *	82572EI (8086:10b9) ... yes
   2495 	 */
   2496 	if (sc->sc_type >= WM_T_82571) {
   2497 		ifp->if_capabilities |=
   2498 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2499 	}
   2500 
   2501 	/*
    2502 	 * If we're an i82544 or greater (except the i82547), we can do
   2503 	 * TCP segmentation offload.
   2504 	 */
   2505 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2506 		ifp->if_capabilities |= IFCAP_TSOv4;
   2507 	}
   2508 
   2509 	if (sc->sc_type >= WM_T_82571) {
   2510 		ifp->if_capabilities |= IFCAP_TSOv6;
   2511 	}
   2512 
   2513 #ifdef WM_MPSAFE
   2514 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2515 #else
   2516 	sc->sc_core_lock = NULL;
   2517 #endif
   2518 
   2519 	/* Attach the interface. */
   2520 	if_initialize(ifp);
   2521 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2522 	ether_ifattach(ifp, enaddr);
   2523 	if_register(ifp);
   2524 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2525 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2526 			  RND_FLAG_DEFAULT);
   2527 
   2528 #ifdef WM_EVENT_COUNTERS
   2529 	/* Attach event counters. */
   2530 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2531 	    NULL, xname, "txsstall");
   2532 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2533 	    NULL, xname, "txdstall");
   2534 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2535 	    NULL, xname, "txfifo_stall");
   2536 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2537 	    NULL, xname, "txdw");
   2538 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2539 	    NULL, xname, "txqe");
   2540 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2541 	    NULL, xname, "rxintr");
   2542 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2543 	    NULL, xname, "linkintr");
   2544 
   2545 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2546 	    NULL, xname, "rxipsum");
   2547 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2548 	    NULL, xname, "rxtusum");
   2549 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2550 	    NULL, xname, "txipsum");
   2551 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2552 	    NULL, xname, "txtusum");
   2553 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2554 	    NULL, xname, "txtusum6");
   2555 
   2556 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2557 	    NULL, xname, "txtso");
   2558 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2559 	    NULL, xname, "txtso6");
   2560 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2561 	    NULL, xname, "txtsopain");
   2562 
   2563 	for (i = 0; i < WM_NTXSEGS; i++) {
   2564 		snprintf(wm_txseg_evcnt_names[i],
   2565 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2566 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2567 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2568 	}
   2569 
   2570 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2571 	    NULL, xname, "txdrop");
   2572 
   2573 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2574 	    NULL, xname, "tu");
   2575 
   2576 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2577 	    NULL, xname, "tx_xoff");
   2578 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2579 	    NULL, xname, "tx_xon");
   2580 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2581 	    NULL, xname, "rx_xoff");
   2582 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2583 	    NULL, xname, "rx_xon");
   2584 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2585 	    NULL, xname, "rx_macctl");
   2586 #endif /* WM_EVENT_COUNTERS */
   2587 
   2588 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2589 		pmf_class_network_register(self, ifp);
   2590 	else
   2591 		aprint_error_dev(self, "couldn't establish power handler\n");
   2592 
   2593 	sc->sc_flags |= WM_F_ATTACHED;
   2594  out:
   2595 	return;
   2596 }
   2597 
   2598 /* The detach function (ca_detach) */
   2599 static int
   2600 wm_detach(device_t self, int flags __unused)
   2601 {
   2602 	struct wm_softc *sc = device_private(self);
   2603 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2604 	int i;
   2605 #ifndef WM_MPSAFE
   2606 	int s;
   2607 #endif
   2608 
   2609 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2610 		return 0;
   2611 
   2612 #ifndef WM_MPSAFE
   2613 	s = splnet();
   2614 #endif
   2615 	/* Stop the interface. Callouts are stopped in it. */
   2616 	wm_stop(ifp, 1);
   2617 
   2618 #ifndef WM_MPSAFE
   2619 	splx(s);
   2620 #endif
   2621 
   2622 	pmf_device_deregister(self);
   2623 
   2624 	/* Tell the firmware about the release */
   2625 	WM_CORE_LOCK(sc);
   2626 	wm_release_manageability(sc);
   2627 	wm_release_hw_control(sc);
   2628 	WM_CORE_UNLOCK(sc);
   2629 
   2630 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2631 
   2632 	/* Delete all remaining media. */
   2633 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2634 
   2635 	ether_ifdetach(ifp);
   2636 	if_detach(ifp);
   2637 	if_percpuq_destroy(sc->sc_ipq);
   2638 
   2639 	/* Unload RX dmamaps and free mbufs */
   2640 	for (i = 0; i < sc->sc_nqueues; i++) {
   2641 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2642 		WM_RX_LOCK(rxq);
   2643 		wm_rxdrain(rxq);
   2644 		WM_RX_UNLOCK(rxq);
   2645 	}
   2646 	/* Must unlock here */
   2647 
   2648 	/* Disestablish the interrupt handler */
   2649 	for (i = 0; i < sc->sc_nintrs; i++) {
   2650 		if (sc->sc_ihs[i] != NULL) {
   2651 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2652 			sc->sc_ihs[i] = NULL;
   2653 		}
   2654 	}
   2655 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2656 
   2657 	wm_free_txrx_queues(sc);
   2658 
   2659 	/* Unmap the registers */
   2660 	if (sc->sc_ss) {
   2661 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2662 		sc->sc_ss = 0;
   2663 	}
   2664 	if (sc->sc_ios) {
   2665 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2666 		sc->sc_ios = 0;
   2667 	}
   2668 	if (sc->sc_flashs) {
   2669 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2670 		sc->sc_flashs = 0;
   2671 	}
   2672 
   2673 	if (sc->sc_core_lock)
   2674 		mutex_obj_free(sc->sc_core_lock);
   2675 
   2676 	return 0;
   2677 }
   2678 
   2679 static bool
   2680 wm_suspend(device_t self, const pmf_qual_t *qual)
   2681 {
   2682 	struct wm_softc *sc = device_private(self);
   2683 
   2684 	wm_release_manageability(sc);
   2685 	wm_release_hw_control(sc);
   2686 #ifdef WM_WOL
   2687 	wm_enable_wakeup(sc);
   2688 #endif
   2689 
   2690 	return true;
   2691 }
   2692 
   2693 static bool
   2694 wm_resume(device_t self, const pmf_qual_t *qual)
   2695 {
   2696 	struct wm_softc *sc = device_private(self);
   2697 
   2698 	wm_init_manageability(sc);
   2699 
   2700 	return true;
   2701 }
   2702 
   2703 /*
   2704  * wm_watchdog:		[ifnet interface function]
   2705  *
   2706  *	Watchdog timer handler.
   2707  */
   2708 static void
   2709 wm_watchdog(struct ifnet *ifp)
   2710 {
   2711 	int qid;
   2712 	struct wm_softc *sc = ifp->if_softc;
   2713 
   2714 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2715 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2716 
   2717 		wm_watchdog_txq(ifp, txq);
   2718 	}
   2719 
   2720 	/* Reset the interface. */
   2721 	(void) wm_init(ifp);
   2722 
   2723 	/*
    2724 	 * There is still some upper-layer processing that calls
    2725 	 * ifp->if_start(), e.g. ALTQ.
   2726 	 */
   2727 	/* Try to get more packets going. */
   2728 	ifp->if_start(ifp);
   2729 }
   2730 
   2731 static void
   2732 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2733 {
   2734 	struct wm_softc *sc = ifp->if_softc;
   2735 
   2736 	/*
   2737 	 * Since we're using delayed interrupts, sweep up
   2738 	 * before we report an error.
   2739 	 */
   2740 	WM_TX_LOCK(txq);
   2741 	wm_txeof(sc, txq);
   2742 	WM_TX_UNLOCK(txq);
   2743 
   2744 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2745 #ifdef WM_DEBUG
   2746 		int i, j;
   2747 		struct wm_txsoft *txs;
   2748 #endif
   2749 		log(LOG_ERR,
   2750 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2751 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2752 		    txq->txq_next);
   2753 		ifp->if_oerrors++;
   2754 #ifdef WM_DEBUG
    2755 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2756 		    i = WM_NEXTTXS(txq, i)) {
    2757 			txs = &txq->txq_soft[i];
    2758 			printf("txs %d tx %d -> %d\n",
    2759 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2760 			for (j = txs->txs_firstdesc; ;
    2761 			    j = WM_NEXTTX(txq, j)) {
    2762 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2763 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2764 				printf("\t %#08x%08x\n",
    2765 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2766 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2767 				if (j == txs->txs_lastdesc)
    2768 					break;
    2769 			}
    2770 		}
   2771 #endif
   2772 	}
   2773 }
   2774 
   2775 /*
   2776  * wm_tick:
   2777  *
   2778  *	One second timer, used to check link status, sweep up
   2779  *	completed transmit jobs, etc.
   2780  */
   2781 static void
   2782 wm_tick(void *arg)
   2783 {
   2784 	struct wm_softc *sc = arg;
   2785 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2786 #ifndef WM_MPSAFE
   2787 	int s;
   2788 
   2789 	s = splnet();
   2790 #endif
   2791 
   2792 	WM_CORE_LOCK(sc);
   2793 
   2794 	if (sc->sc_stopping)
   2795 		goto out;
   2796 
   2797 	if (sc->sc_type >= WM_T_82542_2_1) {
   2798 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2799 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2800 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2801 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2802 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2803 	}
   2804 
   2805 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2806 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2807 	    + CSR_READ(sc, WMREG_CRCERRS)
   2808 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2809 	    + CSR_READ(sc, WMREG_SYMERRC)
   2810 	    + CSR_READ(sc, WMREG_RXERRC)
   2811 	    + CSR_READ(sc, WMREG_SEC)
   2812 	    + CSR_READ(sc, WMREG_CEXTERR)
   2813 	    + CSR_READ(sc, WMREG_RLEC);
   2814 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2815 
   2816 	if (sc->sc_flags & WM_F_HAS_MII)
   2817 		mii_tick(&sc->sc_mii);
   2818 	else if ((sc->sc_type >= WM_T_82575)
   2819 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2820 		wm_serdes_tick(sc);
   2821 	else
   2822 		wm_tbi_tick(sc);
   2823 
   2824 out:
   2825 	WM_CORE_UNLOCK(sc);
   2826 #ifndef WM_MPSAFE
   2827 	splx(s);
   2828 #endif
   2829 
   2830 	if (!sc->sc_stopping)
   2831 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2832 }
   2833 
   2834 static int
   2835 wm_ifflags_cb(struct ethercom *ec)
   2836 {
   2837 	struct ifnet *ifp = &ec->ec_if;
   2838 	struct wm_softc *sc = ifp->if_softc;
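         	/* XOR against the cached flags to find out what changed. */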
   2839 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2840 	int rc = 0;
   2841 
   2842 	WM_CORE_LOCK(sc);
   2843 
   2844 	if (change != 0)
   2845 		sc->sc_if_flags = ifp->if_flags;
   2846 
   2847 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2848 		rc = ENETRESET;
   2849 		goto out;
   2850 	}
   2851 
   2852 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2853 		wm_set_filter(sc);
   2854 
   2855 	wm_set_vlan(sc);
   2856 
   2857 out:
   2858 	WM_CORE_UNLOCK(sc);
   2859 
   2860 	return rc;
   2861 }
   2862 
   2863 /*
   2864  * wm_ioctl:		[ifnet interface function]
   2865  *
   2866  *	Handle control requests from the operator.
   2867  */
   2868 static int
   2869 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2870 {
   2871 	struct wm_softc *sc = ifp->if_softc;
   2872 	struct ifreq *ifr = (struct ifreq *) data;
   2873 	struct ifaddr *ifa = (struct ifaddr *)data;
   2874 	struct sockaddr_dl *sdl;
   2875 	int s, error;
   2876 
   2877 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2878 		device_xname(sc->sc_dev), __func__));
   2879 #ifndef WM_MPSAFE
   2880 	s = splnet();
   2881 #endif
   2882 	switch (cmd) {
   2883 	case SIOCSIFMEDIA:
   2884 	case SIOCGIFMEDIA:
   2885 		WM_CORE_LOCK(sc);
   2886 		/* Flow control requires full-duplex mode. */
   2887 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2888 		    (ifr->ifr_media & IFM_FDX) == 0)
   2889 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2890 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2891 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2892 				/* We can do both TXPAUSE and RXPAUSE. */
   2893 				ifr->ifr_media |=
   2894 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2895 			}
   2896 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2897 		}
   2898 		WM_CORE_UNLOCK(sc);
   2899 #ifdef WM_MPSAFE
   2900 		s = splnet();
   2901 #endif
   2902 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2903 #ifdef WM_MPSAFE
   2904 		splx(s);
   2905 #endif
   2906 		break;
   2907 	case SIOCINITIFADDR:
   2908 		WM_CORE_LOCK(sc);
   2909 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2910 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2911 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2912 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2913 			/* unicast address is first multicast entry */
   2914 			wm_set_filter(sc);
   2915 			error = 0;
   2916 			WM_CORE_UNLOCK(sc);
   2917 			break;
   2918 		}
   2919 		WM_CORE_UNLOCK(sc);
   2920 		/*FALLTHROUGH*/
   2921 	default:
   2922 #ifdef WM_MPSAFE
   2923 		s = splnet();
   2924 #endif
   2925 		/* It may call wm_start, so unlock here */
   2926 		error = ether_ioctl(ifp, cmd, data);
   2927 #ifdef WM_MPSAFE
   2928 		splx(s);
   2929 #endif
   2930 		if (error != ENETRESET)
   2931 			break;
   2932 
   2933 		error = 0;
   2934 
   2935 		if (cmd == SIOCSIFCAP) {
   2936 			error = (*ifp->if_init)(ifp);
   2937 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2938 			;
   2939 		else if (ifp->if_flags & IFF_RUNNING) {
   2940 			/*
   2941 			 * Multicast list has changed; set the hardware filter
   2942 			 * accordingly.
   2943 			 */
   2944 			WM_CORE_LOCK(sc);
   2945 			wm_set_filter(sc);
   2946 			WM_CORE_UNLOCK(sc);
   2947 		}
   2948 		break;
   2949 	}
   2950 
   2951 #ifndef WM_MPSAFE
   2952 	splx(s);
   2953 #endif
   2954 	return error;
   2955 }
   2956 
   2957 /* MAC address related */
   2958 
   2959 /*
    2960  * Get the offset of the MAC address and return it.
    2961  * If an error occurs, return offset 0.
   2962  */
   2963 static uint16_t
   2964 wm_check_alt_mac_addr(struct wm_softc *sc)
   2965 {
   2966 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2967 	uint16_t offset = NVM_OFF_MACADDR;
   2968 
   2969 	/* Try to read alternative MAC address pointer */
   2970 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2971 		return 0;
   2972 
    2973 	/* Check whether the pointer is valid. */
   2974 	if ((offset == 0x0000) || (offset == 0xffff))
   2975 		return 0;
   2976 
   2977 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2978 	/*
    2979 	 * Check whether the alternative MAC address is valid.
    2980 	 * Some cards have a non-0xffff pointer but don't actually
    2981 	 * use an alternative MAC address.
    2982 	 *
    2983 	 * Check that the multicast (group) bit is not set.
   2984 	 */
   2985 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2986 		if (((myea[0] & 0xff) & 0x01) == 0)
   2987 			return offset; /* Found */
   2988 
   2989 	/* Not found */
   2990 	return 0;
   2991 }
   2992 
   2993 static int
   2994 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2995 {
   2996 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2997 	uint16_t offset = NVM_OFF_MACADDR;
   2998 	int do_invert = 0;
   2999 
   3000 	switch (sc->sc_type) {
   3001 	case WM_T_82580:
   3002 	case WM_T_I350:
   3003 	case WM_T_I354:
   3004 		/* EEPROM Top Level Partitioning */
   3005 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3006 		break;
   3007 	case WM_T_82571:
   3008 	case WM_T_82575:
   3009 	case WM_T_82576:
   3010 	case WM_T_80003:
   3011 	case WM_T_I210:
   3012 	case WM_T_I211:
   3013 		offset = wm_check_alt_mac_addr(sc);
   3014 		if (offset == 0)
   3015 			if ((sc->sc_funcid & 0x01) == 1)
   3016 				do_invert = 1;
   3017 		break;
   3018 	default:
   3019 		if ((sc->sc_funcid & 0x01) == 1)
   3020 			do_invert = 1;
   3021 		break;
   3022 	}
   3023 
   3024 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   3025 		myea) != 0)
   3026 		goto bad;
   3027 
   3028 	enaddr[0] = myea[0] & 0xff;
   3029 	enaddr[1] = myea[0] >> 8;
   3030 	enaddr[2] = myea[1] & 0xff;
   3031 	enaddr[3] = myea[1] >> 8;
   3032 	enaddr[4] = myea[2] & 0xff;
   3033 	enaddr[5] = myea[2] >> 8;
   3034 
   3035 	/*
   3036 	 * Toggle the LSB of the MAC address on the second port
   3037 	 * of some dual port cards.
   3038 	 */
   3039 	if (do_invert != 0)
   3040 		enaddr[5] ^= 1;
   3041 
   3042 	return 0;
   3043 
   3044  bad:
   3045 	return -1;
   3046 }
   3047 
   3048 /*
   3049  * wm_set_ral:
   3050  *
    3051  *	Set an entry in the receive address list.
   3052  */
   3053 static void
   3054 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3055 {
   3056 	uint32_t ral_lo, ral_hi;
   3057 
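         	/*
         	 * The six address octets are packed little-endian across the
         	 * register pair: RAL_LO holds octets 0-3, and RAL_HI holds
         	 * octets 4-5 plus the Address Valid bit (RAL_AV).
         	 */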
   3058 	if (enaddr != NULL) {
   3059 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3060 		    (enaddr[3] << 24);
   3061 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3062 		ral_hi |= RAL_AV;
   3063 	} else {
   3064 		ral_lo = 0;
   3065 		ral_hi = 0;
   3066 	}
   3067 
   3068 	if (sc->sc_type >= WM_T_82544) {
   3069 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3070 		    ral_lo);
   3071 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3072 		    ral_hi);
   3073 	} else {
   3074 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3075 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3076 	}
   3077 }
   3078 
   3079 /*
   3080  * wm_mchash:
   3081  *
   3082  *	Compute the hash of the multicast address for the 4096-bit
   3083  *	multicast filter.
   3084  */
   3085 static uint32_t
   3086 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3087 {
   3088 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3089 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3090 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3091 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3092 	uint32_t hash;
   3093 
   3094 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3095 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3096 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3097 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3098 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3099 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3100 		return (hash & 0x3ff);
   3101 	}
   3102 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3103 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3104 
   3105 	return (hash & 0xfff);
   3106 }
   3107 
   3108 /*
   3109  * wm_set_filter:
   3110  *
   3111  *	Set up the receive filter.
   3112  */
   3113 static void
   3114 wm_set_filter(struct wm_softc *sc)
   3115 {
   3116 	struct ethercom *ec = &sc->sc_ethercom;
   3117 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3118 	struct ether_multi *enm;
   3119 	struct ether_multistep step;
   3120 	bus_addr_t mta_reg;
   3121 	uint32_t hash, reg, bit;
   3122 	int i, size, ralmax;
   3123 
   3124 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3125 		device_xname(sc->sc_dev), __func__));
   3126 	if (sc->sc_type >= WM_T_82544)
   3127 		mta_reg = WMREG_CORDOVA_MTA;
   3128 	else
   3129 		mta_reg = WMREG_MTA;
   3130 
   3131 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3132 
   3133 	if (ifp->if_flags & IFF_BROADCAST)
   3134 		sc->sc_rctl |= RCTL_BAM;
   3135 	if (ifp->if_flags & IFF_PROMISC) {
   3136 		sc->sc_rctl |= RCTL_UPE;
   3137 		goto allmulti;
   3138 	}
   3139 
   3140 	/*
   3141 	 * Set the station address in the first RAL slot, and
   3142 	 * clear the remaining slots.
   3143 	 */
   3144 	if (sc->sc_type == WM_T_ICH8)
    3145 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3146 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3147 	    || (sc->sc_type == WM_T_PCH))
   3148 		size = WM_RAL_TABSIZE_ICH8;
   3149 	else if (sc->sc_type == WM_T_PCH2)
   3150 		size = WM_RAL_TABSIZE_PCH2;
    3151 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3152 		size = WM_RAL_TABSIZE_PCH_LPT;
   3153 	else if (sc->sc_type == WM_T_82575)
   3154 		size = WM_RAL_TABSIZE_82575;
   3155 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3156 		size = WM_RAL_TABSIZE_82576;
   3157 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3158 		size = WM_RAL_TABSIZE_I350;
   3159 	else
   3160 		size = WM_RAL_TABSIZE;
   3161 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3162 
   3163 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3164 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3165 		switch (i) {
   3166 		case 0:
   3167 			/* We can use all entries */
   3168 			ralmax = size;
   3169 			break;
   3170 		case 1:
   3171 			/* Only RAR[0] */
   3172 			ralmax = 1;
   3173 			break;
   3174 		default:
   3175 			/* available SHRA + RAR[0] */
   3176 			ralmax = i + 1;
   3177 		}
   3178 	} else
   3179 		ralmax = size;
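         	/*
         	 * Clear only the writable entries: on PCH_LPT/PCH_SPT the
         	 * FWSM_WLOCK_MAC field read above limits how many slots may
         	 * be written, so the locked ones are skipped.
         	 */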
   3180 	for (i = 1; i < size; i++) {
   3181 		if (i < ralmax)
   3182 			wm_set_ral(sc, NULL, i);
   3183 	}
   3184 
   3185 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3186 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3187 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3188 	    || (sc->sc_type == WM_T_PCH_SPT))
   3189 		size = WM_ICH8_MC_TABSIZE;
   3190 	else
   3191 		size = WM_MC_TABSIZE;
   3192 	/* Clear out the multicast table. */
   3193 	for (i = 0; i < size; i++)
   3194 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3195 
   3196 	ETHER_FIRST_MULTI(step, ec, enm);
   3197 	while (enm != NULL) {
   3198 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3199 			/*
   3200 			 * We must listen to a range of multicast addresses.
   3201 			 * For now, just accept all multicasts, rather than
   3202 			 * trying to set only those filter bits needed to match
   3203 			 * the range.  (At this time, the only use of address
   3204 			 * ranges is for IP multicast routing, for which the
   3205 			 * range is big enough to require all bits set.)
   3206 			 */
   3207 			goto allmulti;
   3208 		}
   3209 
   3210 		hash = wm_mchash(sc, enm->enm_addrlo);
   3211 
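         		/*
         		 * The hash selects one 32-bit MTA register (hash >> 5)
         		 * and a bit within it (hash & 0x1f); e.g. hash 0x123
         		 * sets bit 3 of MTA[9].
         		 */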
   3212 		reg = (hash >> 5);
   3213 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3214 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3215 		    || (sc->sc_type == WM_T_PCH2)
   3216 		    || (sc->sc_type == WM_T_PCH_LPT)
   3217 		    || (sc->sc_type == WM_T_PCH_SPT))
   3218 			reg &= 0x1f;
   3219 		else
   3220 			reg &= 0x7f;
   3221 		bit = hash & 0x1f;
   3222 
   3223 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3224 		hash |= 1U << bit;
   3225 
   3226 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3227 			/*
    3228 			 * 82544 Errata 9: Certain registers cannot be written
   3229 			 * with particular alignments in PCI-X bus operation
   3230 			 * (FCAH, MTA and VFTA).
   3231 			 */
   3232 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3233 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3234 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3235 		} else
   3236 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3237 
   3238 		ETHER_NEXT_MULTI(step, enm);
   3239 	}
   3240 
   3241 	ifp->if_flags &= ~IFF_ALLMULTI;
   3242 	goto setit;
   3243 
   3244  allmulti:
   3245 	ifp->if_flags |= IFF_ALLMULTI;
   3246 	sc->sc_rctl |= RCTL_MPE;
   3247 
   3248  setit:
   3249 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3250 }
   3251 
   3252 /* Reset and init related */
   3253 
   3254 static void
   3255 wm_set_vlan(struct wm_softc *sc)
   3256 {
   3257 
   3258 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3259 		device_xname(sc->sc_dev), __func__));
   3260 	/* Deal with VLAN enables. */
   3261 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3262 		sc->sc_ctrl |= CTRL_VME;
   3263 	else
   3264 		sc->sc_ctrl &= ~CTRL_VME;
   3265 
   3266 	/* Write the control registers. */
   3267 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3268 }
   3269 
   3270 static void
   3271 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3272 {
   3273 	uint32_t gcr;
   3274 	pcireg_t ctrl2;
   3275 
   3276 	gcr = CSR_READ(sc, WMREG_GCR);
   3277 
    3278 	/* Only take action if the timeout value is still the default of 0 */
   3279 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3280 		goto out;
   3281 
   3282 	if ((gcr & GCR_CAP_VER2) == 0) {
   3283 		gcr |= GCR_CMPL_TMOUT_10MS;
   3284 		goto out;
   3285 	}
   3286 
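         	/*
         	 * Capability version 2 devices take the completion timeout
         	 * from the PCIe Device Control 2 register rather than from
         	 * GCR.
         	 */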
   3287 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3288 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3289 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3290 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3291 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3292 
   3293 out:
   3294 	/* Disable completion timeout resend */
   3295 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3296 
   3297 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3298 }
   3299 
   3300 void
   3301 wm_get_auto_rd_done(struct wm_softc *sc)
   3302 {
   3303 	int i;
   3304 
   3305 	/* wait for eeprom to reload */
   3306 	switch (sc->sc_type) {
   3307 	case WM_T_82571:
   3308 	case WM_T_82572:
   3309 	case WM_T_82573:
   3310 	case WM_T_82574:
   3311 	case WM_T_82583:
   3312 	case WM_T_82575:
   3313 	case WM_T_82576:
   3314 	case WM_T_82580:
   3315 	case WM_T_I350:
   3316 	case WM_T_I354:
   3317 	case WM_T_I210:
   3318 	case WM_T_I211:
   3319 	case WM_T_80003:
   3320 	case WM_T_ICH8:
   3321 	case WM_T_ICH9:
   3322 		for (i = 0; i < 10; i++) {
   3323 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3324 				break;
   3325 			delay(1000);
   3326 		}
   3327 		if (i == 10) {
   3328 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3329 			    "complete\n", device_xname(sc->sc_dev));
   3330 		}
   3331 		break;
   3332 	default:
   3333 		break;
   3334 	}
   3335 }
   3336 
   3337 void
   3338 wm_lan_init_done(struct wm_softc *sc)
   3339 {
   3340 	uint32_t reg = 0;
   3341 	int i;
   3342 
    3343 	/* Wait for the LAN init to complete */
   3344 	switch (sc->sc_type) {
   3345 	case WM_T_ICH10:
   3346 	case WM_T_PCH:
   3347 	case WM_T_PCH2:
   3348 	case WM_T_PCH_LPT:
   3349 	case WM_T_PCH_SPT:
   3350 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3351 			reg = CSR_READ(sc, WMREG_STATUS);
   3352 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3353 				break;
   3354 			delay(100);
   3355 		}
   3356 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3357 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3358 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3359 		}
   3360 		break;
   3361 	default:
   3362 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3363 		    __func__);
   3364 		break;
   3365 	}
   3366 
   3367 	reg &= ~STATUS_LAN_INIT_DONE;
   3368 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3369 }
   3370 
   3371 void
   3372 wm_get_cfg_done(struct wm_softc *sc)
   3373 {
   3374 	int mask;
   3375 	uint32_t reg;
   3376 	int i;
   3377 
    3378 	/* Wait for the configuration to complete */
   3379 	switch (sc->sc_type) {
   3380 	case WM_T_82542_2_0:
   3381 	case WM_T_82542_2_1:
   3382 		/* null */
   3383 		break;
   3384 	case WM_T_82543:
   3385 	case WM_T_82544:
   3386 	case WM_T_82540:
   3387 	case WM_T_82545:
   3388 	case WM_T_82545_3:
   3389 	case WM_T_82546:
   3390 	case WM_T_82546_3:
   3391 	case WM_T_82541:
   3392 	case WM_T_82541_2:
   3393 	case WM_T_82547:
   3394 	case WM_T_82547_2:
   3395 	case WM_T_82573:
   3396 	case WM_T_82574:
   3397 	case WM_T_82583:
   3398 		/* generic */
   3399 		delay(10*1000);
   3400 		break;
   3401 	case WM_T_80003:
   3402 	case WM_T_82571:
   3403 	case WM_T_82572:
   3404 	case WM_T_82575:
   3405 	case WM_T_82576:
   3406 	case WM_T_82580:
   3407 	case WM_T_I350:
   3408 	case WM_T_I354:
   3409 	case WM_T_I210:
   3410 	case WM_T_I211:
   3411 		if (sc->sc_type == WM_T_82571) {
   3412 			/* Only 82571 shares port 0 */
   3413 			mask = EEMNGCTL_CFGDONE_0;
   3414 		} else
   3415 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
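         		/* Poll the selected CFGDONE bit, up to WM_PHY_CFG_TIMEOUT ms. */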
   3416 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3417 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3418 				break;
   3419 			delay(1000);
   3420 		}
   3421 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3422 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3423 				device_xname(sc->sc_dev), __func__));
   3424 		}
   3425 		break;
   3426 	case WM_T_ICH8:
   3427 	case WM_T_ICH9:
   3428 	case WM_T_ICH10:
   3429 	case WM_T_PCH:
   3430 	case WM_T_PCH2:
   3431 	case WM_T_PCH_LPT:
   3432 	case WM_T_PCH_SPT:
   3433 		delay(10*1000);
   3434 		if (sc->sc_type >= WM_T_ICH10)
   3435 			wm_lan_init_done(sc);
   3436 		else
   3437 			wm_get_auto_rd_done(sc);
   3438 
   3439 		reg = CSR_READ(sc, WMREG_STATUS);
   3440 		if ((reg & STATUS_PHYRA) != 0)
   3441 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3442 		break;
   3443 	default:
   3444 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3445 		    __func__);
   3446 		break;
   3447 	}
   3448 }
   3449 
   3450 /* Init hardware bits */
   3451 void
   3452 wm_initialize_hardware_bits(struct wm_softc *sc)
   3453 {
   3454 	uint32_t tarc0, tarc1, reg;
   3455 
   3456 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3457 		device_xname(sc->sc_dev), __func__));
   3458 	/* For 82571 variant, 80003 and ICHs */
   3459 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3460 	    || (sc->sc_type >= WM_T_80003)) {
   3461 
   3462 		/* Transmit Descriptor Control 0 */
   3463 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3464 		reg |= TXDCTL_COUNT_DESC;
   3465 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3466 
   3467 		/* Transmit Descriptor Control 1 */
   3468 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3469 		reg |= TXDCTL_COUNT_DESC;
   3470 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3471 
   3472 		/* TARC0 */
   3473 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3474 		switch (sc->sc_type) {
   3475 		case WM_T_82571:
   3476 		case WM_T_82572:
   3477 		case WM_T_82573:
   3478 		case WM_T_82574:
   3479 		case WM_T_82583:
   3480 		case WM_T_80003:
   3481 			/* Clear bits 30..27 */
   3482 			tarc0 &= ~__BITS(30, 27);
   3483 			break;
   3484 		default:
   3485 			break;
   3486 		}
   3487 
   3488 		switch (sc->sc_type) {
   3489 		case WM_T_82571:
   3490 		case WM_T_82572:
   3491 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3492 
   3493 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3494 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3495 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3496 			/* 8257[12] Errata No.7 */
    3497 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3498 
   3499 			/* TARC1 bit 28 */
   3500 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3501 				tarc1 &= ~__BIT(28);
   3502 			else
   3503 				tarc1 |= __BIT(28);
   3504 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3505 
   3506 			/*
   3507 			 * 8257[12] Errata No.13
    3508 			 * Disable Dynamic Clock Gating.
   3509 			 */
   3510 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3511 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3512 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3513 			break;
   3514 		case WM_T_82573:
   3515 		case WM_T_82574:
   3516 		case WM_T_82583:
   3517 			if ((sc->sc_type == WM_T_82574)
   3518 			    || (sc->sc_type == WM_T_82583))
   3519 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3520 
   3521 			/* Extended Device Control */
   3522 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3523 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3524 			reg |= __BIT(22);	/* Set bit 22 */
   3525 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3526 
   3527 			/* Device Control */
   3528 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3529 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3530 
   3531 			/* PCIe Control Register */
   3532 			/*
   3533 			 * 82573 Errata (unknown).
   3534 			 *
   3535 			 * 82574 Errata 25 and 82583 Errata 12
   3536 			 * "Dropped Rx Packets":
    3537 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3538 			 */
   3539 			reg = CSR_READ(sc, WMREG_GCR);
   3540 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3541 			CSR_WRITE(sc, WMREG_GCR, reg);
   3542 
   3543 			if ((sc->sc_type == WM_T_82574)
   3544 			    || (sc->sc_type == WM_T_82583)) {
   3545 				/*
   3546 				 * Document says this bit must be set for
   3547 				 * proper operation.
   3548 				 */
   3549 				reg = CSR_READ(sc, WMREG_GCR);
   3550 				reg |= __BIT(22);
   3551 				CSR_WRITE(sc, WMREG_GCR, reg);
   3552 
   3553 				/*
    3554 				 * Apply a workaround for a documented
    3555 				 * hardware erratum: some PCIe completions
    3556 				 * are error prone or unreliable,
    3557 				 * particularly with ASPM enabled. Without
    3558 				 * the fix, the issue can cause Tx
    3559 				 * timeouts.
   3560 				 */
   3561 				reg = CSR_READ(sc, WMREG_GCR2);
   3562 				reg |= __BIT(0);
   3563 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3564 			}
   3565 			break;
   3566 		case WM_T_80003:
   3567 			/* TARC0 */
   3568 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3569 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3570 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3571 
   3572 			/* TARC1 bit 28 */
   3573 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3574 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3575 				tarc1 &= ~__BIT(28);
   3576 			else
   3577 				tarc1 |= __BIT(28);
   3578 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3579 			break;
   3580 		case WM_T_ICH8:
   3581 		case WM_T_ICH9:
   3582 		case WM_T_ICH10:
   3583 		case WM_T_PCH:
   3584 		case WM_T_PCH2:
   3585 		case WM_T_PCH_LPT:
   3586 		case WM_T_PCH_SPT:
   3587 			/* TARC0 */
   3588 			if ((sc->sc_type == WM_T_ICH8)
   3589 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3590 				/* Set TARC0 bits 29 and 28 */
   3591 				tarc0 |= __BITS(29, 28);
   3592 			}
   3593 			/* Set TARC0 bits 23,24,26,27 */
   3594 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3595 
   3596 			/* CTRL_EXT */
   3597 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3598 			reg |= __BIT(22);	/* Set bit 22 */
   3599 			/*
   3600 			 * Enable PHY low-power state when MAC is at D3
   3601 			 * w/o WoL
   3602 			 */
   3603 			if (sc->sc_type >= WM_T_PCH)
   3604 				reg |= CTRL_EXT_PHYPDEN;
   3605 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3606 
   3607 			/* TARC1 */
   3608 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3609 			/* bit 28 */
   3610 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3611 				tarc1 &= ~__BIT(28);
   3612 			else
   3613 				tarc1 |= __BIT(28);
   3614 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3615 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3616 
   3617 			/* Device Status */
   3618 			if (sc->sc_type == WM_T_ICH8) {
   3619 				reg = CSR_READ(sc, WMREG_STATUS);
   3620 				reg &= ~__BIT(31);
   3621 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3622 
   3623 			}
   3624 
   3625 			/* IOSFPC */
   3626 			if (sc->sc_type == WM_T_PCH_SPT) {
   3627 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3628 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3629 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3630 			}
   3631 			/*
    3632 			 * To work around a descriptor data corruption
    3633 			 * issue with NFS v2 UDP traffic, just disable the
    3634 			 * NFS filtering capability.
   3635 			 */
   3636 			reg = CSR_READ(sc, WMREG_RFCTL);
   3637 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3638 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3639 			break;
   3640 		default:
   3641 			break;
   3642 		}
   3643 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3644 
   3645 		/*
   3646 		 * 8257[12] Errata No.52 and some others.
   3647 		 * Avoid RSS Hash Value bug.
   3648 		 */
   3649 		switch (sc->sc_type) {
   3650 		case WM_T_82571:
   3651 		case WM_T_82572:
   3652 		case WM_T_82573:
   3653 		case WM_T_80003:
   3654 		case WM_T_ICH8:
   3655 			reg = CSR_READ(sc, WMREG_RFCTL);
    3656 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3657 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3658 			break;
   3659 		default:
   3660 			break;
   3661 		}
   3662 	}
   3663 }
   3664 
   3665 static uint32_t
   3666 wm_rxpbs_adjust_82580(uint32_t val)
   3667 {
   3668 	uint32_t rv = 0;
   3669 
   3670 	if (val < __arraycount(wm_82580_rxpbs_table))
   3671 		rv = wm_82580_rxpbs_table[val];
   3672 
   3673 	return rv;
   3674 }
   3675 
   3676 /*
   3677  * wm_reset:
   3678  *
   3679  *	Reset the i82542 chip.
   3680  */
   3681 static void
   3682 wm_reset(struct wm_softc *sc)
   3683 {
   3684 	int phy_reset = 0;
   3685 	int i, error = 0;
   3686 	uint32_t reg, mask;
   3687 
   3688 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3689 		device_xname(sc->sc_dev), __func__));
   3690 	/*
   3691 	 * Allocate on-chip memory according to the MTU size.
   3692 	 * The Packet Buffer Allocation register must be written
   3693 	 * before the chip is reset.
   3694 	 */
   3695 	switch (sc->sc_type) {
   3696 	case WM_T_82547:
   3697 	case WM_T_82547_2:
   3698 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3699 		    PBA_22K : PBA_30K;
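         		/*
         		 * The on-chip packet buffer is 40KB in total; sc_pba is
         		 * the receive share and the remainder becomes the Tx
         		 * FIFO, so a jumbo MTU trades Rx buffer for Tx FIFO room.
         		 */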
   3700 		for (i = 0; i < sc->sc_nqueues; i++) {
   3701 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3702 			txq->txq_fifo_head = 0;
   3703 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3704 			txq->txq_fifo_size =
   3705 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3706 			txq->txq_fifo_stall = 0;
   3707 		}
   3708 		break;
   3709 	case WM_T_82571:
   3710 	case WM_T_82572:
    3711 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3712 	case WM_T_80003:
   3713 		sc->sc_pba = PBA_32K;
   3714 		break;
   3715 	case WM_T_82573:
   3716 		sc->sc_pba = PBA_12K;
   3717 		break;
   3718 	case WM_T_82574:
   3719 	case WM_T_82583:
   3720 		sc->sc_pba = PBA_20K;
   3721 		break;
   3722 	case WM_T_82576:
   3723 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3724 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3725 		break;
   3726 	case WM_T_82580:
   3727 	case WM_T_I350:
   3728 	case WM_T_I354:
   3729 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3730 		break;
   3731 	case WM_T_I210:
   3732 	case WM_T_I211:
   3733 		sc->sc_pba = PBA_34K;
   3734 		break;
   3735 	case WM_T_ICH8:
   3736 		/* Workaround for a bit corruption issue in FIFO memory */
   3737 		sc->sc_pba = PBA_8K;
   3738 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3739 		break;
   3740 	case WM_T_ICH9:
   3741 	case WM_T_ICH10:
   3742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3743 		    PBA_14K : PBA_10K;
   3744 		break;
   3745 	case WM_T_PCH:
   3746 	case WM_T_PCH2:
   3747 	case WM_T_PCH_LPT:
   3748 	case WM_T_PCH_SPT:
   3749 		sc->sc_pba = PBA_26K;
   3750 		break;
   3751 	default:
   3752 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3753 		    PBA_40K : PBA_48K;
   3754 		break;
   3755 	}
   3756 	/*
   3757 	 * Only old or non-multiqueue devices have the PBA register
   3758 	 * XXX Need special handling for 82575.
   3759 	 */
   3760 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3761 	    || (sc->sc_type == WM_T_82575))
   3762 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3763 
   3764 	/* Prevent the PCI-E bus from sticking */
   3765 	if (sc->sc_flags & WM_F_PCIE) {
   3766 		int timeout = 800;
   3767 
   3768 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3769 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3770 
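         		/* Wait up to 80ms (800 * 100us) for master requests to drain. */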
   3771 		while (timeout--) {
   3772 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3773 			    == 0)
   3774 				break;
   3775 			delay(100);
   3776 		}
   3777 	}
   3778 
   3779 	/* Set the completion timeout for interface */
   3780 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3781 	    || (sc->sc_type == WM_T_82580)
   3782 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3783 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3784 		wm_set_pcie_completion_timeout(sc);
   3785 
   3786 	/* Clear interrupt */
   3787 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3788 	if (sc->sc_nintrs > 1) {
   3789 		if (sc->sc_type != WM_T_82574) {
   3790 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3791 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3792 		} else {
   3793 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3794 		}
   3795 	}
   3796 
   3797 	/* Stop the transmit and receive processes. */
   3798 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3799 	sc->sc_rctl &= ~RCTL_EN;
   3800 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3801 	CSR_WRITE_FLUSH(sc);
   3802 
   3803 	/* XXX set_tbi_sbp_82543() */
   3804 
   3805 	delay(10*1000);
   3806 
   3807 	/* Must acquire the MDIO ownership before MAC reset */
   3808 	switch (sc->sc_type) {
   3809 	case WM_T_82573:
   3810 	case WM_T_82574:
   3811 	case WM_T_82583:
   3812 		error = wm_get_hw_semaphore_82573(sc);
   3813 		break;
   3814 	default:
   3815 		break;
   3816 	}
   3817 
   3818 	/*
   3819 	 * 82541 Errata 29? & 82547 Errata 28?
   3820 	 * See also the description about PHY_RST bit in CTRL register
   3821 	 * in 8254x_GBe_SDM.pdf.
   3822 	 */
   3823 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3824 		CSR_WRITE(sc, WMREG_CTRL,
   3825 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3826 		CSR_WRITE_FLUSH(sc);
   3827 		delay(5000);
   3828 	}
   3829 
   3830 	switch (sc->sc_type) {
   3831 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3832 	case WM_T_82541:
   3833 	case WM_T_82541_2:
   3834 	case WM_T_82547:
   3835 	case WM_T_82547_2:
   3836 		/*
   3837 		 * On some chipsets, a reset through a memory-mapped write
   3838 		 * cycle can cause the chip to reset before completing the
   3839 		 * write cycle.  This causes major headache that can be
   3840 		 * avoided by issuing the reset via indirect register writes
   3841 		 * through I/O space.
   3842 		 *
   3843 		 * So, if we successfully mapped the I/O BAR at attach time,
   3844 		 * use that.  Otherwise, try our luck with a memory-mapped
   3845 		 * reset.
   3846 		 */
   3847 		if (sc->sc_flags & WM_F_IOH_VALID)
   3848 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3849 		else
   3850 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3851 		break;
   3852 	case WM_T_82545_3:
   3853 	case WM_T_82546_3:
   3854 		/* Use the shadow control register on these chips. */
   3855 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3856 		break;
   3857 	case WM_T_80003:
   3858 		mask = swfwphysem[sc->sc_funcid];
   3859 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3860 		wm_get_swfw_semaphore(sc, mask);
   3861 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3862 		wm_put_swfw_semaphore(sc, mask);
   3863 		break;
   3864 	case WM_T_ICH8:
   3865 	case WM_T_ICH9:
   3866 	case WM_T_ICH10:
   3867 	case WM_T_PCH:
   3868 	case WM_T_PCH2:
   3869 	case WM_T_PCH_LPT:
   3870 	case WM_T_PCH_SPT:
   3871 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3872 		if (wm_phy_resetisblocked(sc) == false) {
   3873 			/*
   3874 			 * Gate automatic PHY configuration by hardware on
   3875 			 * non-managed 82579
   3876 			 */
   3877 			if ((sc->sc_type == WM_T_PCH2)
   3878 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3879 				== 0))
   3880 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3881 
   3882 			reg |= CTRL_PHY_RESET;
   3883 			phy_reset = 1;
   3884 		} else
   3885 			printf("XXX reset is blocked!!!\n");
   3886 		wm_get_swfwhw_semaphore(sc);
   3887 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3888 		/* Don't insert a completion barrier during reset */
   3889 		delay(20*1000);
   3890 		wm_put_swfwhw_semaphore(sc);
   3891 		break;
   3892 	case WM_T_82580:
   3893 	case WM_T_I350:
   3894 	case WM_T_I354:
   3895 	case WM_T_I210:
   3896 	case WM_T_I211:
   3897 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3898 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3899 			CSR_WRITE_FLUSH(sc);
   3900 		delay(5000);
   3901 		break;
   3902 	case WM_T_82542_2_0:
   3903 	case WM_T_82542_2_1:
   3904 	case WM_T_82543:
   3905 	case WM_T_82540:
   3906 	case WM_T_82545:
   3907 	case WM_T_82546:
   3908 	case WM_T_82571:
   3909 	case WM_T_82572:
   3910 	case WM_T_82573:
   3911 	case WM_T_82574:
   3912 	case WM_T_82575:
   3913 	case WM_T_82576:
   3914 	case WM_T_82583:
   3915 	default:
   3916 		/* Everything else can safely use the documented method. */
   3917 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3918 		break;
   3919 	}
   3920 
   3921 	/* Must release the MDIO ownership after MAC reset */
   3922 	switch (sc->sc_type) {
   3923 	case WM_T_82573:
   3924 	case WM_T_82574:
   3925 	case WM_T_82583:
   3926 		if (error == 0)
   3927 			wm_put_hw_semaphore_82573(sc);
   3928 		break;
   3929 	default:
   3930 		break;
   3931 	}
   3932 
   3933 	if (phy_reset != 0)
   3934 		wm_get_cfg_done(sc);
   3935 
   3936 	/* reload EEPROM */
   3937 	switch (sc->sc_type) {
   3938 	case WM_T_82542_2_0:
   3939 	case WM_T_82542_2_1:
   3940 	case WM_T_82543:
   3941 	case WM_T_82544:
   3942 		delay(10);
   3943 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3944 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3945 		CSR_WRITE_FLUSH(sc);
   3946 		delay(2000);
   3947 		break;
   3948 	case WM_T_82540:
   3949 	case WM_T_82545:
   3950 	case WM_T_82545_3:
   3951 	case WM_T_82546:
   3952 	case WM_T_82546_3:
   3953 		delay(5*1000);
   3954 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3955 		break;
   3956 	case WM_T_82541:
   3957 	case WM_T_82541_2:
   3958 	case WM_T_82547:
   3959 	case WM_T_82547_2:
   3960 		delay(20000);
   3961 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3962 		break;
   3963 	case WM_T_82571:
   3964 	case WM_T_82572:
   3965 	case WM_T_82573:
   3966 	case WM_T_82574:
   3967 	case WM_T_82583:
   3968 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3969 			delay(10);
   3970 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3971 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3972 			CSR_WRITE_FLUSH(sc);
   3973 		}
   3974 		/* check EECD_EE_AUTORD */
   3975 		wm_get_auto_rd_done(sc);
   3976 		/*
    3977 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   3978 		 * is set.
   3979 		 */
   3980 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3981 		    || (sc->sc_type == WM_T_82583))
   3982 			delay(25*1000);
   3983 		break;
   3984 	case WM_T_82575:
   3985 	case WM_T_82576:
   3986 	case WM_T_82580:
   3987 	case WM_T_I350:
   3988 	case WM_T_I354:
   3989 	case WM_T_I210:
   3990 	case WM_T_I211:
   3991 	case WM_T_80003:
   3992 		/* check EECD_EE_AUTORD */
   3993 		wm_get_auto_rd_done(sc);
   3994 		break;
   3995 	case WM_T_ICH8:
   3996 	case WM_T_ICH9:
   3997 	case WM_T_ICH10:
   3998 	case WM_T_PCH:
   3999 	case WM_T_PCH2:
   4000 	case WM_T_PCH_LPT:
   4001 	case WM_T_PCH_SPT:
   4002 		break;
   4003 	default:
   4004 		panic("%s: unknown type\n", __func__);
   4005 	}
   4006 
   4007 	/* Check whether EEPROM is present or not */
   4008 	switch (sc->sc_type) {
   4009 	case WM_T_82575:
   4010 	case WM_T_82576:
   4011 	case WM_T_82580:
   4012 	case WM_T_I350:
   4013 	case WM_T_I354:
   4014 	case WM_T_ICH8:
   4015 	case WM_T_ICH9:
   4016 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4017 			/* Not found */
   4018 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4019 			if (sc->sc_type == WM_T_82575)
   4020 				wm_reset_init_script_82575(sc);
   4021 		}
   4022 		break;
   4023 	default:
   4024 		break;
   4025 	}
   4026 
   4027 	if ((sc->sc_type == WM_T_82580)
   4028 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4029 		/* clear global device reset status bit */
   4030 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4031 	}
   4032 
   4033 	/* Clear any pending interrupt events. */
   4034 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4035 	reg = CSR_READ(sc, WMREG_ICR);
   4036 	if (sc->sc_nintrs > 1) {
   4037 		if (sc->sc_type != WM_T_82574) {
   4038 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4039 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4040 		} else
   4041 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4042 	}
   4043 
   4044 	/* reload sc_ctrl */
   4045 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4046 
   4047 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4048 		wm_set_eee_i350(sc);
   4049 
   4050 	/* dummy read from WUC */
   4051 	if (sc->sc_type == WM_T_PCH)
   4052 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4053 	/*
   4054 	 * For PCH, this write will make sure that any noise will be detected
   4055 	 * as a CRC error and be dropped rather than show up as a bad packet
   4056 	 * to the DMA engine
   4057 	 */
   4058 	if (sc->sc_type == WM_T_PCH)
   4059 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4060 
   4061 	if (sc->sc_type >= WM_T_82544)
   4062 		CSR_WRITE(sc, WMREG_WUC, 0);
   4063 
   4064 	wm_reset_mdicnfg_82580(sc);
   4065 
   4066 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4067 		wm_pll_workaround_i210(sc);
   4068 }
   4069 
   4070 /*
   4071  * wm_add_rxbuf:
   4072  *
    4073  *	Add a receive buffer to the indicated descriptor.
   4074  */
   4075 static int
   4076 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4077 {
   4078 	struct wm_softc *sc = rxq->rxq_sc;
   4079 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4080 	struct mbuf *m;
   4081 	int error;
   4082 
   4083 	KASSERT(WM_RX_LOCKED(rxq));
   4084 
   4085 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4086 	if (m == NULL)
   4087 		return ENOBUFS;
   4088 
   4089 	MCLGET(m, M_DONTWAIT);
   4090 	if ((m->m_flags & M_EXT) == 0) {
   4091 		m_freem(m);
   4092 		return ENOBUFS;
   4093 	}
   4094 
   4095 	if (rxs->rxs_mbuf != NULL)
   4096 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4097 
   4098 	rxs->rxs_mbuf = m;
   4099 
   4100 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4101 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4102 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4103 	if (error) {
   4104 		/* XXX XXX XXX */
   4105 		aprint_error_dev(sc->sc_dev,
   4106 		    "unable to load rx DMA map %d, error = %d\n",
   4107 		    idx, error);
   4108 		panic("wm_add_rxbuf");
   4109 	}
   4110 
   4111 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4112 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4113 
   4114 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4115 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4116 			wm_init_rxdesc(rxq, idx);
   4117 	} else
   4118 		wm_init_rxdesc(rxq, idx);
   4119 
   4120 	return 0;
   4121 }
   4122 
   4123 /*
   4124  * wm_rxdrain:
   4125  *
   4126  *	Drain the receive queue.
   4127  */
   4128 static void
   4129 wm_rxdrain(struct wm_rxqueue *rxq)
   4130 {
   4131 	struct wm_softc *sc = rxq->rxq_sc;
   4132 	struct wm_rxsoft *rxs;
   4133 	int i;
   4134 
   4135 	KASSERT(WM_RX_LOCKED(rxq));
   4136 
   4137 	for (i = 0; i < WM_NRXDESC; i++) {
   4138 		rxs = &rxq->rxq_soft[i];
   4139 		if (rxs->rxs_mbuf != NULL) {
   4140 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4141 			m_freem(rxs->rxs_mbuf);
   4142 			rxs->rxs_mbuf = NULL;
   4143 		}
   4144 	}
   4145 }
   4146 
   4147 
   4148 /*
   4149  * XXX copy from FreeBSD's sys/net/rss_config.c
   4150  */
   4151 /*
   4152  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4153  * effectiveness may be limited by algorithm choice and available entropy
   4154  * during the boot.
   4155  *
   4156  * XXXRW: And that we don't randomize it yet!
   4157  *
   4158  * This is the default Microsoft RSS specification key which is also
   4159  * the Chelsio T5 firmware default key.
   4160  */
   4161 #define RSS_KEYSIZE 40
   4162 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4163 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4164 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4165 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4166 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4167 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4168 };
   4169 
   4170 /*
    4171  * Caller must pass an array of size sizeof(wm_rss_key).
   4172  *
   4173  * XXX
    4174  * As if_ixgbe may also use this function, it should not be an
    4175  * if_wm specific function.
   4176  */
   4177 static void
   4178 wm_rss_getkey(uint8_t *key)
   4179 {
   4180 
   4181 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4182 }
   4183 
   4184 /*
   4185  * Setup registers for RSS.
   4186  *
    4187  * XXX VMDq is not yet supported
   4188  */
   4189 static void
   4190 wm_init_rss(struct wm_softc *sc)
   4191 {
   4192 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4193 	int i;
   4194 
   4195 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4196 
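         	/*
         	 * Fill the redirection table round-robin (i % sc_nqueues) so
         	 * that the RSS hash spreads flows evenly across the enabled
         	 * queues.
         	 */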
   4197 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4198 		int qid, reta_ent;
   4199 
   4200 		qid  = i % sc->sc_nqueues;
    4201 		switch (sc->sc_type) {
   4202 		case WM_T_82574:
   4203 			reta_ent = __SHIFTIN(qid,
   4204 			    RETA_ENT_QINDEX_MASK_82574);
   4205 			break;
   4206 		case WM_T_82575:
   4207 			reta_ent = __SHIFTIN(qid,
   4208 			    RETA_ENT_QINDEX1_MASK_82575);
   4209 			break;
   4210 		default:
   4211 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4212 			break;
   4213 		}
   4214 
   4215 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4216 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4217 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4218 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4219 	}
   4220 
   4221 	wm_rss_getkey((uint8_t *)rss_key);
   4222 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4223 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4224 
   4225 	if (sc->sc_type == WM_T_82574)
   4226 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4227 	else
   4228 		mrqc = MRQC_ENABLE_RSS_MQ;
   4229 
    4230 	/*
    4231 	 * XXX The same as FreeBSD igb.
    4232 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4233 	 */
   4234 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4235 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4236 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4237 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4238 
   4239 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4240 }
   4241 
   4242 /*
    4243  * Adjust the TX and RX queue numbers which the system actually uses.
    4244  *
    4245  * The numbers are affected by the parameters below.
    4246  *     - The number of hardware queues
   4247  *     - The number of MSI-X vectors (= "nvectors" argument)
   4248  *     - ncpu
   4249  */
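         /*
          * Illustrative example: an 82576 (16 hardware queue pairs) given
          * 5 MSI-X vectors on an 8-CPU machine ends up with
          * sc_nqueues = min(16, 5 - 1, 8) = 4.
          */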
   4250 static void
   4251 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4252 {
   4253 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4254 
   4255 	if (nvectors < 2) {
   4256 		sc->sc_nqueues = 1;
   4257 		return;
   4258 	}
   4259 
    4260 	switch (sc->sc_type) {
   4261 	case WM_T_82572:
   4262 		hw_ntxqueues = 2;
   4263 		hw_nrxqueues = 2;
   4264 		break;
   4265 	case WM_T_82574:
   4266 		hw_ntxqueues = 2;
   4267 		hw_nrxqueues = 2;
   4268 		break;
   4269 	case WM_T_82575:
   4270 		hw_ntxqueues = 4;
   4271 		hw_nrxqueues = 4;
   4272 		break;
   4273 	case WM_T_82576:
   4274 		hw_ntxqueues = 16;
   4275 		hw_nrxqueues = 16;
   4276 		break;
   4277 	case WM_T_82580:
   4278 	case WM_T_I350:
   4279 	case WM_T_I354:
   4280 		hw_ntxqueues = 8;
   4281 		hw_nrxqueues = 8;
   4282 		break;
   4283 	case WM_T_I210:
   4284 		hw_ntxqueues = 4;
   4285 		hw_nrxqueues = 4;
   4286 		break;
   4287 	case WM_T_I211:
   4288 		hw_ntxqueues = 2;
   4289 		hw_nrxqueues = 2;
   4290 		break;
   4291 		/*
    4292 		 * As the ethernet controllers below do not support MSI-X,
    4293 		 * this driver does not use multiqueue on them.
   4294 		 *     - WM_T_80003
   4295 		 *     - WM_T_ICH8
   4296 		 *     - WM_T_ICH9
   4297 		 *     - WM_T_ICH10
   4298 		 *     - WM_T_PCH
   4299 		 *     - WM_T_PCH2
   4300 		 *     - WM_T_PCH_LPT
   4301 		 */
   4302 	default:
   4303 		hw_ntxqueues = 1;
   4304 		hw_nrxqueues = 1;
   4305 		break;
   4306 	}
   4307 
   4308 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4309 
   4310 	/*
    4311 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4312 	 * limit the number of queues actually used.
   4313 	 */
   4314 	if (nvectors < hw_nqueues + 1) {
   4315 		sc->sc_nqueues = nvectors - 1;
   4316 	} else {
   4317 		sc->sc_nqueues = hw_nqueues;
   4318 	}
   4319 
   4320 	/*
    4321 	 * As more queues than CPUs cannot improve scaling, we limit
    4322 	 * the number of queues actually used.
   4323 	 */
   4324 	if (ncpu < sc->sc_nqueues)
   4325 		sc->sc_nqueues = ncpu;
   4326 }
   4327 
   4328 /*
   4329  * Both single interrupt MSI and INTx can use this function.
   4330  */
   4331 static int
   4332 wm_setup_legacy(struct wm_softc *sc)
   4333 {
   4334 	pci_chipset_tag_t pc = sc->sc_pc;
   4335 	const char *intrstr = NULL;
   4336 	char intrbuf[PCI_INTRSTR_LEN];
   4337 	int error;
   4338 
   4339 	error = wm_alloc_txrx_queues(sc);
   4340 	if (error) {
   4341 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4342 		    error);
   4343 		return ENOMEM;
   4344 	}
   4345 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4346 	    sizeof(intrbuf));
   4347 #ifdef WM_MPSAFE
   4348 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4349 #endif
   4350 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4351 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4352 	if (sc->sc_ihs[0] == NULL) {
    4353 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4354 		    (pci_intr_type(sc->sc_intrs[0])
   4355 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4356 		return ENOMEM;
   4357 	}
   4358 
   4359 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4360 	sc->sc_nintrs = 1;
   4361 	return 0;
   4362 }
   4363 
   4364 static int
   4365 wm_setup_msix(struct wm_softc *sc)
   4366 {
   4367 	void *vih;
   4368 	kcpuset_t *affinity;
   4369 	int qidx, error, intr_idx, txrx_established;
   4370 	pci_chipset_tag_t pc = sc->sc_pc;
   4371 	const char *intrstr = NULL;
   4372 	char intrbuf[PCI_INTRSTR_LEN];
   4373 	char intr_xname[INTRDEVNAMEBUF];
   4374 
   4375 	if (sc->sc_nqueues < ncpu) {
   4376 		/*
    4377 		 * To avoid conflicts with other devices' interrupts, the
    4378 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4379 		 */
   4380 		sc->sc_affinity_offset = 1;
   4381 	} else {
   4382 		/*
    4383 		 * In this case, this device uses all CPUs. For readability,
    4384 		 * we unify the affinitized cpu_index with the MSI-X vector number.
   4385 		 */
   4386 		sc->sc_affinity_offset = 0;
   4387 	}
   4388 
   4389 	error = wm_alloc_txrx_queues(sc);
   4390 	if (error) {
   4391 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4392 		    error);
   4393 		return ENOMEM;
   4394 	}
   4395 
   4396 	kcpuset_create(&affinity, false);
   4397 	intr_idx = 0;
   4398 
   4399 	/*
   4400 	 * TX and RX
   4401 	 */
   4402 	txrx_established = 0;
   4403 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4404 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4405 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4406 
   4407 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4408 		    sizeof(intrbuf));
   4409 #ifdef WM_MPSAFE
   4410 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4411 		    PCI_INTR_MPSAFE, true);
   4412 #endif
   4413 		memset(intr_xname, 0, sizeof(intr_xname));
   4414 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4415 		    device_xname(sc->sc_dev), qidx);
   4416 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4417 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4418 		if (vih == NULL) {
   4419 			aprint_error_dev(sc->sc_dev,
    4420 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4421 			    intrstr ? " at " : "",
   4422 			    intrstr ? intrstr : "");
   4423 
   4424 			goto fail;
   4425 		}
   4426 		kcpuset_zero(affinity);
   4427 		/* Round-robin affinity */
   4428 		kcpuset_set(affinity, affinity_to);
   4429 		error = interrupt_distribute(vih, affinity, NULL);
   4430 		if (error == 0) {
   4431 			aprint_normal_dev(sc->sc_dev,
   4432 			    "for TX and RX interrupting at %s affinity to %u\n",
   4433 			    intrstr, affinity_to);
   4434 		} else {
   4435 			aprint_normal_dev(sc->sc_dev,
   4436 			    "for TX and RX interrupting at %s\n", intrstr);
   4437 		}
   4438 		sc->sc_ihs[intr_idx] = vih;
    4439 		wmq->wmq_id = qidx;
   4440 		wmq->wmq_intr_idx = intr_idx;
   4441 
   4442 		txrx_established++;
   4443 		intr_idx++;
   4444 	}
   4445 
   4446 	/*
   4447 	 * LINK
   4448 	 */
   4449 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4450 	    sizeof(intrbuf));
   4451 #ifdef WM_MPSAFE
   4452 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4453 #endif
   4454 	memset(intr_xname, 0, sizeof(intr_xname));
   4455 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4456 	    device_xname(sc->sc_dev));
   4457 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4458 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4459 	if (vih == NULL) {
   4460 		aprint_error_dev(sc->sc_dev,
    4461 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4462 		    intrstr ? " at " : "",
   4463 		    intrstr ? intrstr : "");
   4464 
   4465 		goto fail;
   4466 	}
    4467 	/* Keep the default affinity for the LINK interrupt */
   4468 	aprint_normal_dev(sc->sc_dev,
   4469 	    "for LINK interrupting at %s\n", intrstr);
   4470 	sc->sc_ihs[intr_idx] = vih;
   4471 	sc->sc_link_intr_idx = intr_idx;
   4472 
   4473 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4474 	kcpuset_destroy(affinity);
   4475 	return 0;
   4476 
   4477  fail:
   4478 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4479 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4480 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4481 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4482 	}
   4483 
   4484 	kcpuset_destroy(affinity);
   4485 	return ENOMEM;
   4486 }
   4487 
   4488 /*
   4489  * wm_init:		[ifnet interface function]
   4490  *
   4491  *	Initialize the interface.
   4492  */
   4493 static int
   4494 wm_init(struct ifnet *ifp)
   4495 {
   4496 	struct wm_softc *sc = ifp->if_softc;
   4497 	int ret;
   4498 
   4499 	WM_CORE_LOCK(sc);
   4500 	ret = wm_init_locked(ifp);
   4501 	WM_CORE_UNLOCK(sc);
   4502 
   4503 	return ret;
   4504 }
   4505 
   4506 static int
   4507 wm_init_locked(struct ifnet *ifp)
   4508 {
   4509 	struct wm_softc *sc = ifp->if_softc;
   4510 	int i, j, trynum, error = 0;
   4511 	uint32_t reg;
   4512 
   4513 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4514 		device_xname(sc->sc_dev), __func__));
   4515 	KASSERT(WM_CORE_LOCKED(sc));
   4516 	/*
    4517 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4518 	 * There is a small but measurable benefit to avoiding the adjustment
   4519 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4520 	 * on such platforms.  One possibility is that the DMA itself is
   4521 	 * slightly more efficient if the front of the entire packet (instead
   4522 	 * of the front of the headers) is aligned.
   4523 	 *
   4524 	 * Note we must always set align_tweak to 0 if we are using
   4525 	 * jumbo frames.
   4526 	 */
   4527 #ifdef __NO_STRICT_ALIGNMENT
   4528 	sc->sc_align_tweak = 0;
   4529 #else
   4530 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4531 		sc->sc_align_tweak = 0;
   4532 	else
   4533 		sc->sc_align_tweak = 2;
   4534 #endif /* __NO_STRICT_ALIGNMENT */
   4535 
   4536 	/* Cancel any pending I/O. */
   4537 	wm_stop_locked(ifp, 0);
   4538 
   4539 	/* update statistics before reset */
   4540 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4541 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4542 
   4543 	/* Reset the chip to a known state. */
   4544 	wm_reset(sc);
   4545 
   4546 	switch (sc->sc_type) {
   4547 	case WM_T_82571:
   4548 	case WM_T_82572:
   4549 	case WM_T_82573:
   4550 	case WM_T_82574:
   4551 	case WM_T_82583:
   4552 	case WM_T_80003:
   4553 	case WM_T_ICH8:
   4554 	case WM_T_ICH9:
   4555 	case WM_T_ICH10:
   4556 	case WM_T_PCH:
   4557 	case WM_T_PCH2:
   4558 	case WM_T_PCH_LPT:
   4559 	case WM_T_PCH_SPT:
   4560 		/* AMT based hardware can now take control from firmware */
   4561 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4562 			wm_get_hw_control(sc);
   4563 		break;
   4564 	default:
   4565 		break;
   4566 	}
   4567 
   4568 	/* Init hardware bits */
   4569 	wm_initialize_hardware_bits(sc);
   4570 
   4571 	/* Reset the PHY. */
   4572 	if (sc->sc_flags & WM_F_HAS_MII)
   4573 		wm_gmii_reset(sc);
   4574 
   4575 	/* Calculate (E)ITR value */
   4576 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4577 		sc->sc_itr = 450;	/* For EITR */
   4578 	} else if (sc->sc_type >= WM_T_82543) {
   4579 		/*
   4580 		 * Set up the interrupt throttling register (units of 256ns)
   4581 		 * Note that a footnote in Intel's documentation says this
   4582 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4583 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4584 		 * that that is also true for the 1024ns units of the other
   4585 		 * interrupt-related timer registers -- so, really, we ought
   4586 		 * to divide this value by 4 when the link speed is low.
   4587 		 *
   4588 		 * XXX implement this division at link speed change!
   4589 		 */
   4590 
   4591 		/*
   4592 		 * For N interrupts/sec, set this value to:
   4593 		 * 1000000000 / (N * 256).  Note that we set the
   4594 		 * absolute and packet timer values to this value
   4595 		 * divided by 4 to get "simple timer" behavior.
   4596 		 */
   4597 
   4598 		sc->sc_itr = 1500;		/* 2604 ints/sec */
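         		/* Check: 1000000000 / (1500 * 256) = ~2604 ints/sec. */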
   4599 	}
   4600 
   4601 	error = wm_init_txrx_queues(sc);
   4602 	if (error)
   4603 		goto out;
   4604 
   4605 	/*
   4606 	 * Clear out the VLAN table -- we don't use it (yet).
   4607 	 */
   4608 	CSR_WRITE(sc, WMREG_VET, 0);
   4609 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4610 		trynum = 10; /* Due to hw errata */
   4611 	else
   4612 		trynum = 1;
   4613 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4614 		for (j = 0; j < trynum; j++)
   4615 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4616 
   4617 	/*
   4618 	 * Set up flow-control parameters.
   4619 	 *
   4620 	 * XXX Values could probably stand some tuning.
   4621 	 */
   4622 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4623 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4624 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4625 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4626 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4627 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4628 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4629 	}
   4630 
   4631 	sc->sc_fcrtl = FCRTL_DFLT;
   4632 	if (sc->sc_type < WM_T_82543) {
   4633 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4634 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4635 	} else {
   4636 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4637 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4638 	}
   4639 
   4640 	if (sc->sc_type == WM_T_80003)
   4641 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4642 	else
   4643 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4644 
   4645 	/* Writes the control register. */
   4646 	wm_set_vlan(sc);
   4647 
   4648 	if (sc->sc_flags & WM_F_HAS_MII) {
   4649 		int val;
   4650 
   4651 		switch (sc->sc_type) {
   4652 		case WM_T_80003:
   4653 		case WM_T_ICH8:
   4654 		case WM_T_ICH9:
   4655 		case WM_T_ICH10:
   4656 		case WM_T_PCH:
   4657 		case WM_T_PCH2:
   4658 		case WM_T_PCH_LPT:
   4659 		case WM_T_PCH_SPT:
   4660 			/*
   4661 			 * Set the mac to wait the maximum time between each
   4662 			 * iteration and increase the max iterations when
   4663 			 * polling the phy; this fixes erroneous timeouts at
   4664 			 * 10Mbps.
   4665 			 */
   4666 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4667 			    0xFFFF);
   4668 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4669 			val |= 0x3F;
   4670 			wm_kmrn_writereg(sc,
   4671 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4672 			break;
   4673 		default:
   4674 			break;
   4675 		}
   4676 
   4677 		if (sc->sc_type == WM_T_80003) {
   4678 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4679 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4680 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4681 
			/* Bypass the RX and TX FIFOs */
   4683 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4684 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4685 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4686 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4687 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4688 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4689 		}
   4690 	}
   4691 #if 0
   4692 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4693 #endif
   4694 
   4695 	/* Set up checksum offload parameters. */
   4696 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4697 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4698 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4699 		reg |= RXCSUM_IPOFL;
   4700 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4701 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4702 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4703 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4704 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4705 
   4706 	/* Set up MSI-X */
   4707 	if (sc->sc_nintrs > 1) {
   4708 		uint32_t ivar;
   4709 		struct wm_queue *wmq;
   4710 		int qid, qintr_idx;
   4711 
   4712 		if (sc->sc_type == WM_T_82575) {
   4713 			/* Interrupt control */
   4714 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4715 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4716 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4717 
   4718 			/* TX and RX */
   4719 			for (i = 0; i < sc->sc_nqueues; i++) {
   4720 				wmq = &sc->sc_queue[i];
   4721 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4722 				    EITR_TX_QUEUE(wmq->wmq_id)
   4723 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4724 			}
   4725 			/* Link status */
   4726 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4727 			    EITR_OTHER);
   4728 		} else if (sc->sc_type == WM_T_82574) {
   4729 			/* Interrupt control */
   4730 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4731 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4732 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4733 
   4734 			ivar = 0;
   4735 			/* TX and RX */
   4736 			for (i = 0; i < sc->sc_nqueues; i++) {
   4737 				wmq = &sc->sc_queue[i];
   4738 				qid = wmq->wmq_id;
   4739 				qintr_idx = wmq->wmq_intr_idx;
   4740 
   4741 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4742 				    IVAR_TX_MASK_Q_82574(qid));
   4743 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4744 				    IVAR_RX_MASK_Q_82574(qid));
   4745 			}
   4746 			/* Link status */
   4747 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4748 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4749 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4750 		} else {
   4751 			/* Interrupt control */
   4752 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4753 			    | GPIE_EIAME | GPIE_PBA);
   4754 
   4755 			switch (sc->sc_type) {
   4756 			case WM_T_82580:
   4757 			case WM_T_I350:
   4758 			case WM_T_I354:
   4759 			case WM_T_I210:
   4760 			case WM_T_I211:
   4761 				/* TX and RX */
   4762 				for (i = 0; i < sc->sc_nqueues; i++) {
   4763 					wmq = &sc->sc_queue[i];
   4764 					qid = wmq->wmq_id;
   4765 					qintr_idx = wmq->wmq_intr_idx;
   4766 
   4767 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4768 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4769 					ivar |= __SHIFTIN((qintr_idx
   4770 						| IVAR_VALID),
   4771 					    IVAR_TX_MASK_Q(qid));
   4772 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4773 					ivar |= __SHIFTIN((qintr_idx
   4774 						| IVAR_VALID),
   4775 					    IVAR_RX_MASK_Q(qid));
   4776 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4777 				}
   4778 				break;
   4779 			case WM_T_82576:
   4780 				/* TX and RX */
   4781 				for (i = 0; i < sc->sc_nqueues; i++) {
   4782 					wmq = &sc->sc_queue[i];
   4783 					qid = wmq->wmq_id;
   4784 					qintr_idx = wmq->wmq_intr_idx;
   4785 
   4786 					ivar = CSR_READ(sc,
   4787 					    WMREG_IVAR_Q_82576(qid));
   4788 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4789 					ivar |= __SHIFTIN((qintr_idx
   4790 						| IVAR_VALID),
   4791 					    IVAR_TX_MASK_Q_82576(qid));
   4792 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4793 					ivar |= __SHIFTIN((qintr_idx
   4794 						| IVAR_VALID),
   4795 					    IVAR_RX_MASK_Q_82576(qid));
   4796 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4797 					    ivar);
   4798 				}
   4799 				break;
   4800 			default:
   4801 				break;
   4802 			}
   4803 
   4804 			/* Link status */
   4805 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4806 			    IVAR_MISC_OTHER);
   4807 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4808 		}
   4809 
   4810 		if (sc->sc_nqueues > 1) {
   4811 			wm_init_rss(sc);
   4812 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with Multiqueue.  However,
			 * this is not the same as TCP/IP checksums, which
			 * still work.
			 */
   4819 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4820 			reg |= RXCSUM_PCSD;
   4821 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4822 		}
   4823 	}
   4824 
   4825 	/* Set up the interrupt registers. */
   4826 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4827 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4828 	    ICR_RXO | ICR_RXT0;
   4829 	if (sc->sc_nintrs > 1) {
   4830 		uint32_t mask;
   4831 		struct wm_queue *wmq;
   4832 
   4833 		switch (sc->sc_type) {
   4834 		case WM_T_82574:
   4835 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4836 			    WMREG_EIAC_82574_MSIX_MASK);
   4837 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4838 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4839 			break;
   4840 		default:
   4841 			if (sc->sc_type == WM_T_82575) {
   4842 				mask = 0;
   4843 				for (i = 0; i < sc->sc_nqueues; i++) {
   4844 					wmq = &sc->sc_queue[i];
   4845 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4846 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4847 				}
   4848 				mask |= EITR_OTHER;
   4849 			} else {
   4850 				mask = 0;
   4851 				for (i = 0; i < sc->sc_nqueues; i++) {
   4852 					wmq = &sc->sc_queue[i];
   4853 					mask |= 1 << wmq->wmq_intr_idx;
   4854 				}
   4855 				mask |= 1 << sc->sc_link_intr_idx;
   4856 			}
   4857 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4858 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4859 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4860 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4861 			break;
   4862 		}
   4863 	} else
   4864 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4865 
   4866 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4867 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4868 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4869 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4870 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4871 		reg |= KABGTXD_BGSQLBIAS;
   4872 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4873 	}
   4874 
   4875 	/* Set up the inter-packet gap. */
   4876 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4877 
   4878 	if (sc->sc_type >= WM_T_82543) {
		/*
		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
		 * use the multiqueue function with MSI-X.
		 */
   4883 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4884 			int qidx;
   4885 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4886 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4887 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4888 				    sc->sc_itr);
   4889 			}
			/*
			 * Link interrupts occur much less frequently than
			 * TX and RX interrupts, so we don't tune the
			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
			 * FreeBSD's if_igb does.
			 */
   4896 		} else
   4897 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4898 	}
   4899 
   4900 	/* Set the VLAN ethernetype. */
   4901 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4902 
	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
   4908 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4909 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4910 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4911 	if (sc->sc_type >= WM_T_82571)
   4912 		sc->sc_tctl |= TCTL_MULR;
   4913 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4914 
   4915 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set.  See the documentation. */
   4917 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4918 	}
   4919 
   4920 	if (sc->sc_type == WM_T_80003) {
   4921 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4922 		reg &= ~TCTL_EXT_GCEX_MASK;
   4923 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4924 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4925 	}
   4926 
   4927 	/* Set the media. */
   4928 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4929 		goto out;
   4930 
   4931 	/* Configure for OS presence */
   4932 	wm_init_manageability(sc);
   4933 
   4934 	/*
   4935 	 * Set up the receive control register; we actually program
   4936 	 * the register when we set the receive filter.  Use multicast
   4937 	 * address offset type 0.
   4938 	 *
   4939 	 * Only the i82544 has the ability to strip the incoming
   4940 	 * CRC, so we don't enable that feature.
   4941 	 */
   4942 	sc->sc_mchash_type = 0;
   4943 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4944 	    | RCTL_MO(sc->sc_mchash_type);
   4945 
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not.  So ask for the stripped CRC here and cope
	 * with it in rxeof().
	 */
   4950 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4951 	    || (sc->sc_type == WM_T_I210))
   4952 		sc->sc_rctl |= RCTL_SECRC;
   4953 
   4954 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4955 	    && (ifp->if_mtu > ETHERMTU)) {
   4956 		sc->sc_rctl |= RCTL_LPE;
   4957 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4958 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4959 	}
   4960 
   4961 	if (MCLBYTES == 2048) {
   4962 		sc->sc_rctl |= RCTL_2k;
   4963 	} else {
   4964 		if (sc->sc_type >= WM_T_82543) {
   4965 			switch (MCLBYTES) {
   4966 			case 4096:
   4967 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4968 				break;
   4969 			case 8192:
   4970 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4971 				break;
   4972 			case 16384:
   4973 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4974 				break;
   4975 			default:
   4976 				panic("wm_init: MCLBYTES %d unsupported",
   4977 				    MCLBYTES);
   4978 				break;
   4979 			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   4981 	}
   4982 
   4983 	/* Set the receive filter. */
   4984 	wm_set_filter(sc);
   4985 
   4986 	/* Enable ECC */
   4987 	switch (sc->sc_type) {
   4988 	case WM_T_82571:
   4989 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4990 		reg |= PBA_ECC_CORR_EN;
   4991 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4992 		break;
   4993 	case WM_T_PCH_LPT:
   4994 	case WM_T_PCH_SPT:
   4995 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4996 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4997 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4998 
   4999 		reg = CSR_READ(sc, WMREG_CTRL);
   5000 		reg |= CTRL_MEHE;
   5001 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5002 		break;
   5003 	default:
   5004 		break;
   5005 	}
   5006 
	/* On 82575 and later, set RDT only if RX is enabled */
   5008 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5009 		int qidx;
   5010 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5011 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5012 			for (i = 0; i < WM_NRXDESC; i++) {
   5013 				WM_RX_LOCK(rxq);
   5014 				wm_init_rxdesc(rxq, i);
   5015 				WM_RX_UNLOCK(rxq);
			}
   5018 		}
   5019 	}
   5020 
   5021 	sc->sc_stopping = false;
   5022 
   5023 	/* Start the one second link check clock. */
   5024 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5025 
   5026 	/* ...all done! */
   5027 	ifp->if_flags |= IFF_RUNNING;
   5028 	ifp->if_flags &= ~IFF_OACTIVE;
   5029 
   5030  out:
   5031 	sc->sc_if_flags = ifp->if_flags;
   5032 	if (error)
   5033 		log(LOG_ERR, "%s: interface not running\n",
   5034 		    device_xname(sc->sc_dev));
   5035 	return error;
   5036 }
   5037 
   5038 /*
   5039  * wm_stop:		[ifnet interface function]
   5040  *
   5041  *	Stop transmission on the interface.
   5042  */
   5043 static void
   5044 wm_stop(struct ifnet *ifp, int disable)
   5045 {
   5046 	struct wm_softc *sc = ifp->if_softc;
   5047 
   5048 	WM_CORE_LOCK(sc);
   5049 	wm_stop_locked(ifp, disable);
   5050 	WM_CORE_UNLOCK(sc);
   5051 }
   5052 
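/*
 * wm_stop_locked:
 *
 *	Stop transmission on the interface.  The caller must hold
 *	the core lock.
 */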
   5053 static void
   5054 wm_stop_locked(struct ifnet *ifp, int disable)
   5055 {
   5056 	struct wm_softc *sc = ifp->if_softc;
   5057 	struct wm_txsoft *txs;
   5058 	int i, qidx;
   5059 
   5060 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5061 		device_xname(sc->sc_dev), __func__));
   5062 	KASSERT(WM_CORE_LOCKED(sc));
   5063 
   5064 	sc->sc_stopping = true;
   5065 
   5066 	/* Stop the one second clock. */
   5067 	callout_stop(&sc->sc_tick_ch);
   5068 
   5069 	/* Stop the 82547 Tx FIFO stall check timer. */
   5070 	if (sc->sc_type == WM_T_82547)
   5071 		callout_stop(&sc->sc_txfifo_ch);
   5072 
   5073 	if (sc->sc_flags & WM_F_HAS_MII) {
   5074 		/* Down the MII. */
   5075 		mii_down(&sc->sc_mii);
   5076 	} else {
   5077 #if 0
   5078 		/* Should we clear PHY's status properly? */
   5079 		wm_reset(sc);
   5080 #endif
   5081 	}
   5082 
   5083 	/* Stop the transmit and receive processes. */
   5084 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5085 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5086 	sc->sc_rctl &= ~RCTL_EN;
   5087 
   5088 	/*
   5089 	 * Clear the interrupt mask to ensure the device cannot assert its
   5090 	 * interrupt line.
   5091 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5092 	 * service any currently pending or shared interrupt.
   5093 	 */
   5094 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5095 	sc->sc_icr = 0;
   5096 	if (sc->sc_nintrs > 1) {
   5097 		if (sc->sc_type != WM_T_82574) {
   5098 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5099 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5100 		} else
   5101 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5102 	}
   5103 
   5104 	/* Release any queued transmit buffers. */
   5105 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5106 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5107 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5108 		WM_TX_LOCK(txq);
   5109 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5110 			txs = &txq->txq_soft[i];
   5111 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   5113 				m_freem(txs->txs_mbuf);
   5114 				txs->txs_mbuf = NULL;
   5115 			}
   5116 		}
   5117 		if (sc->sc_type == WM_T_PCH_SPT) {
   5118 			pcireg_t preg;
   5119 			uint32_t reg;
   5120 			int nexttx;
   5121 
   5122 			/* First, disable MULR fix in FEXTNVM11 */
   5123 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5124 			reg |= FEXTNVM11_DIS_MULRFIX;
   5125 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5126 
   5127 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5128 			    WM_PCI_DESCRING_STATUS);
   5129 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5130 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5131 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5132 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5133 			    && (reg != 0)) {
   5134 				/* TX */
   5135 				printf("XXX need TX flush (reg = %08x)\n",
   5136 				    preg);
   5137 				wm_init_tx_descs(sc, txq);
   5138 				wm_init_tx_regs(sc, wmq, txq);
   5139 				nexttx = txq->txq_next;
   5140 				wm_set_dma_addr(
   5141 					&txq->txq_descs[nexttx].wtx_addr,
   5142 					WM_CDTXADDR(txq, nexttx));
   5143 				txq->txq_descs[nexttx].wtx_cmdlen
   5144 				    = htole32(WTX_CMD_IFCS | 512);
   5145 				wm_cdtxsync(txq, nexttx, 1,
   5146 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5147 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5148 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5149 				CSR_WRITE_FLUSH(sc);
   5150 				delay(250);
   5151 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5152 			}
   5153 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5154 			    WM_PCI_DESCRING_STATUS);
   5155 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5156 				/* RX */
   5157 				printf("XXX need RX flush\n");
   5158 			}
   5159 		}
   5160 		WM_TX_UNLOCK(txq);
   5161 	}
   5162 
   5163 	/* Mark the interface as down and cancel the watchdog timer. */
   5164 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5165 	ifp->if_timer = 0;
   5166 
   5167 	if (disable) {
   5168 		for (i = 0; i < sc->sc_nqueues; i++) {
   5169 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5170 			WM_RX_LOCK(rxq);
   5171 			wm_rxdrain(rxq);
   5172 			WM_RX_UNLOCK(rxq);
   5173 		}
   5174 	}
   5175 
   5176 #if 0 /* notyet */
   5177 	if (sc->sc_type >= WM_T_82544)
   5178 		CSR_WRITE(sc, WMREG_WUC, 0);
   5179 #endif
   5180 }
   5181 
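/*
 * wm_dump_mbuf_chain:
 *
 *	Log the data pointer, length and flags of each mbuf in the
 *	chain, for debugging packets that fail to map for DMA.
 */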
   5182 static void
   5183 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5184 {
   5185 	struct mbuf *m;
   5186 	int i;
   5187 
   5188 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5189 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5190 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5191 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5192 		    m->m_data, m->m_len, m->m_flags);
   5193 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5194 	    i, i == 1 ? "" : "s");
   5195 }
   5196 
   5197 /*
   5198  * wm_82547_txfifo_stall:
   5199  *
   5200  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5201  *	reset the FIFO pointers, and restart packet transmission.
   5202  */
   5203 static void
   5204 wm_82547_txfifo_stall(void *arg)
   5205 {
   5206 	struct wm_softc *sc = arg;
   5207 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5208 #ifndef WM_MPSAFE
   5209 	int s;
   5210 
   5211 	s = splnet();
   5212 #endif
   5213 	WM_TX_LOCK(txq);
   5214 
   5215 	if (sc->sc_stopping)
   5216 		goto out;
   5217 
   5218 	if (txq->txq_fifo_stall) {
   5219 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5220 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5221 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5222 			/*
   5223 			 * Packets have drained.  Stop transmitter, reset
   5224 			 * FIFO pointers, restart transmitter, and kick
   5225 			 * the packet queue.
   5226 			 */
   5227 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5228 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5229 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5230 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5231 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5232 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5233 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5234 			CSR_WRITE_FLUSH(sc);
   5235 
   5236 			txq->txq_fifo_head = 0;
   5237 			txq->txq_fifo_stall = 0;
   5238 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5239 		} else {
   5240 			/*
   5241 			 * Still waiting for packets to drain; try again in
   5242 			 * another tick.
   5243 			 */
   5244 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5245 		}
   5246 	}
   5247 
   5248 out:
   5249 	WM_TX_UNLOCK(txq);
   5250 #ifndef WM_MPSAFE
   5251 	splx(s);
   5252 #endif
   5253 }
   5254 
   5255 /*
   5256  * wm_82547_txfifo_bugchk:
   5257  *
 *	Check for a bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5261  *
   5262  *	We do this by checking the amount of space before the end
   5263  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5264  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5265  *	the internal FIFO pointers to the beginning, and restart
   5266  *	transmission on the interface.
   5267  */
   5268 #define	WM_FIFO_HDR		0x10
   5269 #define	WM_82547_PAD_LEN	0x3e0
   5270 static int
   5271 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5272 {
   5273 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5274 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5275 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5276 
   5277 	/* Just return if already stalled. */
   5278 	if (txq->txq_fifo_stall)
   5279 		return 1;
   5280 
   5281 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5282 		/* Stall only occurs in half-duplex mode. */
   5283 		goto send_packet;
   5284 	}
   5285 
   5286 	if (len >= WM_82547_PAD_LEN + space) {
   5287 		txq->txq_fifo_stall = 1;
   5288 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5289 		return 1;
   5290 	}
   5291 
   5292  send_packet:
   5293 	txq->txq_fifo_head += len;
   5294 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5295 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5296 
   5297 	return 0;
   5298 }
   5299 
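/*
 * wm_alloc_tx_descs:
 *
 *	Allocate the Tx descriptor ring in DMA-safe memory (within a
 *	single 4GB segment), map it, and create and load its DMA map.
 */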
   5300 static int
   5301 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5302 {
   5303 	int error;
   5304 
   5305 	/*
   5306 	 * Allocate the control data structures, and create and load the
   5307 	 * DMA map for it.
   5308 	 *
   5309 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5310 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5311 	 * both sets within the same 4G segment.
   5312 	 */
   5313 	if (sc->sc_type < WM_T_82544)
   5314 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5315 	else
   5316 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5317 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5318 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5319 	else
   5320 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5321 
   5322 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5323 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5324 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5325 		aprint_error_dev(sc->sc_dev,
   5326 		    "unable to allocate TX control data, error = %d\n",
   5327 		    error);
   5328 		goto fail_0;
   5329 	}
   5330 
   5331 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5332 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5333 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5334 		aprint_error_dev(sc->sc_dev,
   5335 		    "unable to map TX control data, error = %d\n", error);
   5336 		goto fail_1;
   5337 	}
   5338 
   5339 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5340 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5341 		aprint_error_dev(sc->sc_dev,
   5342 		    "unable to create TX control data DMA map, error = %d\n",
   5343 		    error);
   5344 		goto fail_2;
   5345 	}
   5346 
   5347 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5348 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5349 		aprint_error_dev(sc->sc_dev,
   5350 		    "unable to load TX control data DMA map, error = %d\n",
   5351 		    error);
   5352 		goto fail_3;
   5353 	}
   5354 
   5355 	return 0;
   5356 
   5357  fail_3:
   5358 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5359  fail_2:
   5360 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5361 	    WM_TXDESCS_SIZE(txq));
   5362  fail_1:
   5363 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5364  fail_0:
   5365 	return error;
   5366 }
   5367 
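/*
 * wm_free_tx_descs:
 *
 *	Unload and destroy the Tx descriptor ring's DMA map and free
 *	its DMA memory.
 */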
   5368 static void
   5369 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5370 {
   5371 
   5372 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5373 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5374 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5375 	    WM_TXDESCS_SIZE(txq));
   5376 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5377 }
   5378 
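/*
 * wm_alloc_rx_descs:
 *
 *	Allocate the Rx descriptor ring in DMA-safe memory (within a
 *	single 4GB segment), map it, and create and load its DMA map.
 */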
   5379 static int
   5380 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5381 {
   5382 	int error;
   5383 
   5384 	/*
   5385 	 * Allocate the control data structures, and create and load the
   5386 	 * DMA map for it.
   5387 	 *
   5388 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5389 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5390 	 * both sets within the same 4G segment.
   5391 	 */
   5392 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5393 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5394 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5395 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5396 		aprint_error_dev(sc->sc_dev,
   5397 		    "unable to allocate RX control data, error = %d\n",
   5398 		    error);
   5399 		goto fail_0;
   5400 	}
   5401 
   5402 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5403 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5404 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5405 		aprint_error_dev(sc->sc_dev,
   5406 		    "unable to map RX control data, error = %d\n", error);
   5407 		goto fail_1;
   5408 	}
   5409 
   5410 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5411 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5412 		aprint_error_dev(sc->sc_dev,
   5413 		    "unable to create RX control data DMA map, error = %d\n",
   5414 		    error);
   5415 		goto fail_2;
   5416 	}
   5417 
   5418 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5419 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5420 		aprint_error_dev(sc->sc_dev,
   5421 		    "unable to load RX control data DMA map, error = %d\n",
   5422 		    error);
   5423 		goto fail_3;
   5424 	}
   5425 
   5426 	return 0;
   5427 
   5428  fail_3:
   5429 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5430  fail_2:
   5431 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5432 	    rxq->rxq_desc_size);
   5433  fail_1:
   5434 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5435  fail_0:
   5436 	return error;
   5437 }
   5438 
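/*
 * wm_free_rx_descs:
 *
 *	Unload and destroy the Rx descriptor ring's DMA map and free
 *	its DMA memory.
 */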
   5439 static void
   5440 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5441 {
   5442 
   5443 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5444 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5445 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5446 	    rxq->rxq_desc_size);
   5447 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5448 }
   5449 
   5450 
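/*
 * wm_alloc_tx_buffer:
 *
 *	Create the DMA maps for the transmit buffers, one per Tx job.
 */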
   5451 static int
   5452 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5453 {
   5454 	int i, error;
   5455 
   5456 	/* Create the transmit buffer DMA maps. */
   5457 	WM_TXQUEUELEN(txq) =
   5458 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5459 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5460 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5461 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5462 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5463 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5464 			aprint_error_dev(sc->sc_dev,
   5465 			    "unable to create Tx DMA map %d, error = %d\n",
   5466 			    i, error);
   5467 			goto fail;
   5468 		}
   5469 	}
   5470 
   5471 	return 0;
   5472 
   5473  fail:
   5474 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5475 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5476 			bus_dmamap_destroy(sc->sc_dmat,
   5477 			    txq->txq_soft[i].txs_dmamap);
   5478 	}
   5479 	return error;
   5480 }
   5481 
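/*
 * wm_free_tx_buffer:
 *
 *	Destroy the DMA maps of the transmit buffers.
 */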
   5482 static void
   5483 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5484 {
   5485 	int i;
   5486 
   5487 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5488 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5489 			bus_dmamap_destroy(sc->sc_dmat,
   5490 			    txq->txq_soft[i].txs_dmamap);
   5491 	}
   5492 }
   5493 
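/*
 * wm_alloc_rx_buffer:
 *
 *	Create the DMA maps for the receive buffers, one per Rx
 *	descriptor.
 */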
   5494 static int
   5495 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5496 {
   5497 	int i, error;
   5498 
   5499 	/* Create the receive buffer DMA maps. */
   5500 	for (i = 0; i < WM_NRXDESC; i++) {
   5501 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5502 			    MCLBYTES, 0, 0,
   5503 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5504 			aprint_error_dev(sc->sc_dev,
   5505 			    "unable to create Rx DMA map %d error = %d\n",
   5506 			    i, error);
   5507 			goto fail;
   5508 		}
   5509 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5510 	}
   5511 
   5512 	return 0;
   5513 
   5514  fail:
   5515 	for (i = 0; i < WM_NRXDESC; i++) {
   5516 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5517 			bus_dmamap_destroy(sc->sc_dmat,
   5518 			    rxq->rxq_soft[i].rxs_dmamap);
   5519 	}
   5520 	return error;
   5521 }
   5522 
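/*
 * wm_free_rx_buffer:
 *
 *	Destroy the DMA maps of the receive buffers.
 */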
   5523 static void
   5524 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5525 {
   5526 	int i;
   5527 
   5528 	for (i = 0; i < WM_NRXDESC; i++) {
   5529 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5530 			bus_dmamap_destroy(sc->sc_dmat,
   5531 			    rxq->rxq_soft[i].rxs_dmamap);
   5532 	}
   5533 }
   5534 
/*
 * wm_alloc_txrx_queues:
 *
 *	Allocate {tx,rx}descs and {tx,rx} buffers.
 */
   5539 static int
   5540 wm_alloc_txrx_queues(struct wm_softc *sc)
   5541 {
   5542 	int i, error, tx_done, rx_done;
   5543 
   5544 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5545 	    KM_SLEEP);
   5546 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5548 		error = ENOMEM;
   5549 		goto fail_0;
   5550 	}
   5551 
   5552 	/*
   5553 	 * For transmission
   5554 	 */
   5555 	error = 0;
   5556 	tx_done = 0;
   5557 	for (i = 0; i < sc->sc_nqueues; i++) {
   5558 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5559 		txq->txq_sc = sc;
   5560 #ifdef WM_MPSAFE
   5561 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5562 #else
   5563 		txq->txq_lock = NULL;
   5564 #endif
   5565 		error = wm_alloc_tx_descs(sc, txq);
   5566 		if (error)
   5567 			break;
   5568 		error = wm_alloc_tx_buffer(sc, txq);
   5569 		if (error) {
   5570 			wm_free_tx_descs(sc, txq);
   5571 			break;
   5572 		}
   5573 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5574 		if (txq->txq_interq == NULL) {
   5575 			wm_free_tx_descs(sc, txq);
   5576 			wm_free_tx_buffer(sc, txq);
   5577 			error = ENOMEM;
   5578 			break;
   5579 		}
   5580 		tx_done++;
   5581 	}
   5582 	if (error)
   5583 		goto fail_1;
   5584 
	/*
	 * For receive
	 */
   5588 	error = 0;
   5589 	rx_done = 0;
   5590 	for (i = 0; i < sc->sc_nqueues; i++) {
   5591 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5592 		rxq->rxq_sc = sc;
   5593 #ifdef WM_MPSAFE
   5594 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5595 #else
   5596 		rxq->rxq_lock = NULL;
   5597 #endif
   5598 		error = wm_alloc_rx_descs(sc, rxq);
   5599 		if (error)
   5600 			break;
   5601 
   5602 		error = wm_alloc_rx_buffer(sc, rxq);
   5603 		if (error) {
   5604 			wm_free_rx_descs(sc, rxq);
   5605 			break;
   5606 		}
   5607 
   5608 		rx_done++;
   5609 	}
   5610 	if (error)
   5611 		goto fail_2;
   5612 
   5613 	return 0;
   5614 
   5615  fail_2:
   5616 	for (i = 0; i < rx_done; i++) {
   5617 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5618 		wm_free_rx_buffer(sc, rxq);
   5619 		wm_free_rx_descs(sc, rxq);
   5620 		if (rxq->rxq_lock)
   5621 			mutex_obj_free(rxq->rxq_lock);
   5622 	}
   5623  fail_1:
   5624 	for (i = 0; i < tx_done; i++) {
   5625 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5626 		pcq_destroy(txq->txq_interq);
   5627 		wm_free_tx_buffer(sc, txq);
   5628 		wm_free_tx_descs(sc, txq);
   5629 		if (txq->txq_lock)
   5630 			mutex_obj_free(txq->txq_lock);
   5631 	}
   5632 
   5633 	kmem_free(sc->sc_queue,
   5634 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5635  fail_0:
   5636 	return error;
   5637 }
   5638 
/*
 * wm_free_txrx_queues:
 *
 *	Free {tx,rx}descs and {tx,rx} buffers.
 */
   5643 static void
   5644 wm_free_txrx_queues(struct wm_softc *sc)
   5645 {
   5646 	int i;
   5647 
   5648 	for (i = 0; i < sc->sc_nqueues; i++) {
   5649 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5650 		wm_free_rx_buffer(sc, rxq);
   5651 		wm_free_rx_descs(sc, rxq);
   5652 		if (rxq->rxq_lock)
   5653 			mutex_obj_free(rxq->rxq_lock);
   5654 	}
   5655 
   5656 	for (i = 0; i < sc->sc_nqueues; i++) {
   5657 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5658 		wm_free_tx_buffer(sc, txq);
   5659 		wm_free_tx_descs(sc, txq);
   5660 		if (txq->txq_lock)
   5661 			mutex_obj_free(txq->txq_lock);
   5662 	}
   5663 
   5664 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5665 }
   5666 
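/*
 * wm_init_tx_descs:
 *
 *	Zero the Tx descriptor ring, sync it for the device, and reset
 *	the free-count and next-descriptor bookkeeping.
 */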
   5667 static void
   5668 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5669 {
   5670 
   5671 	KASSERT(WM_TX_LOCKED(txq));
   5672 
   5673 	/* Initialize the transmit descriptor ring. */
   5674 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5675 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5676 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5677 	txq->txq_free = WM_NTXDESC(txq);
   5678 	txq->txq_next = 0;
   5679 }
   5680 
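/*
 * wm_init_tx_regs:
 *
 *	Program the Tx descriptor ring base, length, head/tail and
 *	related control registers for one queue.
 */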
   5681 static void
   5682 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5683     struct wm_txqueue *txq)
   5684 {
   5685 
   5686 	KASSERT(WM_TX_LOCKED(txq));
   5687 
   5688 	if (sc->sc_type < WM_T_82543) {
   5689 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5690 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5691 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5692 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5693 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5694 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5695 	} else {
   5696 		int qid = wmq->wmq_id;
   5697 
   5698 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5699 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5700 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5701 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5702 
   5703 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5704 			/*
   5705 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   5707 			 */
   5708 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5709 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5710 			    | TXDCTL_WTHRESH(0));
   5711 		else {
   5712 			/* ITR / 4 */
   5713 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5714 			if (sc->sc_type >= WM_T_82540) {
				/* TADV should be the same as TIDV */
   5716 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5717 			}
   5718 
   5719 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5720 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5721 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5722 		}
   5723 	}
   5724 }
   5725 
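/*
 * wm_init_tx_buffer:
 *
 *	Initialize the transmit job descriptors and their bookkeeping.
 */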
   5726 static void
   5727 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5728 {
   5729 	int i;
   5730 
   5731 	KASSERT(WM_TX_LOCKED(txq));
   5732 
   5733 	/* Initialize the transmit job descriptors. */
   5734 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5735 		txq->txq_soft[i].txs_mbuf = NULL;
   5736 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5737 	txq->txq_snext = 0;
   5738 	txq->txq_sdirty = 0;
   5739 }
   5740 
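/*
 * wm_init_tx_queue:
 *
 *	Initialize one transmit queue: register offsets, descriptor
 *	ring, hardware registers and software state.
 */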
   5741 static void
   5742 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5743     struct wm_txqueue *txq)
   5744 {
   5745 
   5746 	KASSERT(WM_TX_LOCKED(txq));
   5747 
   5748 	/*
   5749 	 * Set up some register offsets that are different between
   5750 	 * the i82542 and the i82543 and later chips.
   5751 	 */
   5752 	if (sc->sc_type < WM_T_82543)
   5753 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5754 	else
   5755 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5756 
   5757 	wm_init_tx_descs(sc, txq);
   5758 	wm_init_tx_regs(sc, wmq, txq);
   5759 	wm_init_tx_buffer(sc, txq);
   5760 }
   5761 
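/*
 * wm_init_rx_regs:
 *
 *	Program the Rx descriptor ring base, length, head/tail and
 *	related control registers for one queue.
 */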
   5762 static void
   5763 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5764     struct wm_rxqueue *rxq)
   5765 {
   5766 
   5767 	KASSERT(WM_RX_LOCKED(rxq));
   5768 
   5769 	/*
   5770 	 * Initialize the receive descriptor and receive job
   5771 	 * descriptor rings.
   5772 	 */
   5773 	if (sc->sc_type < WM_T_82543) {
   5774 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5775 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5776 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5777 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5778 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5779 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5780 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5781 
   5782 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5783 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5784 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5785 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5786 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5787 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5788 	} else {
   5789 		int qid = wmq->wmq_id;
   5790 
   5791 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5792 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5793 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5794 
   5795 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "i82575 or higher\n", __func__, MCLBYTES);
   5798 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5799 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5800 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5801 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5802 			    | RXDCTL_WTHRESH(1));
   5803 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5804 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5805 		} else {
   5806 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5807 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5808 			/* ITR / 4 */
   5809 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
			/* RADV MUST be the same as RDTR */
   5811 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5812 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5813 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5814 		}
   5815 	}
   5816 }
   5817 
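/*
 * wm_init_rx_buffer:
 *
 *	Populate the Rx ring with mbuf clusters, allocating any that
 *	are missing, and reset the receive-chain state.
 */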
   5818 static int
   5819 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5820 {
   5821 	struct wm_rxsoft *rxs;
   5822 	int error, i;
   5823 
   5824 	KASSERT(WM_RX_LOCKED(rxq));
   5825 
   5826 	for (i = 0; i < WM_NRXDESC; i++) {
   5827 		rxs = &rxq->rxq_soft[i];
   5828 		if (rxs->rxs_mbuf == NULL) {
   5829 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5830 				log(LOG_ERR, "%s: unable to allocate or map "
   5831 				    "rx buffer %d, error = %d\n",
   5832 				    device_xname(sc->sc_dev), i, error);
   5833 				/*
   5834 				 * XXX Should attempt to run with fewer receive
   5835 				 * XXX buffers instead of just failing.
   5836 				 */
   5837 				wm_rxdrain(rxq);
   5838 				return ENOMEM;
   5839 			}
   5840 		} else {
   5841 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5842 				wm_init_rxdesc(rxq, i);
   5843 			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
   5847 			 */
   5848 		}
   5849 	}
   5850 	rxq->rxq_ptr = 0;
   5851 	rxq->rxq_discard = 0;
   5852 	WM_RXCHAIN_RESET(rxq);
   5853 
   5854 	return 0;
   5855 }
   5856 
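/*
 * wm_init_rx_queue:
 *
 *	Initialize one receive queue: register offsets, hardware
 *	registers and receive buffers.
 */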
   5857 static int
   5858 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5859     struct wm_rxqueue *rxq)
   5860 {
   5861 
   5862 	KASSERT(WM_RX_LOCKED(rxq));
   5863 
   5864 	/*
   5865 	 * Set up some register offsets that are different between
   5866 	 * the i82542 and the i82543 and later chips.
   5867 	 */
   5868 	if (sc->sc_type < WM_T_82543)
   5869 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5870 	else
   5871 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5872 
   5873 	wm_init_rx_regs(sc, wmq, rxq);
   5874 	return wm_init_rx_buffer(sc, rxq);
   5875 }
   5876 
   5877 /*
 * wm_init_txrx_queues:
 *
 *	Initialize {tx,rx}descs and {tx,rx} buffers.
   5880  */
   5881 static int
   5882 wm_init_txrx_queues(struct wm_softc *sc)
   5883 {
   5884 	int i, error = 0;
   5885 
   5886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5887 		device_xname(sc->sc_dev), __func__));
   5888 	for (i = 0; i < sc->sc_nqueues; i++) {
   5889 		struct wm_queue *wmq = &sc->sc_queue[i];
   5890 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5891 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5892 
   5893 		WM_TX_LOCK(txq);
   5894 		wm_init_tx_queue(sc, wmq, txq);
   5895 		WM_TX_UNLOCK(txq);
   5896 
   5897 		WM_RX_LOCK(rxq);
   5898 		error = wm_init_rx_queue(sc, wmq, rxq);
   5899 		WM_RX_UNLOCK(rxq);
   5900 		if (error)
   5901 			break;
   5902 	}
   5903 
   5904 	return error;
   5905 }
   5906 
   5907 /*
   5908  * wm_tx_offload:
   5909  *
   5910  *	Set up TCP/IP checksumming parameters for the
   5911  *	specified packet.
   5912  */
   5913 static int
   5914 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5915     uint8_t *fieldsp)
   5916 {
   5917 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5918 	struct mbuf *m0 = txs->txs_mbuf;
   5919 	struct livengood_tcpip_ctxdesc *t;
   5920 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5921 	uint32_t ipcse;
   5922 	struct ether_header *eh;
   5923 	int offset, iphl;
   5924 	uint8_t fields;
   5925 
   5926 	/*
   5927 	 * XXX It would be nice if the mbuf pkthdr had offset
   5928 	 * fields for the protocol headers.
   5929 	 */
   5930 
   5931 	eh = mtod(m0, struct ether_header *);
   5932 	switch (htons(eh->ether_type)) {
   5933 	case ETHERTYPE_IP:
   5934 	case ETHERTYPE_IPV6:
   5935 		offset = ETHER_HDR_LEN;
   5936 		break;
   5937 
   5938 	case ETHERTYPE_VLAN:
   5939 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5940 		break;
   5941 
   5942 	default:
   5943 		/*
   5944 		 * Don't support this protocol or encapsulation.
   5945 		 */
   5946 		*fieldsp = 0;
   5947 		*cmdp = 0;
   5948 		return 0;
   5949 	}
   5950 
   5951 	if ((m0->m_pkthdr.csum_flags &
   5952 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5953 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5954 	} else {
   5955 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5956 	}
   5957 	ipcse = offset + iphl - 1;
   5958 
   5959 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5960 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5961 	seg = 0;
   5962 	fields = 0;
   5963 
   5964 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5965 		int hlen = offset + iphl;
   5966 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5967 
   5968 		if (__predict_false(m0->m_len <
   5969 				    (hlen + sizeof(struct tcphdr)))) {
   5970 			/*
   5971 			 * TCP/IP headers are not in the first mbuf; we need
   5972 			 * to do this the slow and painful way.  Let's just
   5973 			 * hope this doesn't happen very often.
   5974 			 */
   5975 			struct tcphdr th;
   5976 
   5977 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5978 
   5979 			m_copydata(m0, hlen, sizeof(th), &th);
   5980 			if (v4) {
   5981 				struct ip ip;
   5982 
   5983 				m_copydata(m0, offset, sizeof(ip), &ip);
   5984 				ip.ip_len = 0;
   5985 				m_copyback(m0,
   5986 				    offset + offsetof(struct ip, ip_len),
   5987 				    sizeof(ip.ip_len), &ip.ip_len);
   5988 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5989 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5990 			} else {
   5991 				struct ip6_hdr ip6;
   5992 
   5993 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5994 				ip6.ip6_plen = 0;
   5995 				m_copyback(m0,
   5996 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5997 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5998 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5999 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6000 			}
   6001 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6002 			    sizeof(th.th_sum), &th.th_sum);
   6003 
   6004 			hlen += th.th_off << 2;
   6005 		} else {
   6006 			/*
   6007 			 * TCP/IP headers are in the first mbuf; we can do
   6008 			 * this the easy way.
   6009 			 */
   6010 			struct tcphdr *th;
   6011 
   6012 			if (v4) {
   6013 				struct ip *ip =
   6014 				    (void *)(mtod(m0, char *) + offset);
   6015 				th = (void *)(mtod(m0, char *) + hlen);
   6016 
   6017 				ip->ip_len = 0;
   6018 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6019 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6020 			} else {
   6021 				struct ip6_hdr *ip6 =
   6022 				    (void *)(mtod(m0, char *) + offset);
   6023 				th = (void *)(mtod(m0, char *) + hlen);
   6024 
   6025 				ip6->ip6_plen = 0;
   6026 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6027 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6028 			}
   6029 			hlen += th->th_off << 2;
   6030 		}
   6031 
   6032 		if (v4) {
   6033 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6034 			cmdlen |= WTX_TCPIP_CMD_IP;
   6035 		} else {
   6036 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6037 			ipcse = 0;
   6038 		}
   6039 		cmd |= WTX_TCPIP_CMD_TSE;
   6040 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6041 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6042 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6043 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6044 	}
   6045 
   6046 	/*
   6047 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6048 	 * offload feature, if we load the context descriptor, we
   6049 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6050 	 */
   6051 
   6052 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6053 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6054 	    WTX_TCPIP_IPCSE(ipcse);
   6055 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6056 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   6057 		fields |= WTX_IXSM;
   6058 	}
   6059 
   6060 	offset += iphl;
   6061 
   6062 	if (m0->m_pkthdr.csum_flags &
   6063 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6064 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6065 		fields |= WTX_TXSM;
   6066 		tucs = WTX_TCPIP_TUCSS(offset) |
   6067 		    WTX_TCPIP_TUCSO(offset +
   6068 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6069 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6070 	} else if ((m0->m_pkthdr.csum_flags &
   6071 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6072 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6073 		fields |= WTX_TXSM;
   6074 		tucs = WTX_TCPIP_TUCSS(offset) |
   6075 		    WTX_TCPIP_TUCSO(offset +
   6076 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6077 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6078 	} else {
   6079 		/* Just initialize it to a valid TCP context. */
   6080 		tucs = WTX_TCPIP_TUCSS(offset) |
   6081 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6082 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6083 	}
   6084 
   6085 	/* Fill in the context descriptor. */
   6086 	t = (struct livengood_tcpip_ctxdesc *)
   6087 	    &txq->txq_descs[txq->txq_next];
   6088 	t->tcpip_ipcs = htole32(ipcs);
   6089 	t->tcpip_tucs = htole32(tucs);
   6090 	t->tcpip_cmdlen = htole32(cmdlen);
   6091 	t->tcpip_seg = htole32(seg);
   6092 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6093 
   6094 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6095 	txs->txs_ndesc++;
   6096 
   6097 	*cmdp = cmd;
   6098 	*fieldsp = fields;
   6099 
   6100 	return 0;
   6101 }
   6102 
   6103 /*
   6104  * wm_start:		[ifnet interface function]
   6105  *
   6106  *	Start packet transmission on the interface.
   6107  */
   6108 static void
   6109 wm_start(struct ifnet *ifp)
   6110 {
   6111 	struct wm_softc *sc = ifp->if_softc;
   6112 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6113 
   6114 	WM_TX_LOCK(txq);
   6115 	if (!sc->sc_stopping)
   6116 		wm_start_locked(ifp);
   6117 	WM_TX_UNLOCK(txq);
   6118 }
   6119 
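/*
 * wm_start_locked:
 *
 *	Start packet transmission on the interface.  The caller must
 *	hold the Tx queue lock.
 */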
   6120 static void
   6121 wm_start_locked(struct ifnet *ifp)
   6122 {
   6123 	struct wm_softc *sc = ifp->if_softc;
   6124 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6125 	struct mbuf *m0;
   6126 	struct m_tag *mtag;
   6127 	struct wm_txsoft *txs;
   6128 	bus_dmamap_t dmamap;
   6129 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6130 	bus_addr_t curaddr;
   6131 	bus_size_t seglen, curlen;
   6132 	uint32_t cksumcmd;
   6133 	uint8_t cksumfields;
   6134 
   6135 	KASSERT(WM_TX_LOCKED(txq));
   6136 
   6137 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6138 		return;
   6139 
   6140 	/* Remember the previous number of free descriptors. */
   6141 	ofree = txq->txq_free;
   6142 
   6143 	/*
   6144 	 * Loop through the send queue, setting up transmit descriptors
   6145 	 * until we drain the queue, or use up all available transmit
   6146 	 * descriptors.
   6147 	 */
   6148 	for (;;) {
   6149 		m0 = NULL;
   6150 
   6151 		/* Get a work queue entry. */
   6152 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6153 			wm_txeof(sc, txq);
   6154 			if (txq->txq_sfree == 0) {
   6155 				DPRINTF(WM_DEBUG_TX,
   6156 				    ("%s: TX: no free job descriptors\n",
   6157 					device_xname(sc->sc_dev)));
   6158 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6159 				break;
   6160 			}
   6161 		}
   6162 
   6163 		/* Grab a packet off the queue. */
   6164 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6165 		if (m0 == NULL)
   6166 			break;
   6167 
   6168 		DPRINTF(WM_DEBUG_TX,
   6169 		    ("%s: TX: have packet to transmit: %p\n",
   6170 		    device_xname(sc->sc_dev), m0));
   6171 
   6172 		txs = &txq->txq_soft[txq->txq_snext];
   6173 		dmamap = txs->txs_dmamap;
   6174 
   6175 		use_tso = (m0->m_pkthdr.csum_flags &
   6176 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6177 
   6178 		/*
   6179 		 * So says the Linux driver:
   6180 		 * The controller does a simple calculation to make sure
   6181 		 * there is enough room in the FIFO before initiating the
   6182 		 * DMA for each buffer.  The calc is:
   6183 		 *	4 = ceil(buffer len / MSS)
   6184 		 * To make sure we don't overrun the FIFO, adjust the max
   6185 		 * buffer len if the MSS drops.
   6186 		 */
   6187 		dmamap->dm_maxsegsz =
   6188 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6189 		    ? m0->m_pkthdr.segsz << 2
   6190 		    : WTX_MAX_LEN;
   6191 
   6192 		/*
   6193 		 * Load the DMA map.  If this fails, the packet either
   6194 		 * didn't fit in the allotted number of segments, or we
   6195 		 * were short on resources.  For the too-many-segments
   6196 		 * case, we simply report an error and drop the packet,
   6197 		 * since we can't sanely copy a jumbo packet to a single
   6198 		 * buffer.
   6199 		 */
   6200 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6201 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6202 		if (error) {
   6203 			if (error == EFBIG) {
   6204 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6205 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6206 				    "DMA segments, dropping...\n",
   6207 				    device_xname(sc->sc_dev));
   6208 				wm_dump_mbuf_chain(sc, m0);
   6209 				m_freem(m0);
   6210 				continue;
   6211 			}
			/* Short on resources, just stop for now. */
   6213 			DPRINTF(WM_DEBUG_TX,
   6214 			    ("%s: TX: dmamap load failed: %d\n",
   6215 			    device_xname(sc->sc_dev), error));
   6216 			break;
   6217 		}
   6218 
   6219 		segs_needed = dmamap->dm_nsegs;
   6220 		if (use_tso) {
   6221 			/* For sentinel descriptor; see below. */
   6222 			segs_needed++;
   6223 		}
   6224 
   6225 		/*
   6226 		 * Ensure we have enough descriptors free to describe
   6227 		 * the packet.  Note, we always reserve one descriptor
   6228 		 * at the end of the ring due to the semantics of the
   6229 		 * TDT register, plus one more in the event we need
   6230 		 * to load offload context.
   6231 		 */
   6232 		if (segs_needed > txq->txq_free - 2) {
   6233 			/*
   6234 			 * Not enough free descriptors to transmit this
   6235 			 * packet.  We haven't committed anything yet,
   6236 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6238 			 * layer that there are no more slots left.
   6239 			 */
   6240 			DPRINTF(WM_DEBUG_TX,
   6241 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6242 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6243 			    segs_needed, txq->txq_free - 1));
   6244 			ifp->if_flags |= IFF_OACTIVE;
   6245 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6246 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6247 			break;
   6248 		}
   6249 
   6250 		/*
   6251 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6252 		 * once we know we can transmit the packet, since we
   6253 		 * do some internal FIFO space accounting here.
   6254 		 */
   6255 		if (sc->sc_type == WM_T_82547 &&
   6256 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6257 			DPRINTF(WM_DEBUG_TX,
   6258 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6259 			    device_xname(sc->sc_dev)));
   6260 			ifp->if_flags |= IFF_OACTIVE;
   6261 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6262 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6263 			break;
   6264 		}
   6265 
   6266 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6267 
   6268 		DPRINTF(WM_DEBUG_TX,
   6269 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6270 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6271 
   6272 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6273 
   6274 		/*
   6275 		 * Store a pointer to the packet so that we can free it
   6276 		 * later.
   6277 		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
   6282 		 */
   6283 		txs->txs_mbuf = m0;
   6284 		txs->txs_firstdesc = txq->txq_next;
   6285 		txs->txs_ndesc = segs_needed;
   6286 
   6287 		/* Set up offload parameters for this packet. */
   6288 		if (m0->m_pkthdr.csum_flags &
   6289 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6290 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6291 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6292 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6293 					  &cksumfields) != 0) {
   6294 				/* Error message already displayed. */
   6295 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6296 				continue;
   6297 			}
   6298 		} else {
   6299 			cksumcmd = 0;
   6300 			cksumfields = 0;
   6301 		}
   6302 
   6303 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6304 
   6305 		/* Sync the DMA map. */
   6306 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6307 		    BUS_DMASYNC_PREWRITE);
   6308 
   6309 		/* Initialize the transmit descriptor. */
   6310 		for (nexttx = txq->txq_next, seg = 0;
   6311 		     seg < dmamap->dm_nsegs; seg++) {
   6312 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6313 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6314 			     seglen != 0;
   6315 			     curaddr += curlen, seglen -= curlen,
   6316 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6317 				curlen = seglen;
   6318 
   6319 				/*
   6320 				 * So says the Linux driver:
   6321 				 * Work around for premature descriptor
   6322 				 * write-backs in TSO mode.  Append a
   6323 				 * 4-byte sentinel descriptor.
   6324 				 */
   6325 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6326 				    curlen > 8)
   6327 					curlen -= 4;
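         				/*
         				 * Shortening curlen here makes the
         				 * inner loop emit one extra descriptor
         				 * carrying the final 4 bytes.
         				 */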
   6328 
   6329 				wm_set_dma_addr(
   6330 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6331 				txq->txq_descs[nexttx].wtx_cmdlen
   6332 				    = htole32(cksumcmd | curlen);
   6333 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6334 				    = 0;
   6335 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6336 				    = cksumfields;
    6337 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6338 				lasttx = nexttx;
   6339 
   6340 				DPRINTF(WM_DEBUG_TX,
   6341 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6342 				     "len %#04zx\n",
   6343 				    device_xname(sc->sc_dev), nexttx,
   6344 				    (uint64_t)curaddr, curlen));
   6345 			}
   6346 		}
   6347 
   6348 		KASSERT(lasttx != -1);
   6349 
   6350 		/*
   6351 		 * Set up the command byte on the last descriptor of
   6352 		 * the packet.  If we're in the interrupt delay window,
   6353 		 * delay the interrupt.
   6354 		 */
   6355 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6356 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6357 
   6358 		/*
   6359 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6360 		 * up the descriptor to encapsulate the packet for us.
   6361 		 *
   6362 		 * This is only valid on the last descriptor of the packet.
   6363 		 */
   6364 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6365 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6366 			    htole32(WTX_CMD_VLE);
   6367 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6368 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6369 		}
   6370 
   6371 		txs->txs_lastdesc = lasttx;
   6372 
   6373 		DPRINTF(WM_DEBUG_TX,
   6374 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6375 		    device_xname(sc->sc_dev),
   6376 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6377 
   6378 		/* Sync the descriptors we're using. */
   6379 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6380 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6381 
   6382 		/* Give the packet to the chip. */
   6383 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6384 
   6385 		DPRINTF(WM_DEBUG_TX,
   6386 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6387 
   6388 		DPRINTF(WM_DEBUG_TX,
   6389 		    ("%s: TX: finished transmitting packet, job %d\n",
   6390 		    device_xname(sc->sc_dev), txq->txq_snext));
   6391 
   6392 		/* Advance the tx pointer. */
   6393 		txq->txq_free -= txs->txs_ndesc;
   6394 		txq->txq_next = nexttx;
   6395 
   6396 		txq->txq_sfree--;
   6397 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6398 
   6399 		/* Pass the packet to any BPF listeners. */
   6400 		bpf_mtap(ifp, m0);
   6401 	}
   6402 
   6403 	if (m0 != NULL) {
   6404 		ifp->if_flags |= IFF_OACTIVE;
   6405 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6406 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6407 			__func__));
   6408 		m_freem(m0);
   6409 	}
   6410 
   6411 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6412 		/* No more slots; notify upper layer. */
   6413 		ifp->if_flags |= IFF_OACTIVE;
   6414 	}
   6415 
   6416 	if (txq->txq_free != ofree) {
   6417 		/* Set a watchdog timer in case the chip flakes out. */
   6418 		ifp->if_timer = 5;
   6419 	}
   6420 }
   6421 
   6422 /*
   6423  * wm_nq_tx_offload:
   6424  *
   6425  *	Set up TCP/IP checksumming parameters for the
   6426  *	specified packet, for NEWQUEUE devices
   6427  */
   6428 static int
   6429 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6430     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6431 {
   6432 	struct mbuf *m0 = txs->txs_mbuf;
   6433 	struct m_tag *mtag;
   6434 	uint32_t vl_len, mssidx, cmdc;
   6435 	struct ether_header *eh;
   6436 	int offset, iphl;
   6437 
   6438 	/*
   6439 	 * XXX It would be nice if the mbuf pkthdr had offset
   6440 	 * fields for the protocol headers.
   6441 	 */
   6442 	*cmdlenp = 0;
   6443 	*fieldsp = 0;
   6444 
   6445 	eh = mtod(m0, struct ether_header *);
   6446 	switch (htons(eh->ether_type)) {
   6447 	case ETHERTYPE_IP:
   6448 	case ETHERTYPE_IPV6:
   6449 		offset = ETHER_HDR_LEN;
   6450 		break;
   6451 
   6452 	case ETHERTYPE_VLAN:
   6453 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6454 		break;
   6455 
   6456 	default:
   6457 		/* Don't support this protocol or encapsulation. */
   6458 		*do_csum = false;
   6459 		return 0;
   6460 	}
   6461 	*do_csum = true;
   6462 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6463 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6464 
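         	/*
         	 * vl_len packs the header geometry for the context
         	 * descriptor: the L2 header length (MACLEN), the IP header
         	 * length (IPLEN) and the 802.1Q tag (VLAN), via the
         	 * NQTXC_VLLEN_* shifts below.
         	 */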
   6465 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6466 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6467 
   6468 	if ((m0->m_pkthdr.csum_flags &
   6469 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6470 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6471 	} else {
   6472 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6473 	}
   6474 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6475 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6476 
   6477 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6478 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6479 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6480 		*cmdlenp |= NQTX_CMD_VLE;
   6481 	}
   6482 
   6483 	mssidx = 0;
   6484 
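         	/*
         	 * For TSO the hardware replicates and rewrites the IP/TCP
         	 * headers for each segment, so we must hand it a template:
         	 * below we zero the IP length field and seed th_sum with a
         	 * pseudo-header checksum computed without the length.
         	 */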
   6485 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6486 		int hlen = offset + iphl;
   6487 		int tcp_hlen;
   6488 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6489 
   6490 		if (__predict_false(m0->m_len <
   6491 				    (hlen + sizeof(struct tcphdr)))) {
   6492 			/*
   6493 			 * TCP/IP headers are not in the first mbuf; we need
   6494 			 * to do this the slow and painful way.  Let's just
   6495 			 * hope this doesn't happen very often.
   6496 			 */
   6497 			struct tcphdr th;
   6498 
   6499 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6500 
   6501 			m_copydata(m0, hlen, sizeof(th), &th);
   6502 			if (v4) {
   6503 				struct ip ip;
   6504 
   6505 				m_copydata(m0, offset, sizeof(ip), &ip);
   6506 				ip.ip_len = 0;
   6507 				m_copyback(m0,
   6508 				    offset + offsetof(struct ip, ip_len),
   6509 				    sizeof(ip.ip_len), &ip.ip_len);
   6510 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6511 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6512 			} else {
   6513 				struct ip6_hdr ip6;
   6514 
   6515 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6516 				ip6.ip6_plen = 0;
   6517 				m_copyback(m0,
   6518 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6519 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6520 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6521 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6522 			}
   6523 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6524 			    sizeof(th.th_sum), &th.th_sum);
   6525 
   6526 			tcp_hlen = th.th_off << 2;
   6527 		} else {
   6528 			/*
   6529 			 * TCP/IP headers are in the first mbuf; we can do
   6530 			 * this the easy way.
   6531 			 */
   6532 			struct tcphdr *th;
   6533 
   6534 			if (v4) {
   6535 				struct ip *ip =
   6536 				    (void *)(mtod(m0, char *) + offset);
   6537 				th = (void *)(mtod(m0, char *) + hlen);
   6538 
   6539 				ip->ip_len = 0;
   6540 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6541 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6542 			} else {
   6543 				struct ip6_hdr *ip6 =
   6544 				    (void *)(mtod(m0, char *) + offset);
   6545 				th = (void *)(mtod(m0, char *) + hlen);
   6546 
   6547 				ip6->ip6_plen = 0;
   6548 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6549 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6550 			}
   6551 			tcp_hlen = th->th_off << 2;
   6552 		}
   6553 		hlen += tcp_hlen;
   6554 		*cmdlenp |= NQTX_CMD_TSE;
   6555 
   6556 		if (v4) {
   6557 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6558 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6559 		} else {
   6560 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6561 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6562 		}
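         		/*
         		 * For TSO, PAYLEN is the TCP payload only (the frame
         		 * length minus the L2/L3/L4 headers); the MSS and the
         		 * TCP header length are packed into the context
         		 * descriptor's MSSIDX field below.
         		 */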
    6563 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    6564 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6565 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6566 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6567 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6568 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6569 	} else {
   6570 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6571 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6572 	}
   6573 
   6574 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6575 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6576 		cmdc |= NQTXC_CMD_IP4;
   6577 	}
   6578 
   6579 	if (m0->m_pkthdr.csum_flags &
   6580 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6581 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6582 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6583 			cmdc |= NQTXC_CMD_TCP;
   6584 		} else {
   6585 			cmdc |= NQTXC_CMD_UDP;
   6586 		}
   6587 		cmdc |= NQTXC_CMD_IP4;
   6588 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6589 	}
   6590 	if (m0->m_pkthdr.csum_flags &
   6591 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6592 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6593 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6594 			cmdc |= NQTXC_CMD_TCP;
   6595 		} else {
   6596 			cmdc |= NQTXC_CMD_UDP;
   6597 		}
   6598 		cmdc |= NQTXC_CMD_IP6;
   6599 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6600 	}
   6601 
   6602 	/* Fill in the context descriptor. */
   6603 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6604 	    htole32(vl_len);
   6605 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6606 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6607 	    htole32(cmdc);
   6608 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6609 	    htole32(mssidx);
   6610 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6611 	DPRINTF(WM_DEBUG_TX,
   6612 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6613 	    txq->txq_next, 0, vl_len));
   6614 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6615 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6616 	txs->txs_ndesc++;
   6617 	return 0;
   6618 }
   6619 
   6620 /*
   6621  * wm_nq_start:		[ifnet interface function]
   6622  *
   6623  *	Start packet transmission on the interface for NEWQUEUE devices
   6624  */
   6625 static void
   6626 wm_nq_start(struct ifnet *ifp)
   6627 {
   6628 	struct wm_softc *sc = ifp->if_softc;
   6629 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6630 
   6631 	WM_TX_LOCK(txq);
   6632 	if (!sc->sc_stopping)
   6633 		wm_nq_start_locked(ifp);
   6634 	WM_TX_UNLOCK(txq);
   6635 }
   6636 
   6637 static void
   6638 wm_nq_start_locked(struct ifnet *ifp)
   6639 {
   6640 	struct wm_softc *sc = ifp->if_softc;
   6641 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6642 
   6643 	wm_nq_send_common_locked(ifp, txq, false);
   6644 }
   6645 
   6646 static inline int
   6647 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6648 {
   6649 	struct wm_softc *sc = ifp->if_softc;
   6650 	u_int cpuid = cpu_index(curcpu());
   6651 
   6652 	/*
    6653 	 * Currently, a simple CPU-based distribution strategy.
    6654 	 * TODO:
    6655 	 * distribute by flow ID (RSS hash value).
   6656 	 */
   6657 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6658 }
   6659 
   6660 static int
   6661 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6662 {
   6663 	int qid;
   6664 	struct wm_softc *sc = ifp->if_softc;
   6665 	struct wm_txqueue *txq;
   6666 
   6667 	qid = wm_nq_select_txqueue(ifp, m);
   6668 	txq = &sc->sc_queue[qid].wmq_txq;
   6669 
   6670 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6671 		m_freem(m);
   6672 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6673 		return ENOBUFS;
   6674 	}
   6675 
   6676 	if (WM_TX_TRYLOCK(txq)) {
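         	/*
         	 * If we cannot take the Tx lock, the packet just queued on
         	 * txq_interq will be drained later, either by the current
         	 * lock holder or from the Tx interrupt path, so it is safe
         	 * to return success here.
         	 */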
   6677 		/* XXXX should be per TX queue */
   6678 		ifp->if_obytes += m->m_pkthdr.len;
   6679 		if (m->m_flags & M_MCAST)
   6680 			ifp->if_omcasts++;
   6681 
   6682 		if (!sc->sc_stopping)
   6683 			wm_nq_transmit_locked(ifp, txq);
   6684 		WM_TX_UNLOCK(txq);
   6685 	}
   6686 
   6687 	return 0;
   6688 }
   6689 
   6690 static void
   6691 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6692 {
   6693 
   6694 	wm_nq_send_common_locked(ifp, txq, true);
   6695 }
   6696 
   6697 static void
   6698 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6699     bool is_transmit)
   6700 {
   6701 	struct wm_softc *sc = ifp->if_softc;
   6702 	struct mbuf *m0;
   6703 	struct m_tag *mtag;
   6704 	struct wm_txsoft *txs;
   6705 	bus_dmamap_t dmamap;
   6706 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6707 	bool do_csum, sent;
   6708 
   6709 	KASSERT(WM_TX_LOCKED(txq));
   6710 
   6711 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6712 		return;
   6713 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6714 		return;
   6715 
   6716 	sent = false;
   6717 
   6718 	/*
   6719 	 * Loop through the send queue, setting up transmit descriptors
   6720 	 * until we drain the queue, or use up all available transmit
   6721 	 * descriptors.
   6722 	 */
   6723 	for (;;) {
   6724 		m0 = NULL;
   6725 
   6726 		/* Get a work queue entry. */
   6727 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6728 			wm_txeof(sc, txq);
   6729 			if (txq->txq_sfree == 0) {
   6730 				DPRINTF(WM_DEBUG_TX,
   6731 				    ("%s: TX: no free job descriptors\n",
   6732 					device_xname(sc->sc_dev)));
   6733 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6734 				break;
   6735 			}
   6736 		}
   6737 
   6738 		/* Grab a packet off the queue. */
   6739 		if (is_transmit)
   6740 			m0 = pcq_get(txq->txq_interq);
   6741 		else
   6742 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6743 		if (m0 == NULL)
   6744 			break;
   6745 
   6746 		DPRINTF(WM_DEBUG_TX,
   6747 		    ("%s: TX: have packet to transmit: %p\n",
   6748 		    device_xname(sc->sc_dev), m0));
   6749 
   6750 		txs = &txq->txq_soft[txq->txq_snext];
   6751 		dmamap = txs->txs_dmamap;
   6752 
   6753 		/*
   6754 		 * Load the DMA map.  If this fails, the packet either
   6755 		 * didn't fit in the allotted number of segments, or we
   6756 		 * were short on resources.  For the too-many-segments
   6757 		 * case, we simply report an error and drop the packet,
   6758 		 * since we can't sanely copy a jumbo packet to a single
   6759 		 * buffer.
   6760 		 */
   6761 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6762 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6763 		if (error) {
   6764 			if (error == EFBIG) {
   6765 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6766 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6767 				    "DMA segments, dropping...\n",
   6768 				    device_xname(sc->sc_dev));
   6769 				wm_dump_mbuf_chain(sc, m0);
   6770 				m_freem(m0);
   6771 				continue;
   6772 			}
   6773 			/* Short on resources, just stop for now. */
   6774 			DPRINTF(WM_DEBUG_TX,
   6775 			    ("%s: TX: dmamap load failed: %d\n",
   6776 			    device_xname(sc->sc_dev), error));
   6777 			break;
   6778 		}
   6779 
   6780 		segs_needed = dmamap->dm_nsegs;
   6781 
   6782 		/*
   6783 		 * Ensure we have enough descriptors free to describe
   6784 		 * the packet.  Note, we always reserve one descriptor
   6785 		 * at the end of the ring due to the semantics of the
   6786 		 * TDT register, plus one more in the event we need
   6787 		 * to load offload context.
   6788 		 */
   6789 		if (segs_needed > txq->txq_free - 2) {
   6790 			/*
   6791 			 * Not enough free descriptors to transmit this
   6792 			 * packet.  We haven't committed anything yet,
   6793 			 * so just unload the DMA map, put the packet
    6794 			 * back on the queue, and punt.  Notify the upper
   6795 			 * layer that there are no more slots left.
   6796 			 */
   6797 			DPRINTF(WM_DEBUG_TX,
   6798 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6799 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6800 			    segs_needed, txq->txq_free - 1));
   6801 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6802 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6803 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6804 			break;
   6805 		}
   6806 
   6807 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6808 
   6809 		DPRINTF(WM_DEBUG_TX,
   6810 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6811 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6812 
   6813 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6814 
   6815 		/*
   6816 		 * Store a pointer to the packet so that we can free it
   6817 		 * later.
   6818 		 *
    6819 		 * Initially, we consider the number of descriptors the
    6820 		 * packet uses to be the number of DMA segments.  This may
    6821 		 * be incremented by 1 if we do checksum offload (a
    6822 		 * descriptor is used to set the checksum context).
   6823 		 */
   6824 		txs->txs_mbuf = m0;
   6825 		txs->txs_firstdesc = txq->txq_next;
   6826 		txs->txs_ndesc = segs_needed;
   6827 
   6828 		/* Set up offload parameters for this packet. */
   6829 		uint32_t cmdlen, fields, dcmdlen;
   6830 		if (m0->m_pkthdr.csum_flags &
   6831 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6832 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6833 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6834 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6835 			    &do_csum) != 0) {
   6836 				/* Error message already displayed. */
   6837 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6838 				continue;
   6839 			}
   6840 		} else {
   6841 			do_csum = false;
   6842 			cmdlen = 0;
   6843 			fields = 0;
   6844 		}
   6845 
   6846 		/* Sync the DMA map. */
   6847 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6848 		    BUS_DMASYNC_PREWRITE);
   6849 
   6850 		/* Initialize the first transmit descriptor. */
   6851 		nexttx = txq->txq_next;
   6852 		if (!do_csum) {
   6853 			/* setup a legacy descriptor */
   6854 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6855 			    dmamap->dm_segs[0].ds_addr);
   6856 			txq->txq_descs[nexttx].wtx_cmdlen =
   6857 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6858 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6859 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6860 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6861 			    NULL) {
   6862 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6863 				    htole32(WTX_CMD_VLE);
   6864 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6865 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6866 			} else {
    6867 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6868 			}
   6869 			dcmdlen = 0;
   6870 		} else {
   6871 			/* setup an advanced data descriptor */
   6872 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6873 			    htole64(dmamap->dm_segs[0].ds_addr);
   6874 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6875 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6876 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6877 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6878 			    htole32(fields);
   6879 			DPRINTF(WM_DEBUG_TX,
   6880 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6881 			    device_xname(sc->sc_dev), nexttx,
   6882 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6883 			DPRINTF(WM_DEBUG_TX,
   6884 			    ("\t 0x%08x%08x\n", fields,
   6885 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6886 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6887 		}
   6888 
   6889 		lasttx = nexttx;
   6890 		nexttx = WM_NEXTTX(txq, nexttx);
   6891 		/*
    6892 		 * Fill in the remaining descriptors.  The legacy and
    6893 		 * advanced formats are laid out the same here.
   6894 		 */
   6895 		for (seg = 1; seg < dmamap->dm_nsegs;
   6896 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6897 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6898 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6899 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6900 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6901 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6902 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6903 			lasttx = nexttx;
   6904 
   6905 			DPRINTF(WM_DEBUG_TX,
   6906 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6907 			     "len %#04zx\n",
   6908 			    device_xname(sc->sc_dev), nexttx,
   6909 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6910 			    dmamap->dm_segs[seg].ds_len));
   6911 		}
   6912 
   6913 		KASSERT(lasttx != -1);
   6914 
   6915 		/*
   6916 		 * Set up the command byte on the last descriptor of
   6917 		 * the packet.  If we're in the interrupt delay window,
   6918 		 * delay the interrupt.
   6919 		 */
   6920 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6921 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6922 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6923 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6924 
   6925 		txs->txs_lastdesc = lasttx;
   6926 
   6927 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6928 		    device_xname(sc->sc_dev),
   6929 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6930 
   6931 		/* Sync the descriptors we're using. */
   6932 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6933 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6934 
   6935 		/* Give the packet to the chip. */
   6936 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6937 		sent = true;
   6938 
   6939 		DPRINTF(WM_DEBUG_TX,
   6940 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6941 
   6942 		DPRINTF(WM_DEBUG_TX,
   6943 		    ("%s: TX: finished transmitting packet, job %d\n",
   6944 		    device_xname(sc->sc_dev), txq->txq_snext));
   6945 
   6946 		/* Advance the tx pointer. */
   6947 		txq->txq_free -= txs->txs_ndesc;
   6948 		txq->txq_next = nexttx;
   6949 
   6950 		txq->txq_sfree--;
   6951 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6952 
   6953 		/* Pass the packet to any BPF listeners. */
   6954 		bpf_mtap(ifp, m0);
   6955 	}
   6956 
   6957 	if (m0 != NULL) {
   6958 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6959 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6960 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6961 			__func__));
   6962 		m_freem(m0);
   6963 	}
   6964 
   6965 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6966 		/* No more slots; notify upper layer. */
   6967 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6968 	}
   6969 
   6970 	if (sent) {
   6971 		/* Set a watchdog timer in case the chip flakes out. */
   6972 		ifp->if_timer = 5;
   6973 	}
   6974 }
   6975 
   6976 /* Interrupt */
   6977 
   6978 /*
   6979  * wm_txeof:
   6980  *
   6981  *	Helper; handle transmit interrupts.
   6982  */
   6983 static int
   6984 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   6985 {
   6986 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6987 	struct wm_txsoft *txs;
   6988 	bool processed = false;
   6989 	int count = 0;
   6990 	int i;
   6991 	uint8_t status;
   6992 
   6993 	KASSERT(WM_TX_LOCKED(txq));
   6994 
   6995 	if (sc->sc_stopping)
   6996 		return 0;
   6997 
   6998 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   6999 
   7000 	/*
   7001 	 * Go through the Tx list and free mbufs for those
   7002 	 * frames which have been transmitted.
   7003 	 */
   7004 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7005 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7006 		txs = &txq->txq_soft[i];
   7007 
   7008 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7009 			device_xname(sc->sc_dev), i));
   7010 
   7011 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7012 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7013 
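         		/*
         		 * We request status write-back (WTX_CMD_RS) only on
         		 * the last descriptor of each job, so WTX_ST_DD being
         		 * set there means the whole packet has been fetched by
         		 * the chip.
         		 */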
   7014 		status =
   7015 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7016 		if ((status & WTX_ST_DD) == 0) {
   7017 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7018 			    BUS_DMASYNC_PREREAD);
   7019 			break;
   7020 		}
   7021 
   7022 		processed = true;
   7023 		count++;
   7024 		DPRINTF(WM_DEBUG_TX,
   7025 		    ("%s: TX: job %d done: descs %d..%d\n",
   7026 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7027 		    txs->txs_lastdesc));
   7028 
   7029 		/*
   7030 		 * XXX We should probably be using the statistics
   7031 		 * XXX registers, but I don't know if they exist
   7032 		 * XXX on chips before the i82544.
   7033 		 */
   7034 
   7035 #ifdef WM_EVENT_COUNTERS
   7036 		if (status & WTX_ST_TU)
   7037 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7038 #endif /* WM_EVENT_COUNTERS */
   7039 
   7040 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7041 			ifp->if_oerrors++;
   7042 			if (status & WTX_ST_LC)
   7043 				log(LOG_WARNING, "%s: late collision\n",
   7044 				    device_xname(sc->sc_dev));
   7045 			else if (status & WTX_ST_EC) {
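         				/*
         				 * An excessive-collision abort occurs
         				 * after 16 transmit attempts, so count
         				 * all of them.
         				 */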
   7046 				ifp->if_collisions += 16;
   7047 				log(LOG_WARNING, "%s: excessive collisions\n",
   7048 				    device_xname(sc->sc_dev));
   7049 			}
   7050 		} else
   7051 			ifp->if_opackets++;
   7052 
   7053 		txq->txq_free += txs->txs_ndesc;
   7054 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7055 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7056 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7057 		m_freem(txs->txs_mbuf);
   7058 		txs->txs_mbuf = NULL;
   7059 	}
   7060 
   7061 	/* Update the dirty transmit buffer pointer. */
   7062 	txq->txq_sdirty = i;
   7063 	DPRINTF(WM_DEBUG_TX,
   7064 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7065 
   7066 	if (count != 0)
   7067 		rnd_add_uint32(&sc->rnd_source, count);
   7068 
   7069 	/*
   7070 	 * If there are no more pending transmissions, cancel the watchdog
   7071 	 * timer.
   7072 	 */
   7073 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7074 		ifp->if_timer = 0;
   7075 
   7076 	return processed;
   7077 }
   7078 
   7079 /*
   7080  * wm_rxeof:
   7081  *
   7082  *	Helper; handle receive interrupts.
   7083  */
   7084 static void
   7085 wm_rxeof(struct wm_rxqueue *rxq)
   7086 {
   7087 	struct wm_softc *sc = rxq->rxq_sc;
   7088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7089 	struct wm_rxsoft *rxs;
   7090 	struct mbuf *m;
   7091 	int i, len;
   7092 	int count = 0;
   7093 	uint8_t status, errors;
   7094 	uint16_t vlantag;
   7095 
   7096 	KASSERT(WM_RX_LOCKED(rxq));
   7097 
   7098 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7099 		rxs = &rxq->rxq_soft[i];
   7100 
   7101 		DPRINTF(WM_DEBUG_RX,
   7102 		    ("%s: RX: checking descriptor %d\n",
   7103 		    device_xname(sc->sc_dev), i));
   7104 
    7105 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7106 
   7107 		status = rxq->rxq_descs[i].wrx_status;
   7108 		errors = rxq->rxq_descs[i].wrx_errors;
   7109 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7110 		vlantag = rxq->rxq_descs[i].wrx_special;
   7111 
   7112 		if ((status & WRX_ST_DD) == 0) {
   7113 			/* We have processed all of the receive descriptors. */
   7114 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7115 			break;
   7116 		}
   7117 
   7118 		count++;
   7119 		if (__predict_false(rxq->rxq_discard)) {
   7120 			DPRINTF(WM_DEBUG_RX,
   7121 			    ("%s: RX: discarding contents of descriptor %d\n",
   7122 			    device_xname(sc->sc_dev), i));
   7123 			wm_init_rxdesc(rxq, i);
   7124 			if (status & WRX_ST_EOP) {
   7125 				/* Reset our state. */
   7126 				DPRINTF(WM_DEBUG_RX,
   7127 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7128 				    device_xname(sc->sc_dev)));
   7129 				rxq->rxq_discard = 0;
   7130 			}
   7131 			continue;
   7132 		}
   7133 
   7134 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7135 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7136 
   7137 		m = rxs->rxs_mbuf;
   7138 
   7139 		/*
   7140 		 * Add a new receive buffer to the ring, unless of
   7141 		 * course the length is zero. Treat the latter as a
   7142 		 * failed mapping.
   7143 		 */
   7144 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7145 			/*
   7146 			 * Failed, throw away what we've done so
   7147 			 * far, and discard the rest of the packet.
   7148 			 */
   7149 			ifp->if_ierrors++;
   7150 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7151 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7152 			wm_init_rxdesc(rxq, i);
   7153 			if ((status & WRX_ST_EOP) == 0)
   7154 				rxq->rxq_discard = 1;
   7155 			if (rxq->rxq_head != NULL)
   7156 				m_freem(rxq->rxq_head);
   7157 			WM_RXCHAIN_RESET(rxq);
   7158 			DPRINTF(WM_DEBUG_RX,
   7159 			    ("%s: RX: Rx buffer allocation failed, "
   7160 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7161 			    rxq->rxq_discard ? " (discard)" : ""));
   7162 			continue;
   7163 		}
   7164 
   7165 		m->m_len = len;
   7166 		rxq->rxq_len += len;
   7167 		DPRINTF(WM_DEBUG_RX,
   7168 		    ("%s: RX: buffer at %p len %d\n",
   7169 		    device_xname(sc->sc_dev), m->m_data, len));
   7170 
   7171 		/* If this is not the end of the packet, keep looking. */
   7172 		if ((status & WRX_ST_EOP) == 0) {
   7173 			WM_RXCHAIN_LINK(rxq, m);
   7174 			DPRINTF(WM_DEBUG_RX,
   7175 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7176 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7177 			continue;
   7178 		}
   7179 
   7180 		/*
   7181 		 * Okay, we have the entire packet now.  The chip is
    7182 		 * configured to include the FCS except on I350, I354 and
    7183 		 * I21[01] (not all chips can be configured to strip it),
    7184 		 * so we need to trim it here.  We may also need to adjust
    7185 		 * the length of the previous mbuf in the chain if the
    7186 		 * current mbuf is too short to hold the whole FCS.
    7187 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7188 		 * register is always set on I350, so we don't trim there.
   7189 		 */
   7190 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7191 		    && (sc->sc_type != WM_T_I210)
   7192 		    && (sc->sc_type != WM_T_I211)) {
   7193 			if (m->m_len < ETHER_CRC_LEN) {
   7194 				rxq->rxq_tail->m_len
   7195 				    -= (ETHER_CRC_LEN - m->m_len);
   7196 				m->m_len = 0;
   7197 			} else
   7198 				m->m_len -= ETHER_CRC_LEN;
   7199 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7200 		} else
   7201 			len = rxq->rxq_len;
   7202 
   7203 		WM_RXCHAIN_LINK(rxq, m);
   7204 
   7205 		*rxq->rxq_tailp = NULL;
   7206 		m = rxq->rxq_head;
   7207 
   7208 		WM_RXCHAIN_RESET(rxq);
   7209 
   7210 		DPRINTF(WM_DEBUG_RX,
   7211 		    ("%s: RX: have entire packet, len -> %d\n",
   7212 		    device_xname(sc->sc_dev), len));
   7213 
   7214 		/* If an error occurred, update stats and drop the packet. */
   7215 		if (errors &
   7216 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7217 			if (errors & WRX_ER_SE)
   7218 				log(LOG_WARNING, "%s: symbol error\n",
   7219 				    device_xname(sc->sc_dev));
   7220 			else if (errors & WRX_ER_SEQ)
   7221 				log(LOG_WARNING, "%s: receive sequence error\n",
   7222 				    device_xname(sc->sc_dev));
   7223 			else if (errors & WRX_ER_CE)
   7224 				log(LOG_WARNING, "%s: CRC error\n",
   7225 				    device_xname(sc->sc_dev));
   7226 			m_freem(m);
   7227 			continue;
   7228 		}
   7229 
   7230 		/* No errors.  Receive the packet. */
   7231 		m->m_pkthdr.rcvif = ifp;
   7232 		m->m_pkthdr.len = len;
   7233 
   7234 		/*
   7235 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7236 		 * for us.  Associate the tag with the packet.
   7237 		 */
   7238 		/* XXXX should check for i350 and i354 */
   7239 		if ((status & WRX_ST_VP) != 0) {
   7240 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7241 		}
   7242 
   7243 		/* Set up checksum info for this packet. */
   7244 		if ((status & WRX_ST_IXSM) == 0) {
   7245 			if (status & WRX_ST_IPCS) {
   7246 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7247 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7248 				if (errors & WRX_ER_IPE)
   7249 					m->m_pkthdr.csum_flags |=
   7250 					    M_CSUM_IPv4_BAD;
   7251 			}
   7252 			if (status & WRX_ST_TCPCS) {
   7253 				/*
   7254 				 * Note: we don't know if this was TCP or UDP,
   7255 				 * so we just set both bits, and expect the
   7256 				 * upper layers to deal.
   7257 				 */
   7258 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7259 				m->m_pkthdr.csum_flags |=
   7260 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7261 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7262 				if (errors & WRX_ER_TCPE)
   7263 					m->m_pkthdr.csum_flags |=
   7264 					    M_CSUM_TCP_UDP_BAD;
   7265 			}
   7266 		}
   7267 
   7268 		ifp->if_ipackets++;
   7269 
   7270 		WM_RX_UNLOCK(rxq);
   7271 
   7272 		/* Pass this up to any BPF listeners. */
   7273 		bpf_mtap(ifp, m);
   7274 
   7275 		/* Pass it on. */
   7276 		if_percpuq_enqueue(sc->sc_ipq, m);
   7277 
   7278 		WM_RX_LOCK(rxq);
   7279 
   7280 		if (sc->sc_stopping)
   7281 			break;
   7282 	}
   7283 
   7284 	/* Update the receive pointer. */
   7285 	rxq->rxq_ptr = i;
   7286 	if (count != 0)
   7287 		rnd_add_uint32(&sc->rnd_source, count);
   7288 
   7289 	DPRINTF(WM_DEBUG_RX,
   7290 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7291 }
   7292 
   7293 /*
   7294  * wm_linkintr_gmii:
   7295  *
   7296  *	Helper; handle link interrupts for GMII.
   7297  */
   7298 static void
   7299 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7300 {
   7301 
   7302 	KASSERT(WM_CORE_LOCKED(sc));
   7303 
   7304 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7305 		__func__));
   7306 
   7307 	if (icr & ICR_LSC) {
   7308 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7309 
   7310 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7311 			wm_gig_downshift_workaround_ich8lan(sc);
   7312 
   7313 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7314 			device_xname(sc->sc_dev)));
   7315 		mii_pollstat(&sc->sc_mii);
   7316 		if (sc->sc_type == WM_T_82543) {
   7317 			int miistatus, active;
   7318 
   7319 			/*
    7320 			 * With 82543, we need to force the MAC's
    7321 			 * speed and duplex to match the PHY's
    7322 			 * configuration.
   7323 			 */
   7324 			miistatus = sc->sc_mii.mii_media_status;
   7325 
   7326 			if (miistatus & IFM_ACTIVE) {
   7327 				active = sc->sc_mii.mii_media_active;
   7328 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7329 				switch (IFM_SUBTYPE(active)) {
   7330 				case IFM_10_T:
   7331 					sc->sc_ctrl |= CTRL_SPEED_10;
   7332 					break;
   7333 				case IFM_100_TX:
   7334 					sc->sc_ctrl |= CTRL_SPEED_100;
   7335 					break;
   7336 				case IFM_1000_T:
   7337 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7338 					break;
   7339 				default:
   7340 					/*
   7341 					 * fiber?
    7342 					 * Should not enter here.
   7343 					 */
   7344 					printf("unknown media (%x)\n", active);
   7345 					break;
   7346 				}
   7347 				if (active & IFM_FDX)
   7348 					sc->sc_ctrl |= CTRL_FD;
   7349 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7350 			}
   7351 		} else if ((sc->sc_type == WM_T_ICH8)
   7352 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7353 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7354 		} else if (sc->sc_type == WM_T_PCH) {
   7355 			wm_k1_gig_workaround_hv(sc,
   7356 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7357 		}
   7358 
   7359 		if ((sc->sc_phytype == WMPHY_82578)
   7360 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7361 			== IFM_1000_T)) {
   7362 
   7363 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7364 				delay(200*1000); /* XXX too big */
   7365 
   7366 				/* Link stall fix for link up */
   7367 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7368 				    HV_MUX_DATA_CTRL,
   7369 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7370 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7371 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7372 				    HV_MUX_DATA_CTRL,
   7373 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7374 			}
   7375 		}
   7376 	} else if (icr & ICR_RXSEQ) {
   7377 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7378 			device_xname(sc->sc_dev)));
   7379 	}
   7380 }
   7381 
   7382 /*
   7383  * wm_linkintr_tbi:
   7384  *
   7385  *	Helper; handle link interrupts for TBI mode.
   7386  */
   7387 static void
   7388 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7389 {
   7390 	uint32_t status;
   7391 
   7392 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7393 		__func__));
   7394 
   7395 	status = CSR_READ(sc, WMREG_STATUS);
   7396 	if (icr & ICR_LSC) {
   7397 		if (status & STATUS_LU) {
   7398 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7399 			    device_xname(sc->sc_dev),
   7400 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7401 			/*
   7402 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7403 			 * so we should update sc->sc_ctrl
   7404 			 */
   7405 
   7406 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7407 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7408 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7409 			if (status & STATUS_FD)
   7410 				sc->sc_tctl |=
   7411 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7412 			else
   7413 				sc->sc_tctl |=
   7414 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7415 			if (sc->sc_ctrl & CTRL_TFCE)
   7416 				sc->sc_fcrtl |= FCRTL_XONE;
   7417 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7418 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7419 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7420 				      sc->sc_fcrtl);
   7421 			sc->sc_tbi_linkup = 1;
   7422 		} else {
   7423 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7424 			    device_xname(sc->sc_dev)));
   7425 			sc->sc_tbi_linkup = 0;
   7426 		}
   7427 		/* Update LED */
   7428 		wm_tbi_serdes_set_linkled(sc);
   7429 	} else if (icr & ICR_RXSEQ) {
   7430 		DPRINTF(WM_DEBUG_LINK,
   7431 		    ("%s: LINK: Receive sequence error\n",
   7432 		    device_xname(sc->sc_dev)));
   7433 	}
   7434 }
   7435 
   7436 /*
   7437  * wm_linkintr_serdes:
   7438  *
    7439  *	Helper; handle link interrupts for SERDES mode.
   7440  */
   7441 static void
   7442 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7443 {
   7444 	struct mii_data *mii = &sc->sc_mii;
   7445 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7446 	uint32_t pcs_adv, pcs_lpab, reg;
   7447 
   7448 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7449 		__func__));
   7450 
   7451 	if (icr & ICR_LSC) {
   7452 		/* Check PCS */
   7453 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7454 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7455 			mii->mii_media_status |= IFM_ACTIVE;
   7456 			sc->sc_tbi_linkup = 1;
   7457 		} else {
   7458 			mii->mii_media_status |= IFM_NONE;
   7459 			sc->sc_tbi_linkup = 0;
   7460 			wm_tbi_serdes_set_linkled(sc);
   7461 			return;
   7462 		}
   7463 		mii->mii_media_active |= IFM_1000_SX;
   7464 		if ((reg & PCS_LSTS_FDX) != 0)
   7465 			mii->mii_media_active |= IFM_FDX;
   7466 		else
   7467 			mii->mii_media_active |= IFM_HDX;
   7468 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7469 			/* Check flow */
   7470 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7471 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7472 				DPRINTF(WM_DEBUG_LINK,
   7473 				    ("XXX LINKOK but not ACOMP\n"));
   7474 				return;
   7475 			}
   7476 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7477 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7478 			DPRINTF(WM_DEBUG_LINK,
   7479 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
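         			/*
         			 * Resolve flow control from the advertised and
         			 * link-partner pause bits, following the usual
         			 * IEEE 802.3 pause resolution: symmetric pause
         			 * on both sides enables both directions, while
         			 * an asymmetric match enables one direction
         			 * only.
         			 */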
   7480 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7481 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7482 				mii->mii_media_active |= IFM_FLOW
   7483 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7484 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7485 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7486 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7487 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7488 				mii->mii_media_active |= IFM_FLOW
   7489 				    | IFM_ETH_TXPAUSE;
   7490 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7491 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7492 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7493 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7494 				mii->mii_media_active |= IFM_FLOW
   7495 				    | IFM_ETH_RXPAUSE;
   7496 		}
   7497 		/* Update LED */
   7498 		wm_tbi_serdes_set_linkled(sc);
   7499 	} else {
   7500 		DPRINTF(WM_DEBUG_LINK,
   7501 		    ("%s: LINK: Receive sequence error\n",
   7502 		    device_xname(sc->sc_dev)));
   7503 	}
   7504 }
   7505 
   7506 /*
   7507  * wm_linkintr:
   7508  *
   7509  *	Helper; handle link interrupts.
   7510  */
   7511 static void
   7512 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7513 {
   7514 
   7515 	KASSERT(WM_CORE_LOCKED(sc));
   7516 
   7517 	if (sc->sc_flags & WM_F_HAS_MII)
   7518 		wm_linkintr_gmii(sc, icr);
   7519 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7520 	    && (sc->sc_type >= WM_T_82575))
   7521 		wm_linkintr_serdes(sc, icr);
   7522 	else
   7523 		wm_linkintr_tbi(sc, icr);
   7524 }
   7525 
   7526 /*
   7527  * wm_intr_legacy:
   7528  *
   7529  *	Interrupt service routine for INTx and MSI.
   7530  */
   7531 static int
   7532 wm_intr_legacy(void *arg)
   7533 {
   7534 	struct wm_softc *sc = arg;
   7535 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7536 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7537 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7538 	uint32_t icr, rndval = 0;
   7539 	int handled = 0;
   7540 
   7541 	DPRINTF(WM_DEBUG_TX,
   7542 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7543 	while (1 /* CONSTCOND */) {
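         		/*
         		 * Reading ICR acknowledges (clears) the pending
         		 * interrupt causes, so each pass of this loop handles
         		 * the causes captured in this snapshot.
         		 */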
   7544 		icr = CSR_READ(sc, WMREG_ICR);
   7545 		if ((icr & sc->sc_icr) == 0)
   7546 			break;
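         		/* Remember the first ICR snapshot for the entropy pool. */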
   7547 		if (rndval == 0)
   7548 			rndval = icr;
   7549 
   7550 		WM_RX_LOCK(rxq);
   7551 
   7552 		if (sc->sc_stopping) {
   7553 			WM_RX_UNLOCK(rxq);
   7554 			break;
   7555 		}
   7556 
   7557 		handled = 1;
   7558 
   7559 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7560 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7561 			DPRINTF(WM_DEBUG_RX,
   7562 			    ("%s: RX: got Rx intr 0x%08x\n",
   7563 			    device_xname(sc->sc_dev),
   7564 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7565 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7566 		}
   7567 #endif
   7568 		wm_rxeof(rxq);
   7569 
   7570 		WM_RX_UNLOCK(rxq);
   7571 		WM_TX_LOCK(txq);
   7572 
   7573 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7574 		if (icr & ICR_TXDW) {
   7575 			DPRINTF(WM_DEBUG_TX,
   7576 			    ("%s: TX: got TXDW interrupt\n",
   7577 			    device_xname(sc->sc_dev)));
   7578 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7579 		}
   7580 #endif
   7581 		wm_txeof(sc, txq);
   7582 
   7583 		WM_TX_UNLOCK(txq);
   7584 		WM_CORE_LOCK(sc);
   7585 
   7586 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7587 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7588 			wm_linkintr(sc, icr);
   7589 		}
   7590 
   7591 		WM_CORE_UNLOCK(sc);
   7592 
   7593 		if (icr & ICR_RXO) {
   7594 #if defined(WM_DEBUG)
   7595 			log(LOG_WARNING, "%s: Receive overrun\n",
   7596 			    device_xname(sc->sc_dev));
   7597 #endif /* defined(WM_DEBUG) */
   7598 		}
   7599 	}
   7600 
   7601 	rnd_add_uint32(&sc->rnd_source, rndval);
   7602 
   7603 	if (handled) {
   7604 		/* Try to get more packets going. */
   7605 		ifp->if_start(ifp);
   7606 	}
   7607 
   7608 	return handled;
   7609 }
   7610 
   7611 static int
   7612 wm_txrxintr_msix(void *arg)
   7613 {
   7614 	struct wm_queue *wmq = arg;
   7615 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7616 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7617 	struct wm_softc *sc = txq->txq_sc;
   7618 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7619 
   7620 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7621 
   7622 	DPRINTF(WM_DEBUG_TX,
   7623 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7624 
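         	/*
         	 * Mask this queue's interrupt while we service it; it is
         	 * re-enabled at the bottom of this handler.
         	 */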
   7625 	if (sc->sc_type == WM_T_82574)
    7626 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7627 	else if (sc->sc_type == WM_T_82575)
    7628 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7629 	else
   7630 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7631 
   7632 	if (!sc->sc_stopping) {
   7633 		WM_TX_LOCK(txq);
   7634 
   7635 		WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7636 		wm_txeof(sc, txq);
   7637 
   7638 		/* Try to get more packets going. */
   7639 		if (pcq_peek(txq->txq_interq) != NULL)
   7640 			wm_nq_transmit_locked(ifp, txq);
   7641 		/*
    7642 		 * There is still some upper-layer processing that calls
    7643 		 * ifp->if_start(), e.g. ALTQ.
   7644 		 */
   7645 		if (wmq->wmq_id == 0) {
   7646 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7647 				wm_nq_start_locked(ifp);
   7648 		}
   7649 		WM_TX_UNLOCK(txq);
   7650 	}
   7651 
   7652 	DPRINTF(WM_DEBUG_RX,
   7653 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7654 
   7655 	if (!sc->sc_stopping) {
   7656 		WM_RX_LOCK(rxq);
   7657 		WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7658 		wm_rxeof(rxq);
   7659 		WM_RX_UNLOCK(rxq);
   7660 	}
   7661 
   7662 	if (sc->sc_type == WM_T_82574)
    7663 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7664 	else if (sc->sc_type == WM_T_82575)
    7665 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7666 	else
   7667 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7668 
   7669 	return 1;
   7670 }
   7671 
   7672 /*
   7673  * wm_linkintr_msix:
   7674  *
   7675  *	Interrupt service routine for link status change for MSI-X.
   7676  */
   7677 static int
   7678 wm_linkintr_msix(void *arg)
   7679 {
   7680 	struct wm_softc *sc = arg;
   7681 	uint32_t reg;
   7682 
   7683 	DPRINTF(WM_DEBUG_LINK,
   7684 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7685 
   7686 	reg = CSR_READ(sc, WMREG_ICR);
   7687 	WM_CORE_LOCK(sc);
   7688 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7689 		goto out;
   7690 
   7691 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7692 	wm_linkintr(sc, ICR_LSC);
   7693 
   7694 out:
   7695 	WM_CORE_UNLOCK(sc);
   7696 
   7697 	if (sc->sc_type == WM_T_82574)
   7698 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7699 	else if (sc->sc_type == WM_T_82575)
   7700 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7701 	else
   7702 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7703 
   7704 	return 1;
   7705 }
   7706 
   7707 /*
   7708  * Media related.
   7709  * GMII, SGMII, TBI (and SERDES)
   7710  */
   7711 
   7712 /* Common */
   7713 
   7714 /*
   7715  * wm_tbi_serdes_set_linkled:
   7716  *
   7717  *	Update the link LED on TBI and SERDES devices.
   7718  */
   7719 static void
   7720 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7721 {
   7722 
   7723 	if (sc->sc_tbi_linkup)
   7724 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7725 	else
   7726 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7727 
   7728 	/* 82540 or newer devices are active low */
   7729 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7730 
   7731 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7732 }
   7733 
   7734 /* GMII related */
   7735 
   7736 /*
   7737  * wm_gmii_reset:
   7738  *
   7739  *	Reset the PHY.
   7740  */
   7741 static void
   7742 wm_gmii_reset(struct wm_softc *sc)
   7743 {
   7744 	uint32_t reg;
   7745 	int rv;
   7746 
   7747 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7748 		device_xname(sc->sc_dev), __func__));
   7749 	/* get phy semaphore */
   7750 	switch (sc->sc_type) {
   7751 	case WM_T_82571:
   7752 	case WM_T_82572:
   7753 	case WM_T_82573:
   7754 	case WM_T_82574:
   7755 	case WM_T_82583:
    7756 		/* XXX should get sw semaphore, too */
   7757 		rv = wm_get_swsm_semaphore(sc);
   7758 		break;
   7759 	case WM_T_82575:
   7760 	case WM_T_82576:
   7761 	case WM_T_82580:
   7762 	case WM_T_I350:
   7763 	case WM_T_I354:
   7764 	case WM_T_I210:
   7765 	case WM_T_I211:
   7766 	case WM_T_80003:
   7767 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7768 		break;
   7769 	case WM_T_ICH8:
   7770 	case WM_T_ICH9:
   7771 	case WM_T_ICH10:
   7772 	case WM_T_PCH:
   7773 	case WM_T_PCH2:
   7774 	case WM_T_PCH_LPT:
   7775 	case WM_T_PCH_SPT:
   7776 		rv = wm_get_swfwhw_semaphore(sc);
   7777 		break;
   7778 	default:
    7779 		/* nothing to do */
   7780 		rv = 0;
   7781 		break;
   7782 	}
   7783 	if (rv != 0) {
   7784 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7785 		    __func__);
   7786 		return;
   7787 	}
   7788 
   7789 	switch (sc->sc_type) {
   7790 	case WM_T_82542_2_0:
   7791 	case WM_T_82542_2_1:
   7792 		/* null */
   7793 		break;
   7794 	case WM_T_82543:
   7795 		/*
    7796 		 * With 82543, we need to force the MAC's speed and duplex
    7797 		 * to match the PHY's speed and duplex configuration.
   7798 		 * In addition, we need to perform a hardware reset on the PHY
   7799 		 * to take it out of reset.
   7800 		 */
   7801 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7803 
   7804 		/* The PHY reset pin is active-low. */
   7805 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7806 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7807 		    CTRL_EXT_SWDPIN(4));
   7808 		reg |= CTRL_EXT_SWDPIO(4);
   7809 
   7810 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7811 		CSR_WRITE_FLUSH(sc);
   7812 		delay(10*1000);
   7813 
   7814 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7815 		CSR_WRITE_FLUSH(sc);
   7816 		delay(150);
   7817 #if 0
   7818 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7819 #endif
   7820 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7821 		break;
   7822 	case WM_T_82544:	/* reset 10000us */
   7823 	case WM_T_82540:
   7824 	case WM_T_82545:
   7825 	case WM_T_82545_3:
   7826 	case WM_T_82546:
   7827 	case WM_T_82546_3:
   7828 	case WM_T_82541:
   7829 	case WM_T_82541_2:
   7830 	case WM_T_82547:
   7831 	case WM_T_82547_2:
   7832 	case WM_T_82571:	/* reset 100us */
   7833 	case WM_T_82572:
   7834 	case WM_T_82573:
   7835 	case WM_T_82574:
   7836 	case WM_T_82575:
   7837 	case WM_T_82576:
   7838 	case WM_T_82580:
   7839 	case WM_T_I350:
   7840 	case WM_T_I354:
   7841 	case WM_T_I210:
   7842 	case WM_T_I211:
   7843 	case WM_T_82583:
   7844 	case WM_T_80003:
   7845 		/* generic reset */
   7846 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7847 		CSR_WRITE_FLUSH(sc);
   7848 		delay(20000);
   7849 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7850 		CSR_WRITE_FLUSH(sc);
   7851 		delay(20000);
   7852 
   7853 		if ((sc->sc_type == WM_T_82541)
   7854 		    || (sc->sc_type == WM_T_82541_2)
   7855 		    || (sc->sc_type == WM_T_82547)
   7856 		    || (sc->sc_type == WM_T_82547_2)) {
    7857 			/* workarounds for IGP are done in igp_reset() */
   7858 			/* XXX add code to set LED after phy reset */
   7859 		}
   7860 		break;
   7861 	case WM_T_ICH8:
   7862 	case WM_T_ICH9:
   7863 	case WM_T_ICH10:
   7864 	case WM_T_PCH:
   7865 	case WM_T_PCH2:
   7866 	case WM_T_PCH_LPT:
   7867 	case WM_T_PCH_SPT:
   7868 		/* generic reset */
   7869 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7870 		CSR_WRITE_FLUSH(sc);
   7871 		delay(100);
   7872 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7873 		CSR_WRITE_FLUSH(sc);
   7874 		delay(150);
   7875 		break;
   7876 	default:
   7877 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7878 		    __func__);
   7879 		break;
   7880 	}
   7881 
   7882 	/* release PHY semaphore */
   7883 	switch (sc->sc_type) {
   7884 	case WM_T_82571:
   7885 	case WM_T_82572:
   7886 	case WM_T_82573:
   7887 	case WM_T_82574:
   7888 	case WM_T_82583:
    7889 		/* XXX should put sw semaphore, too */
   7890 		wm_put_swsm_semaphore(sc);
   7891 		break;
   7892 	case WM_T_82575:
   7893 	case WM_T_82576:
   7894 	case WM_T_82580:
   7895 	case WM_T_I350:
   7896 	case WM_T_I354:
   7897 	case WM_T_I210:
   7898 	case WM_T_I211:
   7899 	case WM_T_80003:
   7900 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7901 		break;
   7902 	case WM_T_ICH8:
   7903 	case WM_T_ICH9:
   7904 	case WM_T_ICH10:
   7905 	case WM_T_PCH:
   7906 	case WM_T_PCH2:
   7907 	case WM_T_PCH_LPT:
   7908 	case WM_T_PCH_SPT:
   7909 		wm_put_swfwhw_semaphore(sc);
   7910 		break;
   7911 	default:
   7912 		/* nothing to do */
   7913 		rv = 0;
   7914 		break;
   7915 	}
   7916 
   7917 	/* get_cfg_done */
   7918 	wm_get_cfg_done(sc);
   7919 
   7920 	/* extra setup */
   7921 	switch (sc->sc_type) {
   7922 	case WM_T_82542_2_0:
   7923 	case WM_T_82542_2_1:
   7924 	case WM_T_82543:
   7925 	case WM_T_82544:
   7926 	case WM_T_82540:
   7927 	case WM_T_82545:
   7928 	case WM_T_82545_3:
   7929 	case WM_T_82546:
   7930 	case WM_T_82546_3:
   7931 	case WM_T_82541_2:
   7932 	case WM_T_82547_2:
   7933 	case WM_T_82571:
   7934 	case WM_T_82572:
   7935 	case WM_T_82573:
   7936 	case WM_T_82575:
   7937 	case WM_T_82576:
   7938 	case WM_T_82580:
   7939 	case WM_T_I350:
   7940 	case WM_T_I354:
   7941 	case WM_T_I210:
   7942 	case WM_T_I211:
   7943 	case WM_T_80003:
   7944 		/* null */
   7945 		break;
   7946 	case WM_T_82574:
   7947 	case WM_T_82583:
   7948 		wm_lplu_d0_disable(sc);
   7949 		break;
   7950 	case WM_T_82541:
   7951 	case WM_T_82547:
    7952 		/* XXX Actively configure the LED after PHY reset */
   7953 		break;
   7954 	case WM_T_ICH8:
   7955 	case WM_T_ICH9:
   7956 	case WM_T_ICH10:
   7957 	case WM_T_PCH:
   7958 	case WM_T_PCH2:
   7959 	case WM_T_PCH_LPT:
   7960 	case WM_T_PCH_SPT:
    7961 		/* Allow time for h/w to get to a quiescent state after reset */
   7962 		delay(10*1000);
   7963 
   7964 		if (sc->sc_type == WM_T_PCH)
   7965 			wm_hv_phy_workaround_ich8lan(sc);
   7966 
   7967 		if (sc->sc_type == WM_T_PCH2)
   7968 			wm_lv_phy_workaround_ich8lan(sc);
   7969 
   7970 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7971 			/*
   7972 			 * dummy read to clear the phy wakeup bit after lcd
   7973 			 * reset
   7974 			 */
   7975 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7976 		}
   7977 
   7978 		/*
    7979 		 * XXX Configure the LCD with the extended configuration region
   7980 		 * in NVM
   7981 		 */
   7982 
   7983 		/* Disable D0 LPLU. */
   7984 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7985 			wm_lplu_d0_disable_pch(sc);
   7986 		else
   7987 			wm_lplu_d0_disable(sc);	/* ICH* */
   7988 		break;
   7989 	default:
   7990 		panic("%s: unknown type\n", __func__);
   7991 		break;
   7992 	}
   7993 }
   7994 
   7995 /*
   7996  * wm_get_phy_id_82575:
   7997  *
    7998  *	Return the PHY ID, or -1 on failure.
   7999  */
   8000 static int
   8001 wm_get_phy_id_82575(struct wm_softc *sc)
   8002 {
   8003 	uint32_t reg;
   8004 	int phyid = -1;
   8005 
   8006 	/* XXX */
   8007 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8008 		return -1;
   8009 
   8010 	if (wm_sgmii_uses_mdio(sc)) {
   8011 		switch (sc->sc_type) {
   8012 		case WM_T_82575:
   8013 		case WM_T_82576:
   8014 			reg = CSR_READ(sc, WMREG_MDIC);
   8015 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8016 			break;
   8017 		case WM_T_82580:
   8018 		case WM_T_I350:
   8019 		case WM_T_I354:
   8020 		case WM_T_I210:
   8021 		case WM_T_I211:
   8022 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8023 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8024 			break;
   8025 		default:
   8026 			return -1;
   8027 		}
   8028 	}
   8029 
   8030 	return phyid;
   8031 }
   8032 
   8033 
   8034 /*
   8035  * wm_gmii_mediainit:
   8036  *
   8037  *	Initialize media for use on 1000BASE-T devices.
   8038  */
   8039 static void
   8040 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8041 {
   8042 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8043 	struct mii_data *mii = &sc->sc_mii;
   8044 	uint32_t reg;
   8045 
   8046 	/* We have GMII. */
   8047 	sc->sc_flags |= WM_F_HAS_MII;
   8048 
   8049 	if (sc->sc_type == WM_T_80003)
   8050 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8051 	else
   8052 		sc->sc_tipg = TIPG_1000T_DFLT;
   8053 
   8054 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8055 	if ((sc->sc_type == WM_T_82580)
   8056 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8057 	    || (sc->sc_type == WM_T_I211)) {
   8058 		reg = CSR_READ(sc, WMREG_PHPM);
   8059 		reg &= ~PHPM_GO_LINK_D;
   8060 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8061 	}
   8062 
   8063 	/*
   8064 	 * Let the chip set speed/duplex on its own based on
   8065 	 * signals from the PHY.
   8066 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8067 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8068 	 */
   8069 	sc->sc_ctrl |= CTRL_SLU;
   8070 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8071 
   8072 	/* Initialize our media structures and probe the GMII. */
   8073 	mii->mii_ifp = ifp;
   8074 
   8075 	/*
   8076 	 * Determine the PHY access method.
   8077 	 *
   8078 	 *  For SGMII, use SGMII specific method.
   8079 	 *
   8080 	 *  For some devices, we can determine the PHY access method
   8081 	 * from sc_type.
   8082 	 *
    8083 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8084 	 * access method from sc_type alone, so use the PCI product ID for
    8085 	 * some devices.
    8086 	 * For other ICH8 variants, try the igp access method first; if no
    8087 	 * PHY is detected that way, fall back to the bm method.
   8088 	 */
   8089 	switch (prodid) {
   8090 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8091 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8092 		/* 82577 */
   8093 		sc->sc_phytype = WMPHY_82577;
   8094 		break;
   8095 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8096 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8097 		/* 82578 */
   8098 		sc->sc_phytype = WMPHY_82578;
   8099 		break;
   8100 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8101 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8102 		/* 82579 */
   8103 		sc->sc_phytype = WMPHY_82579;
   8104 		break;
   8105 	case PCI_PRODUCT_INTEL_82801I_BM:
   8106 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8107 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8108 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8109 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8110 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8111 		/* 82567 */
   8112 		sc->sc_phytype = WMPHY_BM;
   8113 		mii->mii_readreg = wm_gmii_bm_readreg;
   8114 		mii->mii_writereg = wm_gmii_bm_writereg;
   8115 		break;
   8116 	default:
   8117 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    8118 		    && !wm_sgmii_uses_mdio(sc)) {
   8119 			/* SGMII */
   8120 			mii->mii_readreg = wm_sgmii_readreg;
   8121 			mii->mii_writereg = wm_sgmii_writereg;
   8122 		} else if (sc->sc_type >= WM_T_80003) {
   8123 			/* 80003 */
   8124 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8125 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8126 		} else if (sc->sc_type >= WM_T_I210) {
   8127 			/* I210 and I211 */
   8128 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8129 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8130 		} else if (sc->sc_type >= WM_T_82580) {
   8131 			/* 82580, I350 and I354 */
   8132 			sc->sc_phytype = WMPHY_82580;
   8133 			mii->mii_readreg = wm_gmii_82580_readreg;
   8134 			mii->mii_writereg = wm_gmii_82580_writereg;
   8135 		} else if (sc->sc_type >= WM_T_82544) {
    8136 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8137 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8138 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8139 		} else {
   8140 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8141 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8142 		}
   8143 		break;
   8144 	}
   8145 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8146 		/* All PCH* use _hv_ */
   8147 		mii->mii_readreg = wm_gmii_hv_readreg;
   8148 		mii->mii_writereg = wm_gmii_hv_writereg;
   8149 	}
   8150 	mii->mii_statchg = wm_gmii_statchg;
   8151 
   8152 	wm_gmii_reset(sc);
   8153 
   8154 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8155 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8156 	    wm_gmii_mediastatus);
   8157 
   8158 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8159 	    || (sc->sc_type == WM_T_82580)
   8160 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8161 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8162 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8163 			/* Attach only one port */
   8164 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8165 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8166 		} else {
   8167 			int i, id;
   8168 			uint32_t ctrl_ext;
   8169 
   8170 			id = wm_get_phy_id_82575(sc);
   8171 			if (id != -1) {
   8172 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8173 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8174 			}
   8175 			if ((id == -1)
   8176 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8177 				/* Power on sgmii phy if it is disabled */
   8178 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8179 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8180 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8181 				CSR_WRITE_FLUSH(sc);
   8182 				delay(300*1000); /* XXX too long */
   8183 
    8184 				/* Try PHY addresses 1 through 7 */
   8185 				for (i = 1; i < 8; i++)
   8186 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8187 					    0xffffffff, i, MII_OFFSET_ANY,
   8188 					    MIIF_DOPAUSE);
   8189 
   8190 				/* restore previous sfp cage power state */
   8191 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8192 			}
   8193 		}
   8194 	} else {
   8195 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8196 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8197 	}
   8198 
   8199 	/*
   8200 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8201 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8202 	 */
   8203 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8204 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8205 		wm_set_mdio_slow_mode_hv(sc);
   8206 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8207 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8208 	}
   8209 
   8210 	/*
   8211 	 * (For ICH8 variants)
   8212 	 * If PHY detection failed, use BM's r/w function and retry.
   8213 	 */
   8214 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8215 		/* if failed, retry with *_bm_* */
   8216 		mii->mii_readreg = wm_gmii_bm_readreg;
   8217 		mii->mii_writereg = wm_gmii_bm_writereg;
   8218 
   8219 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8220 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8221 	}
   8222 
   8223 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8224 		/* No PHY was found */
   8225 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8226 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8227 		sc->sc_phytype = WMPHY_NONE;
   8228 	} else {
   8229 		/*
   8230 		 * PHY Found!
   8231 		 * Check PHY type.
   8232 		 */
   8233 		uint32_t model;
   8234 		struct mii_softc *child;
   8235 
   8236 		child = LIST_FIRST(&mii->mii_phys);
   8237 		model = child->mii_mpd_model;
   8238 		if (model == MII_MODEL_yyINTEL_I82566)
   8239 			sc->sc_phytype = WMPHY_IGP_3;
   8240 
   8241 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8242 	}
   8243 }
   8244 
   8245 /*
   8246  * wm_gmii_mediachange:	[ifmedia interface function]
   8247  *
   8248  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8249  */
   8250 static int
   8251 wm_gmii_mediachange(struct ifnet *ifp)
   8252 {
   8253 	struct wm_softc *sc = ifp->if_softc;
   8254 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8255 	int rc;
   8256 
   8257 	if ((ifp->if_flags & IFF_UP) == 0)
   8258 		return 0;
   8259 
   8260 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8261 	sc->sc_ctrl |= CTRL_SLU;
   8262 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8263 	    || (sc->sc_type > WM_T_82543)) {
   8264 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8265 	} else {
   8266 		sc->sc_ctrl &= ~CTRL_ASDE;
   8267 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8268 		if (ife->ifm_media & IFM_FDX)
   8269 			sc->sc_ctrl |= CTRL_FD;
   8270 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8271 		case IFM_10_T:
   8272 			sc->sc_ctrl |= CTRL_SPEED_10;
   8273 			break;
   8274 		case IFM_100_TX:
   8275 			sc->sc_ctrl |= CTRL_SPEED_100;
   8276 			break;
   8277 		case IFM_1000_T:
   8278 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8279 			break;
   8280 		default:
   8281 			panic("wm_gmii_mediachange: bad media 0x%x",
   8282 			    ife->ifm_media);
   8283 		}
   8284 	}
   8285 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8286 	if (sc->sc_type <= WM_T_82543)
   8287 		wm_gmii_reset(sc);
   8288 
   8289 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8290 		return 0;
   8291 	return rc;
   8292 }
   8293 
   8294 /*
   8295  * wm_gmii_mediastatus:	[ifmedia interface function]
   8296  *
   8297  *	Get the current interface media status on a 1000BASE-T device.
   8298  */
   8299 static void
   8300 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8301 {
   8302 	struct wm_softc *sc = ifp->if_softc;
   8303 
   8304 	ether_mediastatus(ifp, ifmr);
   8305 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8306 	    | sc->sc_flowflags;
   8307 }
   8308 
   8309 #define	MDI_IO		CTRL_SWDPIN(2)
   8310 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8311 #define	MDI_CLK		CTRL_SWDPIN(3)
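
/*
 * On the 82543 the MII management interface is bit-banged through two
 * software-definable pins: MDI_IO carries the data and MDI_CLK the
 * clock.  Each bit is presented on MDI_IO and latched by pulsing
 * MDI_CLK high and low again, with ~10us of settling time around each
 * edge.  A read consists of a 32-bit all-ones preamble, a 14-bit
 * command frame (start, read opcode, PHY and register addresses) and
 * 16 data bits clocked back in MSB first.
 */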
   8312 
   8313 static void
   8314 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8315 {
   8316 	uint32_t i, v;
   8317 
   8318 	v = CSR_READ(sc, WMREG_CTRL);
   8319 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8320 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8321 
   8322 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8323 		if (data & i)
   8324 			v |= MDI_IO;
   8325 		else
   8326 			v &= ~MDI_IO;
   8327 		CSR_WRITE(sc, WMREG_CTRL, v);
   8328 		CSR_WRITE_FLUSH(sc);
   8329 		delay(10);
   8330 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8331 		CSR_WRITE_FLUSH(sc);
   8332 		delay(10);
   8333 		CSR_WRITE(sc, WMREG_CTRL, v);
   8334 		CSR_WRITE_FLUSH(sc);
   8335 		delay(10);
   8336 	}
   8337 }
   8338 
   8339 static uint32_t
   8340 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8341 {
   8342 	uint32_t v, i, data = 0;
   8343 
   8344 	v = CSR_READ(sc, WMREG_CTRL);
   8345 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8346 	v |= CTRL_SWDPIO(3);
   8347 
   8348 	CSR_WRITE(sc, WMREG_CTRL, v);
   8349 	CSR_WRITE_FLUSH(sc);
   8350 	delay(10);
   8351 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8352 	CSR_WRITE_FLUSH(sc);
   8353 	delay(10);
   8354 	CSR_WRITE(sc, WMREG_CTRL, v);
   8355 	CSR_WRITE_FLUSH(sc);
   8356 	delay(10);
   8357 
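	/* Clock in the 16 data bits, MSB first */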
   8358 	for (i = 0; i < 16; i++) {
   8359 		data <<= 1;
   8360 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8361 		CSR_WRITE_FLUSH(sc);
   8362 		delay(10);
   8363 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8364 			data |= 1;
   8365 		CSR_WRITE(sc, WMREG_CTRL, v);
   8366 		CSR_WRITE_FLUSH(sc);
   8367 		delay(10);
   8368 	}
   8369 
   8370 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8371 	CSR_WRITE_FLUSH(sc);
   8372 	delay(10);
   8373 	CSR_WRITE(sc, WMREG_CTRL, v);
   8374 	CSR_WRITE_FLUSH(sc);
   8375 	delay(10);
   8376 
   8377 	return data;
   8378 }
   8379 
   8380 #undef MDI_IO
   8381 #undef MDI_DIR
   8382 #undef MDI_CLK
   8383 
   8384 /*
   8385  * wm_gmii_i82543_readreg:	[mii interface function]
   8386  *
   8387  *	Read a PHY register on the GMII (i82543 version).
   8388  */
   8389 static int
   8390 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8391 {
   8392 	struct wm_softc *sc = device_private(self);
   8393 	int rv;
   8394 
   8395 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8396 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8397 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8398 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8399 
   8400 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8401 	    device_xname(sc->sc_dev), phy, reg, rv));
   8402 
   8403 	return rv;
   8404 }
   8405 
   8406 /*
   8407  * wm_gmii_i82543_writereg:	[mii interface function]
   8408  *
   8409  *	Write a PHY register on the GMII (i82543 version).
   8410  */
   8411 static void
   8412 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8413 {
   8414 	struct wm_softc *sc = device_private(self);
   8415 
   8416 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8417 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8418 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8419 	    (MII_COMMAND_START << 30), 32);
   8420 }
   8421 
   8422 /*
   8423  * wm_gmii_i82544_readreg:	[mii interface function]
   8424  *
   8425  *	Read a PHY register on the GMII.
   8426  */
   8427 static int
   8428 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8429 {
   8430 	struct wm_softc *sc = device_private(self);
   8431 	uint32_t mdic = 0;
   8432 	int i, rv;
   8433 
   8434 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8435 	    MDIC_REGADD(reg));
   8436 
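	/*
	 * The MAC serializes the management frame onto the MDIO bus by
	 * itself and sets MDIC_READY when the transaction completes, so
	 * just poll for the ready bit in 50us steps.
	 */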
   8437 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8438 		mdic = CSR_READ(sc, WMREG_MDIC);
   8439 		if (mdic & MDIC_READY)
   8440 			break;
   8441 		delay(50);
   8442 	}
   8443 
   8444 	if ((mdic & MDIC_READY) == 0) {
   8445 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8446 		    device_xname(sc->sc_dev), phy, reg);
   8447 		rv = 0;
   8448 	} else if (mdic & MDIC_E) {
   8449 #if 0 /* This is normal if no PHY is present. */
   8450 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8451 		    device_xname(sc->sc_dev), phy, reg);
   8452 #endif
   8453 		rv = 0;
   8454 	} else {
   8455 		rv = MDIC_DATA(mdic);
   8456 		if (rv == 0xffff)
   8457 			rv = 0;
   8458 	}
   8459 
   8460 	return rv;
   8461 }
   8462 
   8463 /*
   8464  * wm_gmii_i82544_writereg:	[mii interface function]
   8465  *
   8466  *	Write a PHY register on the GMII.
   8467  */
   8468 static void
   8469 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8470 {
   8471 	struct wm_softc *sc = device_private(self);
   8472 	uint32_t mdic = 0;
   8473 	int i;
   8474 
   8475 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8476 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8477 
   8478 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8479 		mdic = CSR_READ(sc, WMREG_MDIC);
   8480 		if (mdic & MDIC_READY)
   8481 			break;
   8482 		delay(50);
   8483 	}
   8484 
   8485 	if ((mdic & MDIC_READY) == 0)
   8486 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8487 		    device_xname(sc->sc_dev), phy, reg);
   8488 	else if (mdic & MDIC_E)
   8489 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8490 		    device_xname(sc->sc_dev), phy, reg);
   8491 }
   8492 
   8493 /*
   8494  * wm_gmii_i80003_readreg:	[mii interface function]
   8495  *
   8496  *	Read a PHY register on the kumeran
    8497  *	Read a PHY register on the kumeran bus (80003).
    8498  * This could be handled by the PHY layer if we didn't have to lock the
    8499  * resource ...
   8500 static int
   8501 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8502 {
   8503 	struct wm_softc *sc = device_private(self);
   8504 	int sem;
   8505 	int rv;
   8506 
   8507 	if (phy != 1) /* only one PHY on kumeran bus */
   8508 		return 0;
   8509 
   8510 	sem = swfwphysem[sc->sc_funcid];
   8511 	if (wm_get_swfw_semaphore(sc, sem)) {
   8512 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8513 		    __func__);
   8514 		return 0;
   8515 	}
   8516 
   8517 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8518 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8519 		    reg >> GG82563_PAGE_SHIFT);
   8520 	} else {
   8521 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8522 		    reg >> GG82563_PAGE_SHIFT);
   8523 	}
    8524 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8525 	delay(200);
   8526 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8527 	delay(200);
   8528 
   8529 	wm_put_swfw_semaphore(sc, sem);
   8530 	return rv;
   8531 }
   8532 
   8533 /*
   8534  * wm_gmii_i80003_writereg:	[mii interface function]
   8535  *
   8536  *	Write a PHY register on the kumeran.
   8537  * This could be handled by the PHY layer if we didn't have to lock the
    8538  * resource ...
   8539  */
   8540 static void
   8541 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8542 {
   8543 	struct wm_softc *sc = device_private(self);
   8544 	int sem;
   8545 
   8546 	if (phy != 1) /* only one PHY on kumeran bus */
   8547 		return;
   8548 
   8549 	sem = swfwphysem[sc->sc_funcid];
   8550 	if (wm_get_swfw_semaphore(sc, sem)) {
   8551 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8552 		    __func__);
   8553 		return;
   8554 	}
   8555 
   8556 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8557 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8558 		    reg >> GG82563_PAGE_SHIFT);
   8559 	} else {
   8560 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8561 		    reg >> GG82563_PAGE_SHIFT);
   8562 	}
    8563 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8564 	delay(200);
   8565 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8566 	delay(200);
   8567 
   8568 	wm_put_swfw_semaphore(sc, sem);
   8569 }
   8570 
   8571 /*
   8572  * wm_gmii_bm_readreg:	[mii interface function]
   8573  *
    8574  *	Read a PHY register on the BM PHY (ICH8 family).
    8575  * This could be handled by the PHY layer if we didn't have to lock the
    8576  * resource ...
   8577  */
   8578 static int
   8579 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8580 {
   8581 	struct wm_softc *sc = device_private(self);
   8582 	int sem;
   8583 	int rv;
   8584 
   8585 	sem = swfwphysem[sc->sc_funcid];
   8586 	if (wm_get_swfw_semaphore(sc, sem)) {
   8587 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8588 		    __func__);
   8589 		return 0;
   8590 	}
   8591 
   8592 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8593 		if (phy == 1)
   8594 			wm_gmii_i82544_writereg(self, phy,
   8595 			    MII_IGPHY_PAGE_SELECT, reg);
   8596 		else
   8597 			wm_gmii_i82544_writereg(self, phy,
   8598 			    GG82563_PHY_PAGE_SELECT,
   8599 			    reg >> GG82563_PAGE_SHIFT);
   8600 	}
   8601 
   8602 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8603 	wm_put_swfw_semaphore(sc, sem);
   8604 	return rv;
   8605 }
   8606 
   8607 /*
   8608  * wm_gmii_bm_writereg:	[mii interface function]
   8609  *
    8610  *	Write a PHY register on the BM PHY (ICH8 family).
    8611  * This could be handled by the PHY layer if we didn't have to lock the
    8612  * resource ...
   8613  */
   8614 static void
   8615 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8616 {
   8617 	struct wm_softc *sc = device_private(self);
   8618 	int sem;
   8619 
   8620 	sem = swfwphysem[sc->sc_funcid];
   8621 	if (wm_get_swfw_semaphore(sc, sem)) {
   8622 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8623 		    __func__);
   8624 		return;
   8625 	}
   8626 
   8627 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8628 		if (phy == 1)
   8629 			wm_gmii_i82544_writereg(self, phy,
   8630 			    MII_IGPHY_PAGE_SELECT, reg);
   8631 		else
   8632 			wm_gmii_i82544_writereg(self, phy,
   8633 			    GG82563_PHY_PAGE_SELECT,
   8634 			    reg >> GG82563_PAGE_SHIFT);
   8635 	}
   8636 
   8637 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8638 	wm_put_swfw_semaphore(sc, sem);
   8639 }
   8640 
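/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Page 800 is
 *	only reachable after setting the host wakeup-access enable bit
 *	in the WUC enable register on page 769, so the sequence is:
 *	select page 769, set the enable bit, select page 800, issue the
 *	address and data opcodes, then restore page 769's old value.
 */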
   8641 static void
   8642 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8643 {
   8644 	struct wm_softc *sc = device_private(self);
   8645 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8646 	uint16_t wuce;
   8647 
   8648 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8649 	if (sc->sc_type == WM_T_PCH) {
    8650 		/* XXX The e1000 driver does nothing here... why? */
   8651 	}
   8652 
   8653 	/* Set page 769 */
   8654 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8655 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8656 
   8657 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8658 
   8659 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8660 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8661 	    wuce | BM_WUC_ENABLE_BIT);
   8662 
   8663 	/* Select page 800 */
   8664 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8665 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8666 
   8667 	/* Write page 800 */
   8668 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8669 
   8670 	if (rd)
   8671 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8672 	else
   8673 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8674 
   8675 	/* Set page 769 */
   8676 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8677 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8678 
   8679 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8680 }
   8681 
   8682 /*
   8683  * wm_gmii_hv_readreg:	[mii interface function]
   8684  *
    8685  *	Read a PHY register on the HV (PCH family) PHY.
    8686  * This could be handled by the PHY layer if we didn't have to lock the
    8687  * resource ...
   8688  */
   8689 static int
   8690 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8691 {
   8692 	struct wm_softc *sc = device_private(self);
   8693 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8694 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8695 	uint16_t val;
   8696 	int rv;
   8697 
   8698 	if (wm_get_swfwhw_semaphore(sc)) {
   8699 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8700 		    __func__);
   8701 		return 0;
   8702 	}
   8703 
   8704 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8705 	if (sc->sc_phytype == WMPHY_82577) {
   8706 		/* XXX must write */
   8707 	}
   8708 
   8709 	/* Page 800 works differently than the rest so it has its own func */
   8710 	if (page == BM_WUC_PAGE) {
    8711 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
    8712 		return val;
   8713 	}
   8714 
   8715 	/*
    8716 	 * Pages above 0 but below 768 work differently from the rest and
    8717 	 * would need their own handling; that case is not implemented yet.
   8718 	 */
   8719 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8720 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
    8721 		return 0;
   8722 	}
   8723 
   8724 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8725 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8726 		    page << BME1000_PAGE_SHIFT);
   8727 	}
   8728 
   8729 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8730 	wm_put_swfwhw_semaphore(sc);
   8731 	return rv;
   8732 }
   8733 
   8734 /*
   8735  * wm_gmii_hv_writereg:	[mii interface function]
   8736  *
   8737  *	Write a PHY register on the kumeran.
    8738  *	Write a PHY register on the HV (PCH family) PHY.
    8739  * This could be handled by the PHY layer if we didn't have to lock the
    8740  * resource ...
   8741 static void
   8742 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8743 {
   8744 	struct wm_softc *sc = device_private(self);
   8745 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8746 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8747 
   8748 	if (wm_get_swfwhw_semaphore(sc)) {
   8749 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8750 		    __func__);
   8751 		return;
   8752 	}
   8753 
   8754 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8755 
   8756 	/* Page 800 works differently than the rest so it has its own func */
   8757 	if (page == BM_WUC_PAGE) {
   8758 		uint16_t tmp;
   8759 
   8760 		tmp = val;
    8761 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
    8762 		return;
   8763 	}
   8764 
   8765 	/*
    8766 	 * Pages above 0 but below 768 work differently from the rest and
    8767 	 * would need their own handling; that case is not implemented yet.
   8768 	 */
   8769 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8770 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
    8771 		return;
   8772 	}
   8773 
   8774 	/*
   8775 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8776 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8777 	 */
   8778 
   8779 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8780 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8781 		    page << BME1000_PAGE_SHIFT);
   8782 	}
   8783 
   8784 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8785 	wm_put_swfwhw_semaphore(sc);
   8786 }
   8787 
   8788 /*
   8789  * wm_gmii_82580_readreg:	[mii interface function]
   8790  *
   8791  *	Read a PHY register on the 82580 and I350.
   8792  * This could be handled by the PHY layer if we didn't have to lock the
    8793  * resource ...
   8794  */
   8795 static int
   8796 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8797 {
   8798 	struct wm_softc *sc = device_private(self);
   8799 	int sem;
   8800 	int rv;
   8801 
   8802 	sem = swfwphysem[sc->sc_funcid];
   8803 	if (wm_get_swfw_semaphore(sc, sem)) {
   8804 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8805 		    __func__);
   8806 		return 0;
   8807 	}
   8808 
   8809 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8810 
   8811 	wm_put_swfw_semaphore(sc, sem);
   8812 	return rv;
   8813 }
   8814 
   8815 /*
   8816  * wm_gmii_82580_writereg:	[mii interface function]
   8817  *
   8818  *	Write a PHY register on the 82580 and I350.
   8819  * This could be handled by the PHY layer if we didn't have to lock the
    8820  * resource ...
   8821  */
   8822 static void
   8823 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8824 {
   8825 	struct wm_softc *sc = device_private(self);
   8826 	int sem;
   8827 
   8828 	sem = swfwphysem[sc->sc_funcid];
   8829 	if (wm_get_swfw_semaphore(sc, sem)) {
   8830 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8831 		    __func__);
   8832 		return;
   8833 	}
   8834 
   8835 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8836 
   8837 	wm_put_swfw_semaphore(sc, sem);
   8838 }
   8839 
   8840 /*
   8841  * wm_gmii_gs40g_readreg:	[mii interface function]
   8842  *
    8843  *	Read a PHY register on the I210 and I211.
    8844  * This could be handled by the PHY layer if we didn't have to lock the
    8845  * resource ...
   8846  */
   8847 static int
   8848 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8849 {
   8850 	struct wm_softc *sc = device_private(self);
   8851 	int sem;
   8852 	int page, offset;
   8853 	int rv;
   8854 
   8855 	/* Acquire semaphore */
   8856 	sem = swfwphysem[sc->sc_funcid];
   8857 	if (wm_get_swfw_semaphore(sc, sem)) {
   8858 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8859 		    __func__);
   8860 		return 0;
   8861 	}
   8862 
   8863 	/* Page select */
   8864 	page = reg >> GS40G_PAGE_SHIFT;
   8865 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8866 
   8867 	/* Read reg */
   8868 	offset = reg & GS40G_OFFSET_MASK;
   8869 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8870 
   8871 	wm_put_swfw_semaphore(sc, sem);
   8872 	return rv;
   8873 }
   8874 
   8875 /*
   8876  * wm_gmii_gs40g_writereg:	[mii interface function]
   8877  *
   8878  *	Write a PHY register on the I210 and I211.
   8879  * This could be handled by the PHY layer if we didn't have to lock the
    8880  * resource ...
   8881  */
   8882 static void
   8883 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8884 {
   8885 	struct wm_softc *sc = device_private(self);
   8886 	int sem;
   8887 	int page, offset;
   8888 
   8889 	/* Acquire semaphore */
   8890 	sem = swfwphysem[sc->sc_funcid];
   8891 	if (wm_get_swfw_semaphore(sc, sem)) {
   8892 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8893 		    __func__);
   8894 		return;
   8895 	}
   8896 
   8897 	/* Page select */
   8898 	page = reg >> GS40G_PAGE_SHIFT;
   8899 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8900 
   8901 	/* Write reg */
   8902 	offset = reg & GS40G_OFFSET_MASK;
   8903 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8904 
   8905 	/* Release semaphore */
   8906 	wm_put_swfw_semaphore(sc, sem);
   8907 }
   8908 
   8909 /*
   8910  * wm_gmii_statchg:	[mii interface function]
   8911  *
   8912  *	Callback from MII layer when media changes.
   8913  */
   8914 static void
   8915 wm_gmii_statchg(struct ifnet *ifp)
   8916 {
   8917 	struct wm_softc *sc = ifp->if_softc;
   8918 	struct mii_data *mii = &sc->sc_mii;
   8919 
   8920 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8921 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8922 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8923 
   8924 	/*
   8925 	 * Get flow control negotiation result.
   8926 	 */
   8927 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8928 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8929 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8930 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8931 	}
   8932 
   8933 	if (sc->sc_flowflags & IFM_FLOW) {
   8934 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8935 			sc->sc_ctrl |= CTRL_TFCE;
   8936 			sc->sc_fcrtl |= FCRTL_XONE;
   8937 		}
   8938 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8939 			sc->sc_ctrl |= CTRL_RFCE;
   8940 	}
   8941 
   8942 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8943 		DPRINTF(WM_DEBUG_LINK,
   8944 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8945 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8946 	} else {
   8947 		DPRINTF(WM_DEBUG_LINK,
   8948 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8949 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8950 	}
   8951 
   8952 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8953 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8954 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8955 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8956 	if (sc->sc_type == WM_T_80003) {
   8957 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8958 		case IFM_1000_T:
   8959 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8960 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8961 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8962 			break;
   8963 		default:
   8964 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8965 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8966 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8967 			break;
   8968 		}
   8969 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8970 	}
   8971 }
   8972 
   8973 /*
   8974  * wm_kmrn_readreg:
   8975  *
   8976  *	Read a kumeran register
   8977  */
   8978 static int
   8979 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8980 {
   8981 	int rv;
   8982 
   8983 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8984 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8985 			aprint_error_dev(sc->sc_dev,
   8986 			    "%s: failed to get semaphore\n", __func__);
   8987 			return 0;
   8988 		}
   8989 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8990 		if (wm_get_swfwhw_semaphore(sc)) {
   8991 			aprint_error_dev(sc->sc_dev,
   8992 			    "%s: failed to get semaphore\n", __func__);
   8993 			return 0;
   8994 		}
   8995 	}
   8996 
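	/*
	 * Writing the register offset with KUMCTRLSTA_REN set starts a
	 * read cycle; after a short delay the data can be fetched from
	 * the low bits of KUMCTRLSTA.
	 */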
   8997 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8998 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8999 	    KUMCTRLSTA_REN);
   9000 	CSR_WRITE_FLUSH(sc);
   9001 	delay(2);
   9002 
   9003 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9004 
   9005 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9006 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9007 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9008 		wm_put_swfwhw_semaphore(sc);
   9009 
   9010 	return rv;
   9011 }
   9012 
   9013 /*
   9014  * wm_kmrn_writereg:
   9015  *
   9016  *	Write a kumeran register
   9017  */
   9018 static void
   9019 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9020 {
   9021 
   9022 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9023 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9024 			aprint_error_dev(sc->sc_dev,
   9025 			    "%s: failed to get semaphore\n", __func__);
   9026 			return;
   9027 		}
   9028 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9029 		if (wm_get_swfwhw_semaphore(sc)) {
   9030 			aprint_error_dev(sc->sc_dev,
   9031 			    "%s: failed to get semaphore\n", __func__);
   9032 			return;
   9033 		}
   9034 	}
   9035 
   9036 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9037 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9038 	    (val & KUMCTRLSTA_MASK));
   9039 
   9040 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9041 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9042 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9043 		wm_put_swfwhw_semaphore(sc);
   9044 }
   9045 
   9046 /* SGMII related */
   9047 
   9048 /*
   9049  * wm_sgmii_uses_mdio
   9050  *
   9051  * Check whether the transaction is to the internal PHY or the external
   9052  * MDIO interface. Return true if it's MDIO.
   9053  */
   9054 static bool
   9055 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9056 {
   9057 	uint32_t reg;
   9058 	bool ismdio = false;
   9059 
   9060 	switch (sc->sc_type) {
   9061 	case WM_T_82575:
   9062 	case WM_T_82576:
   9063 		reg = CSR_READ(sc, WMREG_MDIC);
   9064 		ismdio = ((reg & MDIC_DEST) != 0);
   9065 		break;
   9066 	case WM_T_82580:
   9067 	case WM_T_I350:
   9068 	case WM_T_I354:
   9069 	case WM_T_I210:
   9070 	case WM_T_I211:
   9071 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9072 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9073 		break;
   9074 	default:
   9075 		break;
   9076 	}
   9077 
   9078 	return ismdio;
   9079 }
   9080 
   9081 /*
   9082  * wm_sgmii_readreg:	[mii interface function]
   9083  *
   9084  *	Read a PHY register on the SGMII
   9085  * This could be handled by the PHY layer if we didn't have to lock the
    9086  * resource ...
   9087  */
   9088 static int
   9089 wm_sgmii_readreg(device_t self, int phy, int reg)
   9090 {
   9091 	struct wm_softc *sc = device_private(self);
   9092 	uint32_t i2ccmd;
   9093 	int i, rv;
   9094 
   9095 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9096 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9097 		    __func__);
   9098 		return 0;
   9099 	}
   9100 
   9101 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9102 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9103 	    | I2CCMD_OPCODE_READ;
   9104 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9105 
   9106 	/* Poll the ready bit */
   9107 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9108 		delay(50);
   9109 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9110 		if (i2ccmd & I2CCMD_READY)
   9111 			break;
   9112 	}
   9113 	if ((i2ccmd & I2CCMD_READY) == 0)
   9114 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9115 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9116 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9117 
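	/* Swap the data bytes back; the I2C interface returns them swapped */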
   9118 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9119 
   9120 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9121 	return rv;
   9122 }
   9123 
   9124 /*
   9125  * wm_sgmii_writereg:	[mii interface function]
   9126  *
   9127  *	Write a PHY register on the SGMII.
   9128  * This could be handled by the PHY layer if we didn't have to lock the
    9129  * resource ...
   9130  */
   9131 static void
   9132 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9133 {
   9134 	struct wm_softc *sc = device_private(self);
   9135 	uint32_t i2ccmd;
   9136 	int i;
   9137 	int val_swapped;
   9138 
   9139 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9140 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9141 		    __func__);
   9142 		return;
   9143 	}
   9144 	/* Swap the data bytes for the I2C interface */
   9145 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9146 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9147 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9148 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9149 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9150 
   9151 	/* Poll the ready bit */
   9152 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9153 		delay(50);
   9154 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9155 		if (i2ccmd & I2CCMD_READY)
   9156 			break;
   9157 	}
   9158 	if ((i2ccmd & I2CCMD_READY) == 0)
   9159 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9160 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9161 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9162 
    9163 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9164 }
   9165 
   9166 /* TBI related */
   9167 
   9168 /*
   9169  * wm_tbi_mediainit:
   9170  *
   9171  *	Initialize media for use on 1000BASE-X devices.
   9172  */
   9173 static void
   9174 wm_tbi_mediainit(struct wm_softc *sc)
   9175 {
   9176 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9177 	const char *sep = "";
   9178 
   9179 	if (sc->sc_type < WM_T_82543)
   9180 		sc->sc_tipg = TIPG_WM_DFLT;
   9181 	else
   9182 		sc->sc_tipg = TIPG_LG_DFLT;
   9183 
   9184 	sc->sc_tbi_serdes_anegticks = 5;
   9185 
   9186 	/* Initialize our media structures */
   9187 	sc->sc_mii.mii_ifp = ifp;
   9188 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9189 
   9190 	if ((sc->sc_type >= WM_T_82575)
   9191 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9192 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9193 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9194 	else
   9195 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9196 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9197 
   9198 	/*
   9199 	 * SWD Pins:
   9200 	 *
   9201 	 *	0 = Link LED (output)
   9202 	 *	1 = Loss Of Signal (input)
   9203 	 */
   9204 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9205 
   9206 	/* XXX Perhaps this is only for TBI */
   9207 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9208 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9209 
   9210 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9211 		sc->sc_ctrl &= ~CTRL_LRST;
   9212 
   9213 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9214 
   9215 #define	ADD(ss, mm, dd)							\
   9216 do {									\
   9217 	aprint_normal("%s%s", sep, ss);					\
   9218 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9219 	sep = ", ";							\
   9220 } while (/*CONSTCOND*/0)
   9221 
   9222 	aprint_normal_dev(sc->sc_dev, "");
   9223 
   9224 	/* Only 82545 is LX */
   9225 	if (sc->sc_type == WM_T_82545) {
   9226 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9227 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9228 	} else {
   9229 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9230 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9231 	}
   9232 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9233 	aprint_normal("\n");
   9234 
   9235 #undef ADD
   9236 
   9237 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9238 }
   9239 
   9240 /*
   9241  * wm_tbi_mediachange:	[ifmedia interface function]
   9242  *
   9243  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9244  */
   9245 static int
   9246 wm_tbi_mediachange(struct ifnet *ifp)
   9247 {
   9248 	struct wm_softc *sc = ifp->if_softc;
   9249 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9250 	uint32_t status;
   9251 	int i;
   9252 
   9253 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9254 		/* XXX need some work for >= 82571 and < 82575 */
   9255 		if (sc->sc_type < WM_T_82575)
   9256 			return 0;
   9257 	}
   9258 
   9259 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9260 	    || (sc->sc_type >= WM_T_82575))
   9261 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9262 
   9263 	sc->sc_ctrl &= ~CTRL_LRST;
   9264 	sc->sc_txcw = TXCW_ANE;
   9265 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9266 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9267 	else if (ife->ifm_media & IFM_FDX)
   9268 		sc->sc_txcw |= TXCW_FD;
   9269 	else
   9270 		sc->sc_txcw |= TXCW_HD;
   9271 
   9272 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9273 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9274 
   9275 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9276 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9277 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9278 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9279 	CSR_WRITE_FLUSH(sc);
   9280 	delay(1000);
   9281 
   9282 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9283 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9284 
   9285 	/*
   9286 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9287 	 * optics detect a signal, 0 if they don't.
   9288 	 */
   9289 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9290 		/* Have signal; wait for the link to come up. */
   9291 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9292 			delay(10000);
   9293 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9294 				break;
   9295 		}
   9296 
   9297 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9298 			    device_xname(sc->sc_dev),i));
   9299 
   9300 		status = CSR_READ(sc, WMREG_STATUS);
   9301 		DPRINTF(WM_DEBUG_LINK,
   9302 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9303 			device_xname(sc->sc_dev),status, STATUS_LU));
   9304 		if (status & STATUS_LU) {
   9305 			/* Link is up. */
   9306 			DPRINTF(WM_DEBUG_LINK,
   9307 			    ("%s: LINK: set media -> link up %s\n",
   9308 			    device_xname(sc->sc_dev),
   9309 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9310 
   9311 			/*
   9312 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9313 			 * so we should update sc->sc_ctrl
   9314 			 */
   9315 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9316 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9317 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9318 			if (status & STATUS_FD)
   9319 				sc->sc_tctl |=
   9320 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9321 			else
   9322 				sc->sc_tctl |=
   9323 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9324 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9325 				sc->sc_fcrtl |= FCRTL_XONE;
   9326 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9327 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9328 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9329 				      sc->sc_fcrtl);
   9330 			sc->sc_tbi_linkup = 1;
   9331 		} else {
   9332 			if (i == WM_LINKUP_TIMEOUT)
   9333 				wm_check_for_link(sc);
   9334 			/* Link is down. */
   9335 			DPRINTF(WM_DEBUG_LINK,
   9336 			    ("%s: LINK: set media -> link down\n",
   9337 			    device_xname(sc->sc_dev)));
   9338 			sc->sc_tbi_linkup = 0;
   9339 		}
   9340 	} else {
   9341 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9342 		    device_xname(sc->sc_dev)));
   9343 		sc->sc_tbi_linkup = 0;
   9344 	}
   9345 
   9346 	wm_tbi_serdes_set_linkled(sc);
   9347 
   9348 	return 0;
   9349 }
   9350 
   9351 /*
   9352  * wm_tbi_mediastatus:	[ifmedia interface function]
   9353  *
   9354  *	Get the current interface media status on a 1000BASE-X device.
   9355  */
   9356 static void
   9357 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9358 {
   9359 	struct wm_softc *sc = ifp->if_softc;
   9360 	uint32_t ctrl, status;
   9361 
   9362 	ifmr->ifm_status = IFM_AVALID;
   9363 	ifmr->ifm_active = IFM_ETHER;
   9364 
   9365 	status = CSR_READ(sc, WMREG_STATUS);
   9366 	if ((status & STATUS_LU) == 0) {
   9367 		ifmr->ifm_active |= IFM_NONE;
   9368 		return;
   9369 	}
   9370 
   9371 	ifmr->ifm_status |= IFM_ACTIVE;
   9372 	/* Only 82545 is LX */
   9373 	if (sc->sc_type == WM_T_82545)
   9374 		ifmr->ifm_active |= IFM_1000_LX;
   9375 	else
   9376 		ifmr->ifm_active |= IFM_1000_SX;
   9377 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9378 		ifmr->ifm_active |= IFM_FDX;
   9379 	else
   9380 		ifmr->ifm_active |= IFM_HDX;
   9381 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9382 	if (ctrl & CTRL_RFCE)
   9383 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9384 	if (ctrl & CTRL_TFCE)
   9385 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9386 }
   9387 
   9388 /* XXX TBI only */
   9389 static int
   9390 wm_check_for_link(struct wm_softc *sc)
   9391 {
   9392 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9393 	uint32_t rxcw;
   9394 	uint32_t ctrl;
   9395 	uint32_t status;
   9396 	uint32_t sig;
   9397 
   9398 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9399 		/* XXX need some work for >= 82571 */
   9400 		if (sc->sc_type >= WM_T_82571) {
   9401 			sc->sc_tbi_linkup = 1;
   9402 			return 0;
   9403 		}
   9404 	}
   9405 
   9406 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9407 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9408 	status = CSR_READ(sc, WMREG_STATUS);
   9409 
   9410 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9411 
   9412 	DPRINTF(WM_DEBUG_LINK,
   9413 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9414 		device_xname(sc->sc_dev), __func__,
   9415 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9416 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9417 
   9418 	/*
   9419 	 * SWDPIN   LU RXCW
   9420 	 *      0    0    0
   9421 	 *      0    0    1	(should not happen)
   9422 	 *      0    1    0	(should not happen)
   9423 	 *      0    1    1	(should not happen)
   9424 	 *      1    0    0	Disable autonego and force linkup
   9425 	 *      1    0    1	got /C/ but not linkup yet
   9426 	 *      1    1    0	(linkup)
   9427 	 *      1    1    1	If IFM_AUTO, back to autonego
   9428 	 *
   9429 	 */
   9430 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9431 	    && ((status & STATUS_LU) == 0)
   9432 	    && ((rxcw & RXCW_C) == 0)) {
   9433 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9434 			__func__));
   9435 		sc->sc_tbi_linkup = 0;
   9436 		/* Disable auto-negotiation in the TXCW register */
   9437 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9438 
   9439 		/*
   9440 		 * Force link-up and also force full-duplex.
   9441 		 *
    9442 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   9443 		 * so we should update sc->sc_ctrl
   9444 		 */
   9445 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9446 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9447 	} else if (((status & STATUS_LU) != 0)
   9448 	    && ((rxcw & RXCW_C) != 0)
   9449 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9450 		sc->sc_tbi_linkup = 1;
   9451 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9452 			__func__));
   9453 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9454 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9455 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9456 	    && ((rxcw & RXCW_C) != 0)) {
   9457 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9458 	} else {
   9459 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9460 			status));
   9461 	}
   9462 
   9463 	return 0;
   9464 }
   9465 
   9466 /*
   9467  * wm_tbi_tick:
   9468  *
   9469  *	Check the link on TBI devices.
   9470  *	This function acts as mii_tick().
   9471  */
   9472 static void
   9473 wm_tbi_tick(struct wm_softc *sc)
   9474 {
   9475 	struct mii_data *mii = &sc->sc_mii;
   9476 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9477 	uint32_t status;
   9478 
   9479 	KASSERT(WM_CORE_LOCKED(sc));
   9480 
   9481 	status = CSR_READ(sc, WMREG_STATUS);
   9482 
   9483 	/* XXX is this needed? */
   9484 	(void)CSR_READ(sc, WMREG_RXCW);
   9485 	(void)CSR_READ(sc, WMREG_CTRL);
   9486 
   9487 	/* set link status */
   9488 	if ((status & STATUS_LU) == 0) {
   9489 		DPRINTF(WM_DEBUG_LINK,
   9490 		    ("%s: LINK: checklink -> down\n",
   9491 			device_xname(sc->sc_dev)));
   9492 		sc->sc_tbi_linkup = 0;
   9493 	} else if (sc->sc_tbi_linkup == 0) {
   9494 		DPRINTF(WM_DEBUG_LINK,
   9495 		    ("%s: LINK: checklink -> up %s\n",
   9496 			device_xname(sc->sc_dev),
   9497 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9498 		sc->sc_tbi_linkup = 1;
   9499 		sc->sc_tbi_serdes_ticks = 0;
   9500 	}
   9501 
   9502 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9503 		goto setled;
   9504 
   9505 	if ((status & STATUS_LU) == 0) {
   9506 		sc->sc_tbi_linkup = 0;
   9507 		/* If the timer expired, retry autonegotiation */
   9508 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9509 		    && (++sc->sc_tbi_serdes_ticks
   9510 			>= sc->sc_tbi_serdes_anegticks)) {
   9511 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9512 			sc->sc_tbi_serdes_ticks = 0;
   9513 			/*
   9514 			 * Reset the link, and let autonegotiation do
   9515 			 * its thing
   9516 			 */
   9517 			sc->sc_ctrl |= CTRL_LRST;
   9518 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9519 			CSR_WRITE_FLUSH(sc);
   9520 			delay(1000);
   9521 			sc->sc_ctrl &= ~CTRL_LRST;
   9522 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9523 			CSR_WRITE_FLUSH(sc);
   9524 			delay(1000);
   9525 			CSR_WRITE(sc, WMREG_TXCW,
   9526 			    sc->sc_txcw & ~TXCW_ANE);
   9527 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9528 		}
   9529 	}
   9530 
   9531 setled:
   9532 	wm_tbi_serdes_set_linkled(sc);
   9533 }
   9534 
   9535 /* SERDES related */
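
/*
 * wm_serdes_power_up_link_82575:
 *
 *	Enable the PCS and drive CTRL_EXT SWDPIN 3 low, which powers up
 *	the SFP cage so the serdes link can come up.
 */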
   9536 static void
   9537 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9538 {
   9539 	uint32_t reg;
   9540 
   9541 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9542 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9543 		return;
   9544 
   9545 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9546 	reg |= PCS_CFG_PCS_EN;
   9547 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9548 
   9549 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9550 	reg &= ~CTRL_EXT_SWDPIN(3);
   9551 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9552 	CSR_WRITE_FLUSH(sc);
   9553 }
   9554 
   9555 static int
   9556 wm_serdes_mediachange(struct ifnet *ifp)
   9557 {
   9558 	struct wm_softc *sc = ifp->if_softc;
   9559 	bool pcs_autoneg = true; /* XXX */
   9560 	uint32_t ctrl_ext, pcs_lctl, reg;
   9561 
   9562 	/* XXX Currently, this function is not called on 8257[12] */
   9563 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9564 	    || (sc->sc_type >= WM_T_82575))
   9565 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9566 
   9567 	wm_serdes_power_up_link_82575(sc);
   9568 
   9569 	sc->sc_ctrl |= CTRL_SLU;
   9570 
   9571 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9572 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9573 
   9574 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9575 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
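	/*
	 * Choose between PCS autonegotiation and forced 1000/full-duplex
	 * based on the link mode strapped into CTRL_EXT: SGMII always
	 * autonegotiates, 1000BASE-KX never does.
	 */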
   9576 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9577 	case CTRL_EXT_LINK_MODE_SGMII:
   9578 		pcs_autoneg = true;
   9579 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9580 		break;
   9581 	case CTRL_EXT_LINK_MODE_1000KX:
   9582 		pcs_autoneg = false;
   9583 		/* FALLTHROUGH */
   9584 	default:
   9585 		if ((sc->sc_type == WM_T_82575)
   9586 		    || (sc->sc_type == WM_T_82576)) {
   9587 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9588 				pcs_autoneg = false;
   9589 		}
   9590 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9591 		    | CTRL_FRCFDX;
   9592 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9593 	}
   9594 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9595 
   9596 	if (pcs_autoneg) {
   9597 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9598 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9599 
   9600 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9601 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9602 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9603 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9604 	} else
   9605 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9606 
   9607 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9608 
   9610 	return 0;
   9611 }
   9612 
   9613 static void
   9614 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9615 {
   9616 	struct wm_softc *sc = ifp->if_softc;
   9617 	struct mii_data *mii = &sc->sc_mii;
   9618 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9619 	uint32_t pcs_adv, pcs_lpab, reg;
   9620 
   9621 	ifmr->ifm_status = IFM_AVALID;
   9622 	ifmr->ifm_active = IFM_ETHER;
   9623 
   9624 	/* Check PCS */
   9625 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9626 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9627 		ifmr->ifm_active |= IFM_NONE;
   9628 		sc->sc_tbi_linkup = 0;
   9629 		goto setled;
   9630 	}
   9631 
   9632 	sc->sc_tbi_linkup = 1;
   9633 	ifmr->ifm_status |= IFM_ACTIVE;
   9634 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9635 	if ((reg & PCS_LSTS_FDX) != 0)
   9636 		ifmr->ifm_active |= IFM_FDX;
   9637 	else
   9638 		ifmr->ifm_active |= IFM_HDX;
   9639 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9640 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9641 		/* Check flow */
   9642 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9643 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9644 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9645 			goto setled;
   9646 		}
   9647 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9648 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9649 		DPRINTF(WM_DEBUG_LINK,
   9650 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
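		/*
		 * Resolve the pause mode from our advertisement and the
		 * link partner's ability, following the usual IEEE 802.3
		 * symmetric/asymmetric pause resolution rules.
		 */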
   9651 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9652 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9653 			mii->mii_media_active |= IFM_FLOW
   9654 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9655 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9656 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9657 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9658 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9659 			mii->mii_media_active |= IFM_FLOW
   9660 			    | IFM_ETH_TXPAUSE;
   9661 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9662 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9663 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9664 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9665 			mii->mii_media_active |= IFM_FLOW
   9666 			    | IFM_ETH_RXPAUSE;
   9667 		} else {
   9668 		}
   9669 	}
   9670 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9671 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9672 setled:
   9673 	wm_tbi_serdes_set_linkled(sc);
   9674 }
   9675 
   9676 /*
   9677  * wm_serdes_tick:
   9678  *
   9679  *	Check the link on serdes devices.
   9680  */
   9681 static void
   9682 wm_serdes_tick(struct wm_softc *sc)
   9683 {
   9684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9685 	struct mii_data *mii = &sc->sc_mii;
   9686 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9687 	uint32_t reg;
   9688 
   9689 	KASSERT(WM_CORE_LOCKED(sc));
   9690 
   9691 	mii->mii_media_status = IFM_AVALID;
   9692 	mii->mii_media_active = IFM_ETHER;
   9693 
   9694 	/* Check PCS */
   9695 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9696 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9697 		mii->mii_media_status |= IFM_ACTIVE;
   9698 		sc->sc_tbi_linkup = 1;
   9699 		sc->sc_tbi_serdes_ticks = 0;
   9700 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9701 		if ((reg & PCS_LSTS_FDX) != 0)
   9702 			mii->mii_media_active |= IFM_FDX;
   9703 		else
   9704 			mii->mii_media_active |= IFM_HDX;
   9705 	} else {
   9706 		mii->mii_media_status |= IFM_NONE;
   9707 		sc->sc_tbi_linkup = 0;
    9708 		/* If the timer expired, retry autonegotiation */
   9709 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9710 		    && (++sc->sc_tbi_serdes_ticks
   9711 			>= sc->sc_tbi_serdes_anegticks)) {
   9712 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9713 			sc->sc_tbi_serdes_ticks = 0;
   9714 			/* XXX */
   9715 			wm_serdes_mediachange(ifp);
   9716 		}
   9717 	}
   9718 
   9719 	wm_tbi_serdes_set_linkled(sc);
   9720 }
   9721 
   9722 /* SFP related */
   9723 
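/*
 * wm_sfp_read_data_byte:
 *
 *	Read a single byte from the SFP module's EEPROM through the
 *	I2C command register.
 */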
   9724 static int
   9725 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9726 {
   9727 	uint32_t i2ccmd;
   9728 	int i;
   9729 
   9730 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9731 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9732 
   9733 	/* Poll the ready bit */
   9734 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9735 		delay(50);
   9736 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9737 		if (i2ccmd & I2CCMD_READY)
   9738 			break;
   9739 	}
   9740 	if ((i2ccmd & I2CCMD_READY) == 0)
   9741 		return -1;
   9742 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9743 		return -1;
   9744 
   9745 	*data = i2ccmd & 0x00ff;
   9746 
   9747 	return 0;
   9748 }
   9749 
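/*
 * wm_sfp_get_media_type:
 *
 *	Determine the media type (SERDES or copper) from the SFP
 *	module's identifier and Ethernet compliance code bytes.
 */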
   9750 static uint32_t
   9751 wm_sfp_get_media_type(struct wm_softc *sc)
   9752 {
   9753 	uint32_t ctrl_ext;
   9754 	uint8_t val = 0;
   9755 	int timeout = 3;
   9756 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9757 	int rv = -1;
   9758 
   9759 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9760 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9761 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9762 	CSR_WRITE_FLUSH(sc);
   9763 
   9764 	/* Read SFP module data */
   9765 	while (timeout) {
   9766 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9767 		if (rv == 0)
   9768 			break;
   9769 		delay(100*1000); /* XXX too big */
   9770 		timeout--;
   9771 	}
   9772 	if (rv != 0)
   9773 		goto out;
   9774 	switch (val) {
   9775 	case SFF_SFP_ID_SFF:
   9776 		aprint_normal_dev(sc->sc_dev,
   9777 		    "Module/Connector soldered to board\n");
   9778 		break;
   9779 	case SFF_SFP_ID_SFP:
   9780 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9781 		break;
   9782 	case SFF_SFP_ID_UNKNOWN:
   9783 		goto out;
   9784 	default:
   9785 		break;
   9786 	}
   9787 
   9788 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9789 	if (rv != 0) {
   9790 		goto out;
   9791 	}
   9792 
   9793 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9794 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9796 		sc->sc_flags |= WM_F_SGMII;
   9797 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9799 		sc->sc_flags |= WM_F_SGMII;
   9800 		mediatype = WM_MEDIATYPE_SERDES;
   9801 	}
   9802 
   9803 out:
   9804 	/* Restore I2C interface setting */
   9805 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9806 
   9807 	return mediatype;
   9808 }

/*
   9810  * NVM related.
   9811  * Microwire, SPI (w/wo EERD) and Flash.
   9812  */
   9813 
   9814 /* Both spi and uwire */
   9815 
   9816 /*
   9817  * wm_eeprom_sendbits:
   9818  *
   9819  *	Send a series of bits to the EEPROM.
   9820  */
   9821 static void
   9822 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9823 {
   9824 	uint32_t reg;
   9825 	int x;
   9826 
   9827 	reg = CSR_READ(sc, WMREG_EECD);
   9828 
   9829 	for (x = nbits; x > 0; x--) {
   9830 		if (bits & (1U << (x - 1)))
   9831 			reg |= EECD_DI;
   9832 		else
   9833 			reg &= ~EECD_DI;
   9834 		CSR_WRITE(sc, WMREG_EECD, reg);
   9835 		CSR_WRITE_FLUSH(sc);
   9836 		delay(2);
   9837 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9838 		CSR_WRITE_FLUSH(sc);
   9839 		delay(2);
   9840 		CSR_WRITE(sc, WMREG_EECD, reg);
   9841 		CSR_WRITE_FLUSH(sc);
   9842 		delay(2);
   9843 	}
   9844 }
   9845 
   9846 /*
   9847  * wm_eeprom_recvbits:
   9848  *
   9849  *	Receive a series of bits from the EEPROM.
   9850  */
   9851 static void
   9852 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9853 {
   9854 	uint32_t reg, val;
   9855 	int x;
   9856 
   9857 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9858 
   9859 	val = 0;
   9860 	for (x = nbits; x > 0; x--) {
   9861 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9862 		CSR_WRITE_FLUSH(sc);
   9863 		delay(2);
   9864 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9865 			val |= (1U << (x - 1));
   9866 		CSR_WRITE(sc, WMREG_EECD, reg);
   9867 		CSR_WRITE_FLUSH(sc);
   9868 		delay(2);
   9869 	}
   9870 	*valp = val;
   9871 }
   9872 
   9873 /* Microwire */
   9874 
   9875 /*
   9876  * wm_nvm_read_uwire:
   9877  *
   9878  *	Read a word from the EEPROM using the MicroWire protocol.
   9879  */
   9880 static int
   9881 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9882 {
   9883 	uint32_t reg, val;
   9884 	int i;
   9885 
   9886 	for (i = 0; i < wordcnt; i++) {
   9887 		/* Clear SK and DI. */
   9888 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9889 		CSR_WRITE(sc, WMREG_EECD, reg);
   9890 
   9891 		/*
   9892 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9893 		 * and Xen.
   9894 		 *
   9895 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   9897 		 */
   9898 		if (sc->sc_type == WM_T_82540) {
   9899 			reg |= EECD_SK;
   9900 			CSR_WRITE(sc, WMREG_EECD, reg);
   9901 			reg &= ~EECD_SK;
   9902 			CSR_WRITE(sc, WMREG_EECD, reg);
   9903 			CSR_WRITE_FLUSH(sc);
   9904 			delay(2);
   9905 		}
   9906 		/* XXX: end of workaround */
   9907 
   9908 		/* Set CHIP SELECT. */
   9909 		reg |= EECD_CS;
   9910 		CSR_WRITE(sc, WMREG_EECD, reg);
   9911 		CSR_WRITE_FLUSH(sc);
   9912 		delay(2);
   9913 
   9914 		/* Shift in the READ command. */
   9915 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9916 
   9917 		/* Shift in address. */
   9918 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9919 
   9920 		/* Shift out the data. */
   9921 		wm_eeprom_recvbits(sc, &val, 16);
   9922 		data[i] = val & 0xffff;
   9923 
   9924 		/* Clear CHIP SELECT. */
   9925 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9926 		CSR_WRITE(sc, WMREG_EECD, reg);
   9927 		CSR_WRITE_FLUSH(sc);
   9928 		delay(2);
   9929 	}
   9930 
   9931 	return 0;
   9932 }
   9933 
   9934 /* SPI */
   9935 
   9936 /*
   9937  * Set SPI and FLASH related information from the EECD register.
   9938  * For 82541 and 82547, the word size is taken from EEPROM.
   9939  */
   9940 static int
   9941 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9942 {
   9943 	int size;
   9944 	uint32_t reg;
   9945 	uint16_t data;
   9946 
   9947 	reg = CSR_READ(sc, WMREG_EECD);
   9948 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9949 
   9950 	/* Read the size of NVM from EECD by default */
   9951 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9952 	switch (sc->sc_type) {
   9953 	case WM_T_82541:
   9954 	case WM_T_82541_2:
   9955 	case WM_T_82547:
   9956 	case WM_T_82547_2:
   9957 		/* Set dummy value to access EEPROM */
   9958 		sc->sc_nvm_wordsize = 64;
   9959 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9960 		reg = data;
   9961 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9962 		if (size == 0)
			size = 6; /* 1 << 6 = 64 words */
   9964 		else
   9965 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9966 		break;
   9967 	case WM_T_80003:
   9968 	case WM_T_82571:
   9969 	case WM_T_82572:
   9970 	case WM_T_82573: /* SPI case */
   9971 	case WM_T_82574: /* SPI case */
   9972 	case WM_T_82583: /* SPI case */
   9973 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9974 		if (size > 14)
   9975 			size = 14;
   9976 		break;
   9977 	case WM_T_82575:
   9978 	case WM_T_82576:
   9979 	case WM_T_82580:
   9980 	case WM_T_I350:
   9981 	case WM_T_I354:
   9982 	case WM_T_I210:
   9983 	case WM_T_I211:
   9984 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9985 		if (size > 15)
   9986 			size = 15;
   9987 		break;
   9988 	default:
   9989 		aprint_error_dev(sc->sc_dev,
   9990 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9991 		return -1;
   9993 	}
   9994 
   9995 	sc->sc_nvm_wordsize = 1 << size;
   9996 
   9997 	return 0;
   9998 }
   9999 
   10000 /*
   10001  * wm_nvm_ready_spi:
   10002  *
   10003  *	Wait for a SPI EEPROM to be ready for commands.
   10004  */
   10005 static int
   10006 wm_nvm_ready_spi(struct wm_softc *sc)
   10007 {
   10008 	uint32_t val;
   10009 	int usec;
   10010 
   10011 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10012 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10013 		wm_eeprom_recvbits(sc, &val, 8);
   10014 		if ((val & SPI_SR_RDY) == 0)
   10015 			break;
   10016 	}
   10017 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   10019 		return 1;
   10020 	}
   10021 	return 0;
   10022 }
   10023 
   10024 /*
   10025  * wm_nvm_read_spi:
   10026  *
 *	Read a word from the EEPROM using the SPI protocol.
   10028  */
   10029 static int
   10030 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10031 {
   10032 	uint32_t reg, val;
   10033 	int i;
   10034 	uint8_t opc;
   10035 
   10036 	/* Clear SK and CS. */
   10037 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10038 	CSR_WRITE(sc, WMREG_EECD, reg);
   10039 	CSR_WRITE_FLUSH(sc);
   10040 	delay(2);
   10041 
   10042 	if (wm_nvm_ready_spi(sc))
   10043 		return 1;
   10044 
   10045 	/* Toggle CS to flush commands. */
   10046 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10047 	CSR_WRITE_FLUSH(sc);
   10048 	delay(2);
   10049 	CSR_WRITE(sc, WMREG_EECD, reg);
   10050 	CSR_WRITE_FLUSH(sc);
   10051 	delay(2);
   10052 
   10053 	opc = SPI_OPC_READ;
   10054 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10055 		opc |= SPI_OPC_A8;
   10056 
   10057 	wm_eeprom_sendbits(sc, opc, 8);
   10058 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10059 
   10060 	for (i = 0; i < wordcnt; i++) {
   10061 		wm_eeprom_recvbits(sc, &val, 16);
   10062 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10063 	}
   10064 
   10065 	/* Raise CS and clear SK. */
   10066 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10067 	CSR_WRITE(sc, WMREG_EECD, reg);
   10068 	CSR_WRITE_FLUSH(sc);
   10069 	delay(2);
   10070 
   10071 	return 0;
   10072 }
   10073 
/* Reading with the EERD register */
   10075 
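/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register until the DONE bit is set.
 *	Return 0 on completion or -1 on timeout.
 */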
   10076 static int
   10077 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10078 {
   10079 	uint32_t attempts = 100000;
   10080 	uint32_t i, reg = 0;
   10081 	int32_t done = -1;
   10082 
   10083 	for (i = 0; i < attempts; i++) {
   10084 		reg = CSR_READ(sc, rw);
   10085 
   10086 		if (reg & EERD_DONE) {
   10087 			done = 0;
   10088 			break;
   10089 		}
   10090 		delay(5);
   10091 	}
   10092 
   10093 	return done;
   10094 }
   10095 
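/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the NVM using the EERD register.
 */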
   10096 static int
   10097 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10098     uint16_t *data)
   10099 {
   10100 	int i, eerd = 0;
   10101 	int error = 0;
   10102 
   10103 	for (i = 0; i < wordcnt; i++) {
   10104 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10105 
   10106 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10107 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10108 		if (error != 0)
   10109 			break;
   10110 
   10111 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10112 	}
   10113 
   10114 	return error;
   10115 }
   10116 
   10117 /* Flash */
   10118 
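/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank (0 or 1) holds the valid NVM image.
 *	On PCH_SPT the bank is taken from CTRL_EXT; otherwise it is
 *	derived from EECD or from the bank signature words.
 */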
   10119 static int
   10120 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10121 {
   10122 	uint32_t eecd;
   10123 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10124 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10125 	uint8_t sig_byte = 0;
   10126 
   10127 	switch (sc->sc_type) {
   10128 	case WM_T_PCH_SPT:
   10129 		/*
   10130 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10131 		 * sector valid bits from the NVM.
   10132 		 */
   10133 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10134 		if ((*bank == 0) || (*bank == 1)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: no valid NVM bank present\n", __func__);
   10138 			return -1;
   10139 		} else {
   10140 			*bank = *bank - 2;
   10141 			return 0;
   10142 		}
   10143 	case WM_T_ICH8:
   10144 	case WM_T_ICH9:
   10145 		eecd = CSR_READ(sc, WMREG_EECD);
   10146 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10147 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10148 			return 0;
   10149 		}
   10150 		/* FALLTHROUGH */
   10151 	default:
   10152 		/* Default to 0 */
   10153 		*bank = 0;
   10154 
   10155 		/* Check bank 0 */
   10156 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10157 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10158 			*bank = 0;
   10159 			return 0;
   10160 		}
   10161 
   10162 		/* Check bank 1 */
   10163 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10164 		    &sig_byte);
   10165 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10166 			*bank = 1;
   10167 			return 0;
   10168 		}
   10169 	}
   10170 
   10171 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10172 		device_xname(sc->sc_dev)));
   10173 	return -1;
   10174 }
   10175 
   10176 /******************************************************************************
   10177  * This function does initial flash setup so that a new read/write/erase cycle
   10178  * can be started.
   10179  *
   10180  * sc - The pointer to the hw structure
   10181  ****************************************************************************/
   10182 static int32_t
   10183 wm_ich8_cycle_init(struct wm_softc *sc)
   10184 {
   10185 	uint16_t hsfsts;
   10186 	int32_t error = 1;
   10187 	int32_t i     = 0;
   10188 
   10189 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10190 
	/* Check the Flash Descriptor Valid bit in HW status */
   10192 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10193 		return error;
   10194 	}
   10195 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   10198 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10199 
   10200 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10201 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set to 1 by a hardware reset so that it can be used
	 * to tell whether a cycle is in progress or has completed.  We
	 * should also have a software semaphore mechanism guarding FDONE
	 * or the cycle-in-progress bit so that accesses to those bits by
	 * two threads are serialized and two threads cannot start a
	 * cycle at the same time.
	 */
   10212 
   10213 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10214 		/*
   10215 		 * There is no cycle running at present, so we can start a
   10216 		 * cycle
   10217 		 */
   10218 
   10219 		/* Begin by setting Flash Cycle Done. */
   10220 		hsfsts |= HSFSTS_DONE;
   10221 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10222 		error = 0;
   10223 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   10228 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10229 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10230 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10231 				error = 0;
   10232 				break;
   10233 			}
   10234 			delay(1);
   10235 		}
   10236 		if (error == 0) {
			/*
			 * The previous cycle ended within the timeout,
			 * so now set the Flash Cycle Done.
			 */
   10241 			hsfsts |= HSFSTS_DONE;
   10242 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10243 		}
   10244 	}
   10245 	return error;
   10246 }
   10247 
   10248 /******************************************************************************
   10249  * This function starts a flash cycle and waits for its completion
   10250  *
   10251  * sc - The pointer to the hw structure
   10252  ****************************************************************************/
   10253 static int32_t
   10254 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10255 {
   10256 	uint16_t hsflctl;
   10257 	uint16_t hsfsts;
   10258 	int32_t error = 1;
   10259 	uint32_t i = 0;
   10260 
   10261 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10262 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10263 	hsflctl |= HSFCTL_GO;
   10264 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10265 
   10266 	/* Wait till FDONE bit is set to 1 */
   10267 	do {
   10268 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10269 		if (hsfsts & HSFSTS_DONE)
   10270 			break;
   10271 		delay(1);
   10272 		i++;
   10273 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10275 		error = 0;
   10276 
   10277 	return error;
   10278 }
   10279 
   10280 /******************************************************************************
   10281  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10282  *
   10283  * sc - The pointer to the hw structure
   10284  * index - The index of the byte or word to read.
   10285  * size - Size of data to read, 1=byte 2=word, 4=dword
   10286  * data - Pointer to the word to store the value read.
   10287  *****************************************************************************/
   10288 static int32_t
   10289 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10290     uint32_t size, uint32_t *data)
   10291 {
   10292 	uint16_t hsfsts;
   10293 	uint16_t hsflctl;
   10294 	uint32_t flash_linear_address;
   10295 	uint32_t flash_data = 0;
   10296 	int32_t error = 1;
   10297 	int32_t count = 0;
   10298 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10301 		return error;
   10302 
   10303 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10304 	    sc->sc_ich8_flash_base;
   10305 
   10306 	do {
   10307 		delay(1);
   10308 		/* Steps */
   10309 		error = wm_ich8_cycle_init(sc);
   10310 		if (error)
   10311 			break;
   10312 
   10313 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0b = 1 byte) */
   10315 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10316 		    & HSFCTL_BCOUNT_MASK;
   10317 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10318 		if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
			 */
   10323 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10324 			    (uint32_t)hsflctl);
   10325 		} else
   10326 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10327 
   10328 		/*
   10329 		 * Write the last 24 bits of index into Flash Linear address
   10330 		 * field in Flash Address
   10331 		 */
   10332 		/* TODO: TBD maybe check the index against the size of flash */
   10333 
   10334 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10335 
   10336 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10337 
		/*
		 * If FCERR is set to 1, clear it and try the whole
		 * sequence a few more times; otherwise read in (shift in)
		 * the Flash Data0, least significant byte first.
		 */
   10344 		if (error == 0) {
   10345 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10346 			if (size == 1)
   10347 				*data = (uint8_t)(flash_data & 0x000000FF);
   10348 			else if (size == 2)
   10349 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10350 			else if (size == 4)
   10351 				*data = (uint32_t)flash_data;
   10352 			break;
   10353 		} else {
   10354 			/*
   10355 			 * If we've gotten here, then things are probably
   10356 			 * completely hosed, but if the error condition is
   10357 			 * detected, it won't hurt to give it another try...
   10358 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10359 			 */
   10360 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10361 			if (hsfsts & HSFSTS_ERR) {
   10362 				/* Repeat for some time before giving up. */
   10363 				continue;
   10364 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10365 				break;
   10366 		}
   10367 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10368 
   10369 	return error;
   10370 }
   10371 
   10372 /******************************************************************************
   10373  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10374  *
   10375  * sc - pointer to wm_hw structure
   10376  * index - The index of the byte to read.
   10377  * data - Pointer to a byte to store the value read.
   10378  *****************************************************************************/
   10379 static int32_t
   10380 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10381 {
   10382 	int32_t status;
   10383 	uint32_t word = 0;
   10384 
   10385 	status = wm_read_ich8_data(sc, index, 1, &word);
   10386 	if (status == 0)
   10387 		*data = (uint8_t)word;
   10388 	else
   10389 		*data = 0;
   10390 
   10391 	return status;
   10392 }
   10393 
   10394 /******************************************************************************
   10395  * Reads a word from the NVM using the ICH8 flash access registers.
   10396  *
   10397  * sc - pointer to wm_hw structure
   10398  * index - The starting byte index of the word to read.
   10399  * data - Pointer to a word to store the value read.
   10400  *****************************************************************************/
   10401 static int32_t
   10402 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10403 {
   10404 	int32_t status;
   10405 	uint32_t word = 0;
   10406 
   10407 	status = wm_read_ich8_data(sc, index, 2, &word);
   10408 	if (status == 0)
   10409 		*data = (uint16_t)word;
   10410 	else
   10411 		*data = 0;
   10412 
   10413 	return status;
   10414 }
   10415 
   10416 /******************************************************************************
   10417  * Reads a dword from the NVM using the ICH8 flash access registers.
   10418  *
   10419  * sc - pointer to wm_hw structure
   10420  * index - The starting byte index of the word to read.
   10421  * data - Pointer to a word to store the value read.
   10422  *****************************************************************************/
   10423 static int32_t
   10424 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10425 {
   10426 	int32_t status;
   10427 
   10428 	status = wm_read_ich8_data(sc, index, 4, data);
   10429 	return status;
   10430 }
   10431 
   10432 /******************************************************************************
   10433  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10434  * register.
   10435  *
   10436  * sc - Struct containing variables accessed by shared code
   10437  * offset - offset of word in the EEPROM to read
   10438  * data - word read from the EEPROM
   10439  * words - number of words to read
   10440  *****************************************************************************/
   10441 static int
   10442 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10443 {
   10444 	int32_t  error = 0;
   10445 	uint32_t flash_bank = 0;
   10446 	uint32_t act_offset = 0;
   10447 	uint32_t bank_offset = 0;
   10448 	uint16_t word = 0;
   10449 	uint16_t i = 0;
   10450 
   10451 	/*
   10452 	 * We need to know which is the valid flash bank.  In the event
   10453 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10454 	 * managing flash_bank.  So it cannot be trusted and needs
   10455 	 * to be updated with each read.
   10456 	 */
   10457 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10458 	if (error) {
   10459 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10460 			device_xname(sc->sc_dev)));
   10461 		flash_bank = 0;
   10462 	}
   10463 
   10464 	/*
   10465 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10466 	 * size
   10467 	 */
   10468 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10469 
   10470 	error = wm_get_swfwhw_semaphore(sc);
   10471 	if (error) {
   10472 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10473 		    __func__);
   10474 		return error;
   10475 	}
   10476 
   10477 	for (i = 0; i < words; i++) {
   10478 		/* The NVM part needs a byte offset, hence * 2 */
   10479 		act_offset = bank_offset + ((offset + i) * 2);
   10480 		error = wm_read_ich8_word(sc, act_offset, &word);
   10481 		if (error) {
   10482 			aprint_error_dev(sc->sc_dev,
   10483 			    "%s: failed to read NVM\n", __func__);
   10484 			break;
   10485 		}
   10486 		data[i] = word;
   10487 	}
   10488 
   10489 	wm_put_swfwhw_semaphore(sc);
   10490 	return error;
   10491 }
   10492 
   10493 /******************************************************************************
   10494  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10495  * register.
   10496  *
   10497  * sc - Struct containing variables accessed by shared code
   10498  * offset - offset of word in the EEPROM to read
   10499  * data - word read from the EEPROM
   10500  * words - number of words to read
   10501  *****************************************************************************/
   10502 static int
   10503 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10504 {
   10505 	int32_t  error = 0;
   10506 	uint32_t flash_bank = 0;
   10507 	uint32_t act_offset = 0;
   10508 	uint32_t bank_offset = 0;
   10509 	uint32_t dword = 0;
   10510 	uint16_t i = 0;
   10511 
   10512 	/*
   10513 	 * We need to know which is the valid flash bank.  In the event
   10514 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10515 	 * managing flash_bank.  So it cannot be trusted and needs
   10516 	 * to be updated with each read.
   10517 	 */
   10518 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10519 	if (error) {
   10520 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10521 			device_xname(sc->sc_dev)));
   10522 		flash_bank = 0;
   10523 	}
   10524 
   10525 	/*
   10526 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10527 	 * size
   10528 	 */
   10529 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10530 
   10531 	error = wm_get_swfwhw_semaphore(sc);
   10532 	if (error) {
   10533 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10534 		    __func__);
   10535 		return error;
   10536 	}
   10537 
   10538 	for (i = 0; i < words; i++) {
   10539 		/* The NVM part needs a byte offset, hence * 2 */
   10540 		act_offset = bank_offset + ((offset + i) * 2);
   10541 		/* but we must read dword aligned, so mask ... */
   10542 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10543 		if (error) {
   10544 			aprint_error_dev(sc->sc_dev,
   10545 			    "%s: failed to read NVM\n", __func__);
   10546 			break;
   10547 		}
   10548 		/* ... and pick out low or high word */
   10549 		if ((act_offset & 0x2) == 0)
   10550 			data[i] = (uint16_t)(dword & 0xFFFF);
   10551 		else
   10552 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10553 	}
   10554 
   10555 	wm_put_swfwhw_semaphore(sc);
   10556 	return error;
   10557 }
   10558 
   10559 /* iNVM */
   10560 
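/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (integrated NVM) records for a word autoload
 *	structure matching the given address and return its data.
 */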
   10561 static int
   10562 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10563 {
	int32_t  rv = -1;	/* Fail unless the word is found below */
   10565 	uint32_t invm_dword;
   10566 	uint16_t i;
   10567 	uint8_t record_type, word_address;
   10568 
   10569 	for (i = 0; i < INVM_SIZE; i++) {
   10570 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10571 		/* Get record type */
   10572 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10573 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10574 			break;
   10575 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10576 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10577 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10578 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10579 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10580 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10581 			if (word_address == address) {
   10582 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10583 				rv = 0;
   10584 				break;
   10585 			}
   10586 		}
   10587 	}
   10588 
   10589 	return rv;
   10590 }
   10591 
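/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, falling back to documented default
 *	values for words that are not programmed.
 */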
   10592 static int
   10593 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10594 {
   10595 	int rv = 0;
   10596 	int i;
   10597 
   10598 	for (i = 0; i < words; i++) {
   10599 		switch (offset + i) {
   10600 		case NVM_OFF_MACADDR:
   10601 		case NVM_OFF_MACADDR1:
   10602 		case NVM_OFF_MACADDR2:
   10603 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10604 			if (rv != 0) {
   10605 				data[i] = 0xffff;
   10606 				rv = -1;
   10607 			}
   10608 			break;
   10609 		case NVM_OFF_CFG2:
   10610 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10611 			if (rv != 0) {
   10612 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10613 				rv = 0;
   10614 			}
   10615 			break;
   10616 		case NVM_OFF_CFG4:
   10617 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10618 			if (rv != 0) {
   10619 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10620 				rv = 0;
   10621 			}
   10622 			break;
   10623 		case NVM_OFF_LED_1_CFG:
   10624 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10625 			if (rv != 0) {
   10626 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10627 				rv = 0;
   10628 			}
   10629 			break;
   10630 		case NVM_OFF_LED_0_2_CFG:
   10631 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10632 			if (rv != 0) {
   10633 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10634 				rv = 0;
   10635 			}
   10636 			break;
   10637 		case NVM_OFF_ID_LED_SETTINGS:
   10638 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10639 			if (rv != 0) {
   10640 				*data = ID_LED_RESERVED_FFFF;
   10641 				rv = 0;
   10642 			}
   10643 			break;
   10644 		default:
   10645 			DPRINTF(WM_DEBUG_NVM,
   10646 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10647 			*data = NVM_RESERVED_WORD;
   10648 			break;
   10649 		}
   10650 	}
   10651 
   10652 	return rv;
   10653 }
   10654 
   10655 /* Lock, detecting NVM type, validate checksum, version and read */
   10656 
   10657 /*
   10658  * wm_nvm_acquire:
   10659  *
   10660  *	Perform the EEPROM handshake required on some chips.
   10661  */
   10662 static int
   10663 wm_nvm_acquire(struct wm_softc *sc)
   10664 {
   10665 	uint32_t reg;
   10666 	int x;
   10667 	int ret = 0;
   10668 
	/* Flash NVM needs no handshake; always succeeds */
   10670 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10671 		return 0;
   10672 
   10673 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10674 		ret = wm_get_swfwhw_semaphore(sc);
   10675 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10676 		/* This will also do wm_get_swsm_semaphore() if needed */
   10677 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10678 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10679 		ret = wm_get_swsm_semaphore(sc);
   10680 	}
   10681 
   10682 	if (ret) {
   10683 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10684 			__func__);
   10685 		return 1;
   10686 	}
   10687 
   10688 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10689 		reg = CSR_READ(sc, WMREG_EECD);
   10690 
   10691 		/* Request EEPROM access. */
   10692 		reg |= EECD_EE_REQ;
   10693 		CSR_WRITE(sc, WMREG_EECD, reg);
   10694 
   10695 		/* ..and wait for it to be granted. */
   10696 		for (x = 0; x < 1000; x++) {
   10697 			reg = CSR_READ(sc, WMREG_EECD);
   10698 			if (reg & EECD_EE_GNT)
   10699 				break;
   10700 			delay(5);
   10701 		}
   10702 		if ((reg & EECD_EE_GNT) == 0) {
   10703 			aprint_error_dev(sc->sc_dev,
   10704 			    "could not acquire EEPROM GNT\n");
   10705 			reg &= ~EECD_EE_REQ;
   10706 			CSR_WRITE(sc, WMREG_EECD, reg);
   10707 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10708 				wm_put_swfwhw_semaphore(sc);
   10709 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10710 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10711 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10712 				wm_put_swsm_semaphore(sc);
   10713 			return 1;
   10714 		}
   10715 	}
   10716 
   10717 	return 0;
   10718 }
   10719 
   10720 /*
   10721  * wm_nvm_release:
   10722  *
   10723  *	Release the EEPROM mutex.
   10724  */
   10725 static void
   10726 wm_nvm_release(struct wm_softc *sc)
   10727 {
   10728 	uint32_t reg;
   10729 
	/* Flash NVM needs no handshake; nothing to release */
   10731 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10732 		return;
   10733 
   10734 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10735 		reg = CSR_READ(sc, WMREG_EECD);
   10736 		reg &= ~EECD_EE_REQ;
   10737 		CSR_WRITE(sc, WMREG_EECD, reg);
   10738 	}
   10739 
   10740 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10741 		wm_put_swfwhw_semaphore(sc);
   10742 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10743 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10744 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10745 		wm_put_swsm_semaphore(sc);
   10746 }
   10747 
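/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Check whether the NVM is an on-board EEPROM.  On 82573, 82574
 *	and 82583 the device may use Flash instead; return 0 in that
 *	case.
 */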
   10748 static int
   10749 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10750 {
   10751 	uint32_t eecd = 0;
   10752 
   10753 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10754 	    || sc->sc_type == WM_T_82583) {
   10755 		eecd = CSR_READ(sc, WMREG_EECD);
   10756 
   10757 		/* Isolate bits 15 & 16 */
   10758 		eecd = ((eecd >> 15) & 0x03);
   10759 
   10760 		/* If both bits are set, device is Flash type */
   10761 		if (eecd == 0x03)
   10762 			return 0;
   10763 	}
   10764 	return 1;
   10765 }
   10766 
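/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Check the EEC register to see whether an external Flash is
 *	detected (I210).
 */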
   10767 static int
   10768 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10769 {
   10770 	uint32_t eec;
   10771 
   10772 	eec = CSR_READ(sc, WMREG_EEC);
   10773 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10774 		return 1;
   10775 
   10776 	return 0;
   10777 }
   10778 
   10779 /*
   10780  * wm_nvm_validate_checksum
   10781  *
   10782  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10783  */
   10784 static int
   10785 wm_nvm_validate_checksum(struct wm_softc *sc)
   10786 {
   10787 	uint16_t checksum;
   10788 	uint16_t eeprom_data;
   10789 #ifdef WM_DEBUG
   10790 	uint16_t csum_wordaddr, valid_checksum;
   10791 #endif
   10792 	int i;
   10793 
   10794 	checksum = 0;
   10795 
   10796 	/* Don't check for I211 */
   10797 	if (sc->sc_type == WM_T_I211)
   10798 		return 0;
   10799 
   10800 #ifdef WM_DEBUG
   10801 	if (sc->sc_type == WM_T_PCH_LPT) {
   10802 		csum_wordaddr = NVM_OFF_COMPAT;
   10803 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10804 	} else {
   10805 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10806 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10807 	}
   10808 
   10809 	/* Dump EEPROM image for debug */
   10810 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10811 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10812 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10813 		/* XXX PCH_SPT? */
   10814 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10815 		if ((eeprom_data & valid_checksum) == 0) {
   10816 			DPRINTF(WM_DEBUG_NVM,
   10817 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10818 				device_xname(sc->sc_dev), eeprom_data,
   10819 				    valid_checksum));
   10820 		}
   10821 	}
   10822 
   10823 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10824 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10825 		for (i = 0; i < NVM_SIZE; i++) {
   10826 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10827 				printf("XXXX ");
   10828 			else
   10829 				printf("%04hx ", eeprom_data);
   10830 			if (i % 8 == 7)
   10831 				printf("\n");
   10832 		}
   10833 	}
   10834 
   10835 #endif /* WM_DEBUG */
   10836 
   10837 	for (i = 0; i < NVM_SIZE; i++) {
   10838 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10839 			return 1;
   10840 		checksum += eeprom_data;
   10841 	}
   10842 
   10843 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10844 #ifdef WM_DEBUG
   10845 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10846 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10847 #endif
   10848 	}
   10849 
   10850 	return 0;
   10851 }
   10852 
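/*
 * wm_nvm_version_invm:
 *
 *	Decode the NVM image version from iNVM word 61 (I210/I211).
 */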
   10853 static void
   10854 wm_nvm_version_invm(struct wm_softc *sc)
   10855 {
   10856 	uint32_t dword;
   10857 
   10858 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   10862 	 *
   10863 	 * Example:
   10864 	 *
   10865 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10866 	 */
   10867 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10868 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10869 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10870 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10871 }
   10872 
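/*
 * wm_nvm_version:
 *
 *	Decode and print the NVM image version and, when available,
 *	the option ROM version and the image unique ID.
 */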
   10873 static void
   10874 wm_nvm_version(struct wm_softc *sc)
   10875 {
   10876 	uint16_t major, minor, build, patch;
   10877 	uint16_t uid0, uid1;
   10878 	uint16_t nvm_data;
   10879 	uint16_t off;
   10880 	bool check_version = false;
   10881 	bool check_optionrom = false;
   10882 	bool have_build = false;
   10883 
   10884 	/*
   10885 	 * Version format:
   10886 	 *
   10887 	 * XYYZ
   10888 	 * X0YZ
   10889 	 * X0YY
   10890 	 *
   10891 	 * Example:
   10892 	 *
   10893 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10894 	 *	82571	0x50a6	5.10.6?
   10895 	 *	82572	0x506a	5.6.10?
   10896 	 *	82572EI	0x5069	5.6.9?
   10897 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10898 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10900 	 */
   10901 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10902 	switch (sc->sc_type) {
   10903 	case WM_T_82571:
   10904 	case WM_T_82572:
   10905 	case WM_T_82574:
   10906 	case WM_T_82583:
   10907 		check_version = true;
   10908 		check_optionrom = true;
   10909 		have_build = true;
   10910 		break;
   10911 	case WM_T_82575:
   10912 	case WM_T_82576:
   10913 	case WM_T_82580:
   10914 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10915 			check_version = true;
   10916 		break;
   10917 	case WM_T_I211:
   10918 		wm_nvm_version_invm(sc);
   10919 		goto printver;
   10920 	case WM_T_I210:
   10921 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10922 			wm_nvm_version_invm(sc);
   10923 			goto printver;
   10924 		}
   10925 		/* FALLTHROUGH */
   10926 	case WM_T_I350:
   10927 	case WM_T_I354:
   10928 		check_version = true;
   10929 		check_optionrom = true;
   10930 		break;
   10931 	default:
   10932 		return;
   10933 	}
   10934 	if (check_version) {
   10935 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10936 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10937 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10938 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10939 			build = nvm_data & NVM_BUILD_MASK;
   10940 			have_build = true;
   10941 		} else
   10942 			minor = nvm_data & 0x00ff;
   10943 
   10944 		/* Decimal */
   10945 		minor = (minor / 16) * 10 + (minor % 16);
   10946 		sc->sc_nvm_ver_major = major;
   10947 		sc->sc_nvm_ver_minor = minor;
   10948 
   10949 printver:
   10950 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10951 		    sc->sc_nvm_ver_minor);
   10952 		if (have_build) {
   10953 			sc->sc_nvm_ver_build = build;
   10954 			aprint_verbose(".%d", build);
   10955 		}
   10956 	}
   10957 	if (check_optionrom) {
   10958 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10959 		/* Option ROM Version */
   10960 		if ((off != 0x0000) && (off != 0xffff)) {
   10961 			off += NVM_COMBO_VER_OFF;
   10962 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10963 			wm_nvm_read(sc, off, 1, &uid0);
   10964 			if ((uid0 != 0) && (uid0 != 0xffff)
   10965 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10966 				/* 16bits */
   10967 				major = uid0 >> 8;
   10968 				build = (uid0 << 8) | (uid1 >> 8);
   10969 				patch = uid1 & 0x00ff;
   10970 				aprint_verbose(", option ROM Version %d.%d.%d",
   10971 				    major, build, patch);
   10972 			}
   10973 		}
   10974 	}
   10975 
   10976 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10977 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10978 }
   10979 
   10980 /*
   10981  * wm_nvm_read:
   10982  *
   10983  *	Read data from the serial EEPROM.
   10984  */
   10985 static int
   10986 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10987 {
   10988 	int rv;
   10989 
   10990 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10991 		return 1;
   10992 
   10993 	if (wm_nvm_acquire(sc))
   10994 		return 1;
   10995 
   10996 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10997 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10998 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10999 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11000 	else if (sc->sc_type == WM_T_PCH_SPT)
   11001 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11002 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11003 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11004 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11005 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11006 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11007 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11008 	else
   11009 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11010 
   11011 	wm_nvm_release(sc);
   11012 	return rv;
   11013 }
   11014 
   11015 /*
   11016  * Hardware semaphores.
 * Very complex...
   11018  */
   11019 
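/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software (SMBI) and firmware (SWESMBI) semaphore
 *	bits in the SWSM register.
 */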
   11020 static int
   11021 wm_get_swsm_semaphore(struct wm_softc *sc)
   11022 {
   11023 	int32_t timeout;
   11024 	uint32_t swsm;
   11025 
   11026 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11027 		/* Get the SW semaphore. */
   11028 		timeout = sc->sc_nvm_wordsize + 1;
   11029 		while (timeout) {
   11030 			swsm = CSR_READ(sc, WMREG_SWSM);
   11031 
   11032 			if ((swsm & SWSM_SMBI) == 0)
   11033 				break;
   11034 
   11035 			delay(50);
   11036 			timeout--;
   11037 		}
   11038 
   11039 		if (timeout == 0) {
   11040 			aprint_error_dev(sc->sc_dev,
   11041 			    "could not acquire SWSM SMBI\n");
   11042 			return 1;
   11043 		}
   11044 	}
   11045 
   11046 	/* Get the FW semaphore. */
   11047 	timeout = sc->sc_nvm_wordsize + 1;
   11048 	while (timeout) {
   11049 		swsm = CSR_READ(sc, WMREG_SWSM);
   11050 		swsm |= SWSM_SWESMBI;
   11051 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11052 		/* If we managed to set the bit we got the semaphore. */
   11053 		swsm = CSR_READ(sc, WMREG_SWSM);
   11054 		if (swsm & SWSM_SWESMBI)
   11055 			break;
   11056 
   11057 		delay(50);
   11058 		timeout--;
   11059 	}
   11060 
   11061 	if (timeout == 0) {
   11062 		aprint_error_dev(sc->sc_dev,
   11063 		    "could not acquire SWSM SWESMBI\n");
   11064 		/* Release semaphores */
   11065 		wm_put_swsm_semaphore(sc);
   11066 		return 1;
   11067 	}
   11068 	return 0;
   11069 }
   11070 
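/*
 * wm_put_swsm_semaphore:
 *
 *	Release the SWSM semaphore bits.
 */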
   11071 static void
   11072 wm_put_swsm_semaphore(struct wm_softc *sc)
   11073 {
   11074 	uint32_t swsm;
   11075 
   11076 	swsm = CSR_READ(sc, WMREG_SWSM);
   11077 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11078 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11079 }
   11080 
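/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software/firmware synchronization semaphore for
 *	the resources given by the mask.
 */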
   11081 static int
   11082 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11083 {
   11084 	uint32_t swfw_sync;
   11085 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11086 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11088 
   11089 	for (timeout = 0; timeout < 200; timeout++) {
   11090 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11091 			if (wm_get_swsm_semaphore(sc)) {
   11092 				aprint_error_dev(sc->sc_dev,
   11093 				    "%s: failed to get semaphore\n",
   11094 				    __func__);
   11095 				return 1;
   11096 			}
   11097 		}
   11098 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11099 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11100 			swfw_sync |= swmask;
   11101 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11102 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11103 				wm_put_swsm_semaphore(sc);
   11104 			return 0;
   11105 		}
   11106 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11107 			wm_put_swsm_semaphore(sc);
   11108 		delay(5000);
   11109 	}
   11110 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11111 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11112 	return 1;
   11113 }
   11114 
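/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software/firmware synchronization semaphore.
 */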
   11115 static void
   11116 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11117 {
   11118 	uint32_t swfw_sync;
   11119 
   11120 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11121 		while (wm_get_swsm_semaphore(sc) != 0)
   11122 			continue;
   11123 	}
   11124 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11125 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11126 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11127 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11128 		wm_put_swsm_semaphore(sc);
   11129 }
   11130 
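/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire MDIO software ownership through the EXTCNFCTR register
 *	(ICH/PCH class devices).
 */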
   11131 static int
   11132 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11133 {
   11134 	uint32_t ext_ctrl;
	int timeout;
   11136 
   11137 	for (timeout = 0; timeout < 200; timeout++) {
   11138 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11139 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11140 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11141 
   11142 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11143 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11144 			return 0;
   11145 		delay(5000);
   11146 	}
   11147 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11148 	    device_xname(sc->sc_dev), ext_ctrl);
   11149 	return 1;
   11150 }
   11151 
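/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release MDIO software ownership.
 */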
   11152 static void
   11153 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11154 {
   11155 	uint32_t ext_ctrl;
   11156 
   11157 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11158 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11159 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11160 }
   11161 
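/*
 * wm_get_hw_semaphore_82573:
 *
 *	Acquire MDIO software ownership on 82573 class devices,
 *	retrying up to WM_MDIO_OWNERSHIP_TIMEOUT times.
 */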
   11162 static int
   11163 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11164 {
   11165 	int i = 0;
   11166 	uint32_t reg;
   11167 
   11168 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11169 	do {
   11170 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11171 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11172 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11173 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11174 			break;
   11175 		delay(2*1000);
   11176 		i++;
   11177 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11178 
   11179 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11180 		wm_put_hw_semaphore_82573(sc);
   11181 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11182 		    device_xname(sc->sc_dev));
   11183 		return -1;
   11184 	}
   11185 
   11186 	return 0;
   11187 }
   11188 
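/*
 * wm_put_hw_semaphore_82573:
 *
 *	Release MDIO software ownership on 82573 class devices.
 */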
   11189 static void
   11190 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11191 {
   11192 	uint32_t reg;
   11193 
   11194 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11195 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11196 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11197 }
   11198 
   11199 /*
   11200  * Management mode and power management related subroutines.
   11201  * BMC, AMT, suspend/resume and EEE.
   11202  */
   11203 
   11204 #ifdef WM_WOL
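/*
 * wm_check_mng_mode:
 *
 *	Check whether the firmware is running in a management mode,
 *	dispatching to the chip specific check function.
 */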
   11205 static int
   11206 wm_check_mng_mode(struct wm_softc *sc)
   11207 {
   11208 	int rv;
   11209 
   11210 	switch (sc->sc_type) {
   11211 	case WM_T_ICH8:
   11212 	case WM_T_ICH9:
   11213 	case WM_T_ICH10:
   11214 	case WM_T_PCH:
   11215 	case WM_T_PCH2:
   11216 	case WM_T_PCH_LPT:
   11217 	case WM_T_PCH_SPT:
   11218 		rv = wm_check_mng_mode_ich8lan(sc);
   11219 		break;
   11220 	case WM_T_82574:
   11221 	case WM_T_82583:
   11222 		rv = wm_check_mng_mode_82574(sc);
   11223 		break;
   11224 	case WM_T_82571:
   11225 	case WM_T_82572:
   11226 	case WM_T_82573:
   11227 	case WM_T_80003:
   11228 		rv = wm_check_mng_mode_generic(sc);
   11229 		break;
   11230 	default:
		/* Nothing to do */
   11232 		rv = 0;
   11233 		break;
   11234 	}
   11235 
   11236 	return rv;
   11237 }
   11238 
   11239 static int
   11240 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11241 {
   11242 	uint32_t fwsm;
   11243 
   11244 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11245 
   11246 	if (((fwsm & FWSM_FW_VALID) != 0)
   11247 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11248 		return 1;
   11249 
   11250 	return 0;
   11251 }
   11252 
   11253 static int
   11254 wm_check_mng_mode_82574(struct wm_softc *sc)
   11255 {
   11256 	uint16_t data;
   11257 
   11258 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11259 
   11260 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11261 		return 1;
   11262 
   11263 	return 0;
   11264 }
   11265 
   11266 static int
   11267 wm_check_mng_mode_generic(struct wm_softc *sc)
   11268 {
   11269 	uint32_t fwsm;
   11270 
   11271 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11272 
   11273 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11274 		return 1;
   11275 
   11276 	return 0;
   11277 }
   11278 #endif /* WM_WOL */
   11279 
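/*
 * wm_enable_mng_pass_thru:
 *
 *	Check whether management pass-through should be enabled, i.e.
 *	whether received management packets must be passed to the host
 *	interface for the firmware.
 */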
   11280 static int
   11281 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11282 {
   11283 	uint32_t manc, fwsm, factps;
   11284 
   11285 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11286 		return 0;
   11287 
   11288 	manc = CSR_READ(sc, WMREG_MANC);
   11289 
   11290 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11291 		device_xname(sc->sc_dev), manc));
   11292 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11293 		return 0;
   11294 
   11295 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11296 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11297 		factps = CSR_READ(sc, WMREG_FACTPS);
   11298 		if (((factps & FACTPS_MNGCG) == 0)
   11299 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11300 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   11302 		uint16_t data;
   11303 
   11304 		factps = CSR_READ(sc, WMREG_FACTPS);
   11305 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11306 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11307 			device_xname(sc->sc_dev), factps, data));
   11308 		if (((factps & FACTPS_MNGCG) == 0)
   11309 		    && ((data & NVM_CFG2_MNGM_MASK)
   11310 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11311 			return 1;
   11312 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11313 	    && ((manc & MANC_ASF_EN) == 0))
   11314 		return 1;
   11315 
   11316 	return 0;
   11317 }
   11318 
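/*
 * wm_phy_resetisblocked:
 *
 *	Check whether the firmware is currently blocking PHY resets.
 */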
   11319 static bool
   11320 wm_phy_resetisblocked(struct wm_softc *sc)
   11321 {
   11322 	bool blocked = false;
   11323 	uint32_t reg;
   11324 	int i = 0;
   11325 
   11326 	switch (sc->sc_type) {
   11327 	case WM_T_ICH8:
   11328 	case WM_T_ICH9:
   11329 	case WM_T_ICH10:
   11330 	case WM_T_PCH:
   11331 	case WM_T_PCH2:
   11332 	case WM_T_PCH_LPT:
   11333 	case WM_T_PCH_SPT:
   11334 		do {
   11335 			reg = CSR_READ(sc, WMREG_FWSM);
   11336 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11337 				blocked = true;
   11338 				delay(10*1000);
   11339 				continue;
   11340 			}
   11341 			blocked = false;
   11342 		} while (blocked && (i++ < 10));
   11343 		return blocked;
   11345 	case WM_T_82571:
   11346 	case WM_T_82572:
   11347 	case WM_T_82573:
   11348 	case WM_T_82574:
   11349 	case WM_T_82583:
   11350 	case WM_T_80003:
   11351 		reg = CSR_READ(sc, WMREG_MANC);
   11352 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11353 			return true;
   11354 		else
   11355 			return false;
   11357 	default:
   11358 		/* no problem */
   11359 		break;
   11360 	}
   11361 
   11362 	return false;
   11363 }
   11364 
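/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken control of the
 *	device by setting the DRV_LOAD bit.
 */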
   11365 static void
   11366 wm_get_hw_control(struct wm_softc *sc)
   11367 {
   11368 	uint32_t reg;
   11369 
   11370 	switch (sc->sc_type) {
   11371 	case WM_T_82573:
   11372 		reg = CSR_READ(sc, WMREG_SWSM);
   11373 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11374 		break;
   11375 	case WM_T_82571:
   11376 	case WM_T_82572:
   11377 	case WM_T_82574:
   11378 	case WM_T_82583:
   11379 	case WM_T_80003:
   11380 	case WM_T_ICH8:
   11381 	case WM_T_ICH9:
   11382 	case WM_T_ICH10:
   11383 	case WM_T_PCH:
   11384 	case WM_T_PCH2:
   11385 	case WM_T_PCH_LPT:
   11386 	case WM_T_PCH_SPT:
   11387 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11388 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11389 		break;
   11390 	default:
   11391 		break;
   11392 	}
   11393 }
   11394 
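/*
 * wm_release_hw_control:
 *
 *	Tell the firmware that the driver has released control of the
 *	device by clearing the DRV_LOAD bit.
 */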
   11395 static void
   11396 wm_release_hw_control(struct wm_softc *sc)
   11397 {
   11398 	uint32_t reg;
   11399 
   11400 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11401 		return;
   11402 
   11403 	if (sc->sc_type == WM_T_82573) {
   11404 		reg = CSR_READ(sc, WMREG_SWSM);
   11405 		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
   11407 	} else {
   11408 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11409 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11410 	}
   11411 }
   11412 
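/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate or ungate automatic PHY configuration by hardware (PCH2
 *	and newer).
 */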
   11413 static void
   11414 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11415 {
   11416 	uint32_t reg;
   11417 
   11418 	if (sc->sc_type < WM_T_PCH2)
   11419 		return;
   11420 
   11421 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11422 
   11423 	if (gate)
   11424 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11425 	else
   11426 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11427 
   11428 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11429 }
   11430 
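/*
 * wm_smbustopci:
 *
 *	Switch the PHY interface from SMBus mode back to normal mode
 *	by toggling the LANPHYPC pin, unless the firmware owns the PHY
 *	or the PHY reset is blocked.
 */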
   11431 static void
   11432 wm_smbustopci(struct wm_softc *sc)
   11433 {
   11434 	uint32_t fwsm, reg;
   11435 
   11436 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11437 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11438 
   11439 	/* Acquire semaphore */
   11440 	wm_get_swfwhw_semaphore(sc);
   11441 
   11442 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11443 	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_phy_resetisblocked(sc) == false)) {
   11445 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11446 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11447 			reg |= CTRL_EXT_FORCE_SMBUS;
   11448 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11449 			CSR_WRITE_FLUSH(sc);
   11450 			delay(50*1000);
   11451 		}
   11452 
   11453 		/* Toggle LANPHYPC */
   11454 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11455 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11456 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11457 		CSR_WRITE_FLUSH(sc);
   11458 		delay(10);
   11459 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11460 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11461 		CSR_WRITE_FLUSH(sc);
   11462 		delay(50*1000);
   11463 
   11464 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11465 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11466 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11467 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11468 		}
   11469 	}
   11470 
   11471 	/* Release semaphore */
   11472 	wm_put_swfwhw_semaphore(sc);
   11473 
   11474 	/*
   11475 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11476 	 */
   11477 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11478 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11479 }
   11480 
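/*
 * wm_init_manageability:
 *
 *	Disable hardware interception of ARP and enable reception of
 *	management packets by the host while the driver is active.
 */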
   11481 static void
   11482 wm_init_manageability(struct wm_softc *sc)
   11483 {
   11484 
   11485 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11486 		device_xname(sc->sc_dev), __func__));
   11487 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11488 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11489 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11490 
   11491 		/* Disable hardware interception of ARP */
   11492 		manc &= ~MANC_ARP_EN;
   11493 
   11494 		/* Enable receiving management packets to the host */
   11495 		if (sc->sc_type >= WM_T_82571) {
   11496 			manc |= MANC_EN_MNG2HOST;
   11497 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11498 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11499 		}
   11500 
   11501 		CSR_WRITE(sc, WMREG_MANC, manc);
   11502 	}
   11503 }
   11504 
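         /*
          * wm_release_manageability:
          *
          *	Undo wm_init_manageability(): hand ARP handling back to the
          *	firmware and stop forwarding management packets to the host.
          */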
   11505 static void
   11506 wm_release_manageability(struct wm_softc *sc)
   11507 {
   11508 
   11509 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11510 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11511 
   11512 		manc |= MANC_ARP_EN;
   11513 		if (sc->sc_type >= WM_T_82571)
   11514 			manc &= ~MANC_EN_MNG2HOST;
   11515 
   11516 		CSR_WRITE(sc, WMREG_MANC, manc);
   11517 	}
   11518 }
   11519 
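         /*
          * wm_get_wakeup:
          *
          *	Derive the wakeup/management flags for this chip:
          *	WM_F_HAS_AMT and WM_F_ASF_FIRMWARE_PRES from the chip type,
          *	WM_F_ARC_SUBSYS_VALID from the FWSM mode bits and
          *	WM_F_HAS_MANAGE from wm_enable_mng_pass_thru().
          */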
   11520 static void
   11521 wm_get_wakeup(struct wm_softc *sc)
   11522 {
   11523 
   11524 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11525 	switch (sc->sc_type) {
   11526 	case WM_T_82573:
   11527 	case WM_T_82583:
   11528 		sc->sc_flags |= WM_F_HAS_AMT;
   11529 		/* FALLTHROUGH */
   11530 	case WM_T_80003:
   11531 	case WM_T_82541:
   11532 	case WM_T_82547:
   11533 	case WM_T_82571:
   11534 	case WM_T_82572:
   11535 	case WM_T_82574:
   11536 	case WM_T_82575:
   11537 	case WM_T_82576:
   11538 	case WM_T_82580:
   11539 	case WM_T_I350:
   11540 	case WM_T_I354:
   11541 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11542 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11543 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11544 		break;
   11545 	case WM_T_ICH8:
   11546 	case WM_T_ICH9:
   11547 	case WM_T_ICH10:
   11548 	case WM_T_PCH:
   11549 	case WM_T_PCH2:
   11550 	case WM_T_PCH_LPT:
   11551 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11552 		sc->sc_flags |= WM_F_HAS_AMT;
   11553 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11554 		break;
   11555 	default:
   11556 		break;
   11557 	}
   11558 
   11559 	/* 1: HAS_MANAGE */
   11560 	if (wm_enable_mng_pass_thru(sc) != 0)
   11561 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11562 
   11563 #ifdef WM_DEBUG
   11564 	printf("\n");
   11565 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11566 		printf("HAS_AMT,");
   11567 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11568 		printf("ARC_SUBSYS_VALID,");
   11569 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11570 		printf("ASF_FIRMWARE_PRES,");
   11571 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11572 		printf("HAS_MANAGE,");
   11573 	printf("\n");
   11574 #endif
   11575 	/*
   11576 	 * Note that the WOL flags are set after the EEPROM stuff has
   11577 	 * been reset.
   11578 	 */
   11579 }
   11580 
   11581 #ifdef WM_WOL
   11582 /* WOL in the newer chipset interfaces (pchlan) */
   11583 static void
   11584 wm_enable_phy_wakeup(struct wm_softc *sc)
   11585 {
   11586 #if 0
   11587 	uint16_t preg;
   11588 
   11589 	/* Copy MAC RARs to PHY RARs */
   11590 
   11591 	/* Copy MAC MTA to PHY MTA */
   11592 
   11593 	/* Configure PHY Rx Control register */
   11594 
   11595 	/* Enable PHY wakeup in MAC register */
   11596 
   11597 	/* Configure and enable PHY wakeup in PHY registers */
   11598 
   11599 	/* Activate PHY wakeup */
   11600 
   11601 	/* XXX */
   11602 #endif
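         	/*
         	 * XXX Not implemented yet.  For reference, Intel's e1000e
         	 * driver implements this by copying the MAC's receive address
         	 * registers (RAR) and multicast table (MTA) into the PHY's
         	 * wakeup register space and then enabling wakeup in both the
         	 * MAC and the PHY.
         	 */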
   11603 }
   11604 
   11605 /* Power down workaround on D3 */
   11606 static void
   11607 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11608 {
   11609 	uint32_t reg;
   11610 	int i;
   11611 
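         	/*
         	 * Try at most twice: put the IGP3 voltage regulator into
         	 * shutdown mode, read the setting back and, if it did not
         	 * stick, issue a PHY reset and retry once.
         	 */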
   11612 	for (i = 0; i < 2; i++) {
   11613 		/* Disable link */
   11614 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11615 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11616 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11617 
   11618 		/*
   11619 		 * Call gig speed drop workaround on Gig disable before
   11620 		 * accessing any PHY registers
   11621 		 */
   11622 		if (sc->sc_type == WM_T_ICH8)
   11623 			wm_gig_downshift_workaround_ich8lan(sc);
   11624 
   11625 		/* Write VR power-down enable */
   11626 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11627 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11628 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11629 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11630 
   11631 		/* Read it back and test */
   11632 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11633 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11634 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11635 			break;
   11636 
   11637 		/* Issue PHY reset and repeat at most one more time */
   11638 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11639 	}
   11640 }
   11641 
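         /*
          * wm_enable_wakeup:
          *
          *	Prepare the chip for wake-on-LAN before suspend: advertise
          *	the wakeup capability, disable gigabit during WOL on ICH/PCH
          *	variants, arm the magic packet filter (WUFC_MAG) and finally
          *	set PME_EN in the PCI power management control/status
          *	register.
          */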
   11642 static void
   11643 wm_enable_wakeup(struct wm_softc *sc)
   11644 {
   11645 	uint32_t reg;
         	int pmreg;
   11646 	pcireg_t pmode;
   11647 
   11648 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11649 		&pmreg, NULL) == 0)
   11650 		return;
   11651 
   11652 	/* Advertise the wakeup capability */
   11653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11654 	    | CTRL_SWDPIN(3));
   11655 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11656 
   11657 	/* ICH workaround */
   11658 	switch (sc->sc_type) {
   11659 	case WM_T_ICH8:
   11660 	case WM_T_ICH9:
   11661 	case WM_T_ICH10:
   11662 	case WM_T_PCH:
   11663 	case WM_T_PCH2:
   11664 	case WM_T_PCH_LPT:
   11665 	case WM_T_PCH_SPT:
   11666 		/* Disable gig during WOL */
   11667 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11668 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11669 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11670 		if (sc->sc_type == WM_T_PCH)
   11671 			wm_gmii_reset(sc);
   11672 
   11673 		/* Power down workaround */
   11674 		if (sc->sc_phytype == WMPHY_82577) {
   11675 			struct mii_softc *child;
   11676 
   11677 			/* Assume that the PHY is copper */
   11678 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11679 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   11680 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11681 				    (768 << 5) | 25, 0x0444); /* magic num */
   11682 		}
   11683 		break;
   11684 	default:
   11685 		break;
   11686 	}
   11687 
   11688 	/* Keep the laser running on fiber adapters */
   11689 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11690 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11691 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11692 		reg |= CTRL_EXT_SWDPIN(3);
   11693 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11694 	}
   11695 
   11696 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11697 #if 0	/* for the multicast packet */
   11698 	reg |= WUFC_MC;
   11699 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11700 #endif
   11701 
   11702 	if (sc->sc_type == WM_T_PCH) {
   11703 		wm_enable_phy_wakeup(sc);
   11704 	} else {
   11705 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11706 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11707 	}
   11708 
   11709 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11710 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11711 		|| (sc->sc_type == WM_T_PCH2))
   11712 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11713 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11714 
   11715 	/* Request PME */
   11716 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11717 #if 0
   11718 	/* Disable WOL */
   11719 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11720 #else
   11721 	/* For WOL */
   11722 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11723 #endif
   11724 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11725 }
   11726 #endif /* WM_WOL */
   11727 
   11728 /* LPLU */
   11729 
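         /*
          * LPLU (Low Power Link Up) trades link speed for lower power
          * consumption.  The two functions below disable LPLU for the D0
          * power state: the first via the PHY_CTRL MAC register, the second
          * (for PCH) via the HV_OEM_BITS PHY register, also requesting an
          * autonegotiation restart (ANEGNOW) so that the change takes
          * effect.
          */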
   11730 static void
   11731 wm_lplu_d0_disable(struct wm_softc *sc)
   11732 {
   11733 	uint32_t reg;
   11734 
   11735 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11736 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11737 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11738 }
   11739 
   11740 static void
   11741 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11742 {
   11743 	uint32_t reg;
   11744 
   11745 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11746 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11747 	reg |= HV_OEM_BITS_ANEGNOW;
   11748 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11749 }
   11750 
   11751 /* EEE */
   11752 
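         /*
          * EEE (Energy Efficient Ethernet, IEEE 802.3az) lets the link
          * enter a low power idle (LPI) state between packets.  On the I350
          * it is controlled through IPCNFG (advertise EEE at 1G/100M) and
          * EEER (enter LPI on TX/RX idle, LPI flow control).
          */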
   11753 static void
   11754 wm_set_eee_i350(struct wm_softc *sc)
   11755 {
   11756 	uint32_t ipcnfg, eeer;
   11757 
   11758 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11759 	eeer = CSR_READ(sc, WMREG_EEER);
   11760 
   11761 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11762 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11763 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11764 		    | EEER_LPI_FC);
   11765 	} else {
   11766 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11767 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11768 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11769 		    | EEER_LPI_FC);
   11770 	}
   11771 
   11772 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11773 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11774 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11775 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11776 }
   11777 
   11778 /*
   11779  * Workarounds (mainly PHY related).
   11780  * Basically, PHY's workarounds are in the PHY drivers.
   11781  */
   11782 
   11783 /* Work-around for 82566 Kumeran PCS lock loss */
   11784 static void
   11785 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11786 {
   11787 #if 0
   11788 	int miistatus, active, i;
   11789 	int reg;
   11790 
   11791 	miistatus = sc->sc_mii.mii_media_status;
   11792 
   11793 	/* If the link is not up, do nothing */
   11794 	if ((miistatus & IFM_ACTIVE) == 0)
   11795 		return;
   11796 
   11797 	active = sc->sc_mii.mii_media_active;
   11798 
   11799 	/* Nothing to do if the link is other than 1Gbps */
   11800 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11801 		return;
   11802 
   11803 	for (i = 0; i < 10; i++) {
   11804 		/* read twice */
   11805 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11806 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11807 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11808 			goto out;	/* GOOD! */
   11809 
   11810 		/* Reset the PHY */
   11811 		wm_gmii_reset(sc);
   11812 		delay(5*1000);
   11813 	}
   11814 
   11815 	/* Disable GigE link negotiation */
   11816 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11817 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11818 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11819 
   11820 	/*
   11821 	 * Call gig speed drop workaround on Gig disable before accessing
   11822 	 * any PHY registers.
   11823 	 */
   11824 	wm_gig_downshift_workaround_ich8lan(sc);
   11825 
   11826 out:
   11827 	return;
   11828 #endif
   11829 }
   11830 
   11831 /*
          * WOL from S5 stops working; work around it by toggling the
          * near-end loopback (NELPBK) bit in the Kumeran diagnostic
          * register (IGP3 only).
          */
   11832 static void
   11833 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11834 {
   11835 	uint16_t kmrn_reg;
   11836 
   11837 	/* Only for igp3 */
   11838 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11839 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11840 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11841 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11842 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11843 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11844 	}
   11845 }
   11846 
   11847 /*
   11848  * Workaround for pch's PHYs
   11849  * XXX should be moved to new PHY driver?
   11850  */
   11851 static void
   11852 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11853 {
   11854 	if (sc->sc_phytype == WMPHY_82577)
   11855 		wm_set_mdio_slow_mode_hv(sc);
   11856 
   11857 	/* XXX not yet: (PCH rev. 2) && (82577 && (phy rev. 2 or 3)) */
   11858 
   11859 	/* XXX not yet: (82577 && (phy rev. 1 or 2)) || (82578 && phy rev. 1) */
   11860 
   11861 	/* 82578 */
   11862 	if (sc->sc_phytype == WMPHY_82578) {
   11863 		/* PCH rev. < 3 */
   11864 		if (sc->sc_rev < 3) {
   11865 			/* XXX 6-bit shift? Why? Is it page 2? */
   11866 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11867 			    0x66c0);
   11868 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11869 			    0xffff);
   11870 		}
   11871 
   11872 		/* XXX phy rev. < 2 */
   11873 	}
   11874 
   11875 	/* Select page 0 */
   11876 
   11877 	/* XXX acquire semaphore */
   11878 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11879 	/* XXX release semaphore */
   11880 
   11881 	/*
   11882 	 * Configure the K1 Si workaround during PHY reset, assuming there
   11883 	 * is a link, so that K1 gets disabled when the link runs at 1Gbps.
   11884 	 */
   11885 	wm_k1_gig_workaround_hv(sc, 1);
   11886 }
   11887 
   11888 static void
   11889 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11890 {
   11891 
   11892 	wm_set_mdio_slow_mode_hv(sc);
   11893 }
   11894 
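         /*
          * K1 is a power save state of the MAC/PHY interconnect on PCH
          * devices.  It must not be active while the link runs at 1Gbps,
          * so this workaround disables K1 whenever a gigabit link is
          * assumed to be up and also applies a link stall fix via the KMRN
          * diagnostic register.
          */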
   11895 static void
   11896 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   11897 {
   11898 	int k1_enable = sc->sc_nvm_k1_enabled;
   11899 
   11900 	/* XXX acquire semaphore */
   11901 
   11902 	if (link) {
   11903 		k1_enable = 0;
   11904 
   11905 		/* Link stall fix for link up */
   11906 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   11907 	} else {
   11908 		/* Link stall fix for link down */
   11909 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   11910 	}
   11911 
   11912 	wm_configure_k1_ich8lan(sc, k1_enable);
   11913 
   11914 	/* XXX release semaphore */
   11915 }
   11916 
   11917 static void
   11918 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   11919 {
   11920 	uint32_t reg;
   11921 
   11922 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   11923 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   11924 	    reg | HV_KMRN_MDIO_SLOW);
   11925 }
   11926 
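         /*
          * Write the K1 setting through the Kumeran K1_CONFIG register,
          * then briefly force the MAC speed (CTRL_FRCSPD plus the
          * CTRL_EXT_SPD_BYPS bypass) so that the new configuration is
          * latched, and finally restore the original CTRL/CTRL_EXT values.
          */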
   11927 static void
   11928 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   11929 {
   11930 	uint32_t ctrl, ctrl_ext, tmp;
   11931 	uint16_t kmrn_reg;
   11932 
   11933 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   11934 
   11935 	if (k1_enable)
   11936 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   11937 	else
   11938 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   11939 
   11940 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   11941 
   11942 	delay(20);
   11943 
   11944 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11945 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11946 
   11947 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   11948 	tmp |= CTRL_FRCSPD;
   11949 
   11950 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   11951 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   11952 	CSR_WRITE_FLUSH(sc);
   11953 	delay(20);
   11954 
   11955 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   11956 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11957 	CSR_WRITE_FLUSH(sc);
   11958 	delay(20);
   11959 }
   11960 
   11961 /* Special case - the 82575 needs to be initialized manually ... */
   11962 static void
   11963 wm_reset_init_script_82575(struct wm_softc *sc)
   11964 {
   11965 	/*
   11966 	 * Remark: this is untested code - we have no board without EEPROM.
   11967 	 * The setup is the same as in the FreeBSD driver for the i82575.
   11968 	 */
   11969 
   11970 	/* SerDes configuration via SERDESCTRL */
   11971 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   11972 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   11973 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   11974 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   11975 
   11976 	/* CCM configuration via CCMCTL register */
   11977 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   11978 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   11979 
   11980 	/* PCIe lanes configuration */
   11981 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   11982 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   11983 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   11984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   11985 
   11986 	/* PCIe PLL Configuration */
   11987 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   11988 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   11989 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   11990 }
   11991 
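         /*
          * On an 82580 in SGMII mode, restore the MDIO configuration after
          * a reset from the CFG3 word in the NVM: EXT_MDIO selects the
          * external MDIO interface as the destination and COM_MDIO marks
          * that interface as shared between the ports.
          */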
   11992 static void
   11993 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   11994 {
   11995 	uint32_t reg;
   11996 	uint16_t nvmword;
   11997 	int rv;
   11998 
   11999 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12000 		return;
   12001 
   12002 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12003 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12004 	if (rv != 0) {
   12005 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12006 		    __func__);
   12007 		return;
   12008 	}
   12009 
   12010 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12011 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12012 		reg |= MDICNFG_DEST;
   12013 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12014 		reg |= MDICNFG_COM_MDIO;
   12015 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12016 }
   12017 
   12018 /*
   12019  * I210 Errata 25 and I211 Errata 10
   12020  * Slow System Clock.
   12021  */
   12022 static void
   12023 wm_pll_workaround_i210(struct wm_softc *sc)
   12024 {
   12025 	uint32_t mdicnfg, wuc;
   12026 	uint32_t reg;
   12027 	pcireg_t pcireg;
   12028 	int pmreg;
   12029 	uint16_t nvmword, tmp_nvmword;
   12030 	int phyval;
   12031 	bool wa_done = false;
   12032 	int i;
   12033 
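         	/*
         	 * Check whether the PHY PLL came up by reading its frequency
         	 * register.  If it reads back as unconfigured, the slow system
         	 * clock erratum has been hit: reset the internal PHY, rewrite
         	 * the iNVM autoload word with the PLL workaround bits set,
         	 * bounce the device through D3/D0 and check again, up to
         	 * WM_MAX_PLL_TRIES times.
         	 */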
   12034 	/* Save WUC and MDICNFG registers */
   12035 	wuc = CSR_READ(sc, WMREG_WUC);
   12036 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12037 
   12038 	reg = mdicnfg & ~MDICNFG_DEST;
   12039 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12040 
   12041 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12042 		nvmword = INVM_DEFAULT_AL;
   12043 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12044 
   12045 	/* Get Power Management cap offset */
   12046 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12047 		&pmreg, NULL) == 0) {
   12048 		/* Restore MDICNFG before bailing out */
         		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
         		return;
         	}
   12049 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12050 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12051 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12052 
   12053 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12054 			break; /* OK */
   12055 		}
   12056 
   12057 		wa_done = true;
   12058 		/* Directly reset the internal PHY */
   12059 		reg = CSR_READ(sc, WMREG_CTRL);
   12060 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12061 
   12062 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12063 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12064 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12065 
   12066 		CSR_WRITE(sc, WMREG_WUC, 0);
   12067 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12068 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12069 
   12070 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12071 		    pmreg + PCI_PMCSR);
   12072 		pcireg |= PCI_PMCSR_STATE_D3;
   12073 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12074 		    pmreg + PCI_PMCSR, pcireg);
   12075 		delay(1000);
   12076 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12077 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12078 		    pmreg + PCI_PMCSR, pcireg);
   12079 
   12080 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12081 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12082 
   12083 		/* Restore WUC register */
   12084 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12085 	}
   12086 
   12087 	/* Restore MDICNFG setting */
   12088 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12089 	if (wa_done)
   12090 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12091 }
   12092