      1 /*	$NetBSD: if_wm.c,v 1.423 2016/10/20 08:03:13 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.423 2016/10/20 08:03:13 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
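
        /*
         * Illustrative use of DPRINTF() (an added example, not from the
         * original sources; assumes a "struct wm_softc *sc" in scope):
         * the second argument carries its own parentheses so an arbitrary
         * printf() argument list can pass through the macro:
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
         *	    device_xname(sc->sc_dev)));
         */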
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this driver can use.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
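
        /*
         * A worked example of the mask arithmetic above (illustrative):
         * with WM_NTXDESC(txq) == 256, WM_NTXDESC_MASK(txq) == 0xff, so
         * WM_NEXTTX(txq, 255) == (255 + 1) & 0xff == 0 and the index
         * wraps around the ring.  This is why txq_ndesc and txq_num must
         * be powers of two.
         */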
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
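
        /*
         * (Worked numbers, for reference: a ~9k jumbo frame split across
         * 2k buffers needs ceil(9018 / 2048) == 5 descriptors, and
         * 256 / 5 leaves room for roughly 50 in-flight jumbo packets.)
         */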
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
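
        /*
         * Illustrative: with the 16-byte legacy wiseman_rxdesc_t,
         * WM_CDRXOFF(3) == 48, the byte offset of the fourth Rx descriptor
         * within the control-data area.  WM_CDTXOFF() scales by
         * txq_descsize instead, the size of whichever Tx descriptor
         * format the queue uses.
         */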
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
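
        /*
         * Illustrative expansion (added example, not from the original
         * sources): WM_Q_EVCNT_DEFINE(txq, txdw) declares
         *
         *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
         *	struct evcnt txq_ev_txdw;
         *
         * Note the string literal is used only for its length; "##" is
         * not pasted inside string literals.  WM_Q_EVCNT_ATTACH() then
         * formats the name (e.g. "txq00txdw" for queue 0) and registers
         * the counter with evcnt_attach_dynamic().
         */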
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segment */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > the number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs.  This intermediate queue passes packets to it without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage each Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 #ifdef WM_EVENT_COUNTERS
    328 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    329 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    330 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    331 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    332 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    333 						/* XXX not used? */
    334 
    335 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    336 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    337 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    339 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    340 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    341 
    342 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    345 
    346 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    347 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    348 #endif /* WM_EVENT_COUNTERS */
    349 };
    350 
    351 struct wm_rxqueue {
    352 	kmutex_t *rxq_lock;		/* lock for rx operations */
    353 
    354 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    355 
    356 	/* Software state for the receive descriptors. */
    357 	wiseman_rxdesc_t *rxq_descs;
    358 
    359 	/* RX control data structures. */
    360 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    361 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    363 	int rxq_desc_rseg;		/* real number of control segment */
    364 	size_t rxq_desc_size;		/* control data size */
    365 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    366 
    367 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    368 
    369 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    370 	int rxq_discard;
    371 	int rxq_len;
    372 	struct mbuf *rxq_head;
    373 	struct mbuf *rxq_tail;
    374 	struct mbuf **rxq_tailp;
    375 
    376 #ifdef WM_EVENT_COUNTERS
    377 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    378 
    379 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
    380 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
    381 #endif
    382 };
    383 
    384 struct wm_queue {
    385 	int wmq_id;			/* index of transmit and receive queues */
    386 	int wmq_intr_idx;		/* index of MSI-X tables */
    387 
    388 	struct wm_txqueue wmq_txq;
    389 	struct wm_rxqueue wmq_rxq;
    390 };
    391 
    392 /*
    393  * Software state per device.
    394  */
    395 struct wm_softc {
    396 	device_t sc_dev;		/* generic device information */
    397 	bus_space_tag_t sc_st;		/* bus space tag */
    398 	bus_space_handle_t sc_sh;	/* bus space handle */
    399 	bus_size_t sc_ss;		/* bus space size */
    400 	bus_space_tag_t sc_iot;		/* I/O space tag */
    401 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    402 	bus_size_t sc_ios;		/* I/O space size */
    403 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    404 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    405 	bus_size_t sc_flashs;		/* flash registers space size */
    406 	off_t sc_flashreg_offset;	/*
    407 					 * offset to flash registers from
    408 					 * start of BAR
    409 					 */
    410 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    411 
    412 	struct ethercom sc_ethercom;	/* ethernet common data */
    413 	struct mii_data sc_mii;		/* MII/media information */
    414 
    415 	pci_chipset_tag_t sc_pc;
    416 	pcitag_t sc_pcitag;
    417 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    418 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    419 
    420 	uint16_t sc_pcidevid;		/* PCI device ID */
    421 	wm_chip_type sc_type;		/* MAC type */
    422 	int sc_rev;			/* MAC revision */
    423 	wm_phy_type sc_phytype;		/* PHY type */
    424 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    425 #define	WM_MEDIATYPE_UNKNOWN		0x00
    426 #define	WM_MEDIATYPE_FIBER		0x01
    427 #define	WM_MEDIATYPE_COPPER		0x02
    428 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    429 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    430 	int sc_flags;			/* flags; see below */
    431 	int sc_if_flags;		/* last if_flags */
    432 	int sc_flowflags;		/* 802.3x flow control flags */
    433 	int sc_align_tweak;
    434 
    435 	void *sc_ihs[WM_MAX_NINTR];	/*
    436 					 * Interrupt cookies.
    437 					 * Legacy and MSI use sc_ihs[0].
    438 					 */
    439 	pci_intr_handle_t *sc_intrs;	/* Legacy and MSI use sc_intrs[0] */
    440 	int sc_nintrs;			/* number of interrupts */
    441 
    442 	int sc_link_intr_idx;		/* index of MSI-X tables */
    443 
    444 	callout_t sc_tick_ch;		/* tick callout */
    445 	bool sc_stopping;
    446 
    447 	int sc_nvm_ver_major;
    448 	int sc_nvm_ver_minor;
    449 	int sc_nvm_ver_build;
    450 	int sc_nvm_addrbits;		/* NVM address bits */
    451 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    452 	int sc_ich8_flash_base;
    453 	int sc_ich8_flash_bank_size;
    454 	int sc_nvm_k1_enabled;
    455 
    456 	int sc_nqueues;
    457 	struct wm_queue *sc_queue;
    458 
    459 	int sc_affinity_offset;
    460 
    461 #ifdef WM_EVENT_COUNTERS
    462 	/* Event counters. */
    463 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    464 
    465 	/* WM_T_82542_2_1 only */
    466 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    467 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    468 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    469 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    470 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    471 #endif /* WM_EVENT_COUNTERS */
    472 
    473 	/* This variable is used only on the 82547. */
    474 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    475 
    476 	uint32_t sc_ctrl;		/* prototype CTRL register */
    477 #if 0
    478 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    479 #endif
    480 	uint32_t sc_icr;		/* prototype interrupt bits */
    481 	uint32_t sc_itr;		/* prototype intr throttling reg */
    482 	uint32_t sc_tctl;		/* prototype TCTL register */
    483 	uint32_t sc_rctl;		/* prototype RCTL register */
    484 	uint32_t sc_txcw;		/* prototype TXCW register */
    485 	uint32_t sc_tipg;		/* prototype TIPG register */
    486 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    487 	uint32_t sc_pba;		/* prototype PBA register */
    488 
    489 	int sc_tbi_linkup;		/* TBI link status */
    490 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    491 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    492 
    493 	int sc_mchash_type;		/* multicast filter offset */
    494 
    495 	krndsource_t rnd_source;	/* random source */
    496 
    497 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    498 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    499 
    500 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    501 };
    502 
    503 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    504 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    505 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    506 
    507 #ifdef WM_MPSAFE
    508 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    509 #else
    510 #define CALLOUT_FLAGS	0
    511 #endif
    512 
    513 #define	WM_RXCHAIN_RESET(rxq)						\
    514 do {									\
    515 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    516 	*(rxq)->rxq_tailp = NULL;					\
    517 	(rxq)->rxq_len = 0;						\
    518 } while (/*CONSTCOND*/0)
    519 
    520 #define	WM_RXCHAIN_LINK(rxq, m)						\
    521 do {									\
    522 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    523 	(rxq)->rxq_tailp = &(m)->m_next;				\
    524 } while (/*CONSTCOND*/0)
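
        /*
         * Sketch of how the chain macros cooperate (illustrative):
         * after WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head, so the
         * first WM_RXCHAIN_LINK(rxq, m) stores m into rxq_head; each
         * later link appends through the previous mbuf's m_next, keeping
         * appends O(1) with no list walk.
         */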
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    528 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    529 
    530 #define WM_Q_EVCNT_INCR(qname, evname)			\
    531 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    532 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    533 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    534 #else /* !WM_EVENT_COUNTERS */
    535 #define	WM_EVCNT_INCR(ev)	/* nothing */
    536 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    537 
    538 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    539 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    540 #endif /* !WM_EVENT_COUNTERS */
    541 
    542 #define	CSR_READ(sc, reg)						\
    543 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    544 #define	CSR_WRITE(sc, reg, val)						\
    545 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    546 #define	CSR_WRITE_FLUSH(sc)						\
    547 	(void) CSR_READ((sc), WMREG_STATUS)
    548 
    549 #define ICH8_FLASH_READ32(sc, reg)					\
    550 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    551 	    (reg) + sc->sc_flashreg_offset)
    552 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    553 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    554 	    (reg) + sc->sc_flashreg_offset, (data))
    555 
    556 #define ICH8_FLASH_READ16(sc, reg)					\
    557 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    558 	    (reg) + sc->sc_flashreg_offset)
    559 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    560 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    561 	    (reg) + sc->sc_flashreg_offset, (data))
    562 
    563 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    564 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    565 
    566 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    567 #define	WM_CDTXADDR_HI(txq, x)						\
    568 	(sizeof(bus_addr_t) == 8 ?					\
    569 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    570 
    571 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    572 #define	WM_CDRXADDR_HI(rxq, x)						\
    573 	(sizeof(bus_addr_t) == 8 ?					\
    574 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
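
        /*
         * Illustrative: the hardware takes descriptor base addresses as
         * 32-bit lo/hi register pairs (TDBAL/TDBAH style).  With an
         * 8-byte bus_addr_t and an address of 0x123456789a, _LO yields
         * 0x3456789a and _HI yields 0x12; with a 4-byte bus_addr_t the
         * high half is always 0.
         */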
    575 
    576 /*
    577  * Register read/write functions.
    578  * Other than CSR_{READ|WRITE}().
    579  */
    580 #if 0
    581 static inline uint32_t wm_io_read(struct wm_softc *, int);
    582 #endif
    583 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    584 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    585 	uint32_t, uint32_t);
    586 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    587 
    588 /*
    589  * Descriptor sync/init functions.
    590  */
    591 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    592 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    593 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    594 
    595 /*
    596  * Device driver interface functions and commonly used functions.
    597  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    598  */
    599 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    600 static int	wm_match(device_t, cfdata_t, void *);
    601 static void	wm_attach(device_t, device_t, void *);
    602 static int	wm_detach(device_t, int);
    603 static bool	wm_suspend(device_t, const pmf_qual_t *);
    604 static bool	wm_resume(device_t, const pmf_qual_t *);
    605 static void	wm_watchdog(struct ifnet *);
    606 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    607 static void	wm_tick(void *);
    608 static int	wm_ifflags_cb(struct ethercom *);
    609 static int	wm_ioctl(struct ifnet *, u_long, void *);
    610 /* MAC address related */
    611 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    612 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    613 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    614 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    615 static void	wm_set_filter(struct wm_softc *);
    616 /* Reset and init related */
    617 static void	wm_set_vlan(struct wm_softc *);
    618 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    619 static void	wm_get_auto_rd_done(struct wm_softc *);
    620 static void	wm_lan_init_done(struct wm_softc *);
    621 static void	wm_get_cfg_done(struct wm_softc *);
    622 static void	wm_initialize_hardware_bits(struct wm_softc *);
    623 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    624 static void	wm_reset(struct wm_softc *);
    625 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    626 static void	wm_rxdrain(struct wm_rxqueue *);
    627 static void	wm_rss_getkey(uint8_t *);
    628 static void	wm_init_rss(struct wm_softc *);
    629 static void	wm_adjust_qnum(struct wm_softc *, int);
    630 static int	wm_setup_legacy(struct wm_softc *);
    631 static int	wm_setup_msix(struct wm_softc *);
    632 static int	wm_init(struct ifnet *);
    633 static int	wm_init_locked(struct ifnet *);
    634 static void	wm_stop(struct ifnet *, int);
    635 static void	wm_stop_locked(struct ifnet *, int);
    636 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    637 static void	wm_82547_txfifo_stall(void *);
    638 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    639 /* DMA related */
    640 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    641 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    642 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    643 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    644     struct wm_txqueue *);
    645 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    646 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    647 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    648     struct wm_rxqueue *);
    649 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    650 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    651 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    652 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    653 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    654 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    655 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    656     struct wm_txqueue *);
    657 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    658     struct wm_rxqueue *);
    659 static int	wm_alloc_txrx_queues(struct wm_softc *);
    660 static void	wm_free_txrx_queues(struct wm_softc *);
    661 static int	wm_init_txrx_queues(struct wm_softc *);
    662 /* Start */
    663 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    664     uint32_t *, uint8_t *);
    665 static void	wm_start(struct ifnet *);
    666 static void	wm_start_locked(struct ifnet *);
    667 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    668     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    669 static void	wm_nq_start(struct ifnet *);
    670 static void	wm_nq_start_locked(struct ifnet *);
    671 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    672 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    673 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    675 /* Interrupt */
    676 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    677 static void	wm_rxeof(struct wm_rxqueue *);
    678 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    679 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    680 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    681 static void	wm_linkintr(struct wm_softc *, uint32_t);
    682 static int	wm_intr_legacy(void *);
    683 static int	wm_txrxintr_msix(void *);
    684 static int	wm_linkintr_msix(void *);
    685 
    686 /*
    687  * Media related.
    688  * GMII, SGMII, TBI, SERDES and SFP.
    689  */
    690 /* Common */
    691 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    692 /* GMII related */
    693 static void	wm_gmii_reset(struct wm_softc *);
    694 static int	wm_get_phy_id_82575(struct wm_softc *);
    695 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    696 static int	wm_gmii_mediachange(struct ifnet *);
    697 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    698 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    699 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    700 static int	wm_gmii_i82543_readreg(device_t, int, int);
    701 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    702 static int	wm_gmii_i82544_readreg(device_t, int, int);
    703 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    704 static int	wm_gmii_i80003_readreg(device_t, int, int);
    705 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    706 static int	wm_gmii_bm_readreg(device_t, int, int);
    707 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    708 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    709 static int	wm_gmii_hv_readreg(device_t, int, int);
    710 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    711 static int	wm_gmii_82580_readreg(device_t, int, int);
    712 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    713 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    714 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    715 static void	wm_gmii_statchg(struct ifnet *);
    716 static int	wm_kmrn_readreg(struct wm_softc *, int);
    717 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    718 /* SGMII */
    719 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    720 static int	wm_sgmii_readreg(device_t, int, int);
    721 static void	wm_sgmii_writereg(device_t, int, int, int);
    722 /* TBI related */
    723 static void	wm_tbi_mediainit(struct wm_softc *);
    724 static int	wm_tbi_mediachange(struct ifnet *);
    725 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    726 static int	wm_check_for_link(struct wm_softc *);
    727 static void	wm_tbi_tick(struct wm_softc *);
    728 /* SERDES related */
    729 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    730 static int	wm_serdes_mediachange(struct ifnet *);
    731 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    732 static void	wm_serdes_tick(struct wm_softc *);
    733 /* SFP related */
    734 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    735 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    736 
    737 /*
    738  * NVM related.
    739  * Microwire, SPI (w/wo EERD) and Flash.
    740  */
    741 /* Misc functions */
    742 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    743 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    744 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    745 /* Microwire */
    746 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    747 /* SPI */
    748 static int	wm_nvm_ready_spi(struct wm_softc *);
    749 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    750 /* Using with EERD */
    751 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    752 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    753 /* Flash */
    754 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    755     unsigned int *);
    756 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    757 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    758 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    759 	uint32_t *);
    760 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    761 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    762 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    763 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    764 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    765 /* iNVM */
    766 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    767 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    768 /* Lock, detecting NVM type, validate checksum and read */
    769 static int	wm_nvm_acquire(struct wm_softc *);
    770 static void	wm_nvm_release(struct wm_softc *);
    771 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    772 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    773 static int	wm_nvm_validate_checksum(struct wm_softc *);
    774 static void	wm_nvm_version_invm(struct wm_softc *);
    775 static void	wm_nvm_version(struct wm_softc *);
    776 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    777 
    778 /*
    779  * Hardware semaphores.
    780  * Very complex...
    781  */
    782 static int	wm_get_swsm_semaphore(struct wm_softc *);
    783 static void	wm_put_swsm_semaphore(struct wm_softc *);
    784 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    785 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    786 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    787 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    788 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    789 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    790 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    791 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    792 
    793 /*
    794  * Management mode and power management related subroutines.
    795  * BMC, AMT, suspend/resume and EEE.
    796  */
    797 #ifdef WM_WOL
    798 static int	wm_check_mng_mode(struct wm_softc *);
    799 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    800 static int	wm_check_mng_mode_82574(struct wm_softc *);
    801 static int	wm_check_mng_mode_generic(struct wm_softc *);
    802 #endif
    803 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    804 static bool	wm_phy_resetisblocked(struct wm_softc *);
    805 static void	wm_get_hw_control(struct wm_softc *);
    806 static void	wm_release_hw_control(struct wm_softc *);
    807 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    808 static void	wm_smbustopci(struct wm_softc *);
    809 static void	wm_init_manageability(struct wm_softc *);
    810 static void	wm_release_manageability(struct wm_softc *);
    811 static void	wm_get_wakeup(struct wm_softc *);
    812 #ifdef WM_WOL
    813 static void	wm_enable_phy_wakeup(struct wm_softc *);
    814 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    815 static void	wm_enable_wakeup(struct wm_softc *);
    816 #endif
    817 /* LPLU (Low Power Link Up) */
    818 static void	wm_lplu_d0_disable(struct wm_softc *);
    819 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    820 /* EEE */
    821 static void	wm_set_eee_i350(struct wm_softc *);
    822 
    823 /*
    824  * Workarounds (mainly PHY related).
    825  * Basically, PHY's workarounds are in the PHY drivers.
    826  */
    827 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    828 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    829 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    830 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    831 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    832 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    833 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    834 static void	wm_reset_init_script_82575(struct wm_softc *);
    835 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    836 static void	wm_pll_workaround_i210(struct wm_softc *);
    837 
    838 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    839     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    840 
    841 /*
    842  * Devices supported by this driver.
    843  */
    844 static const struct wm_product {
    845 	pci_vendor_id_t		wmp_vendor;
    846 	pci_product_id_t	wmp_product;
    847 	const char		*wmp_name;
    848 	wm_chip_type		wmp_type;
    849 	uint32_t		wmp_flags;
    850 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    851 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    852 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    853 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    854 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    855 } wm_products[] = {
    856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    857 	  "Intel i82542 1000BASE-X Ethernet",
    858 	  WM_T_82542_2_1,	WMP_F_FIBER },
    859 
    860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    861 	  "Intel i82543GC 1000BASE-X Ethernet",
    862 	  WM_T_82543,		WMP_F_FIBER },
    863 
    864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    865 	  "Intel i82543GC 1000BASE-T Ethernet",
    866 	  WM_T_82543,		WMP_F_COPPER },
    867 
    868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    869 	  "Intel i82544EI 1000BASE-T Ethernet",
    870 	  WM_T_82544,		WMP_F_COPPER },
    871 
    872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    873 	  "Intel i82544EI 1000BASE-X Ethernet",
    874 	  WM_T_82544,		WMP_F_FIBER },
    875 
    876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    877 	  "Intel i82544GC 1000BASE-T Ethernet",
    878 	  WM_T_82544,		WMP_F_COPPER },
    879 
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    881 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    882 	  WM_T_82544,		WMP_F_COPPER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    885 	  "Intel i82540EM 1000BASE-T Ethernet",
    886 	  WM_T_82540,		WMP_F_COPPER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    889 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    890 	  WM_T_82540,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    893 	  "Intel i82540EP 1000BASE-T Ethernet",
    894 	  WM_T_82540,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    897 	  "Intel i82540EP 1000BASE-T Ethernet",
    898 	  WM_T_82540,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    901 	  "Intel i82540EP 1000BASE-T Ethernet",
    902 	  WM_T_82540,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    905 	  "Intel i82545EM 1000BASE-T Ethernet",
    906 	  WM_T_82545,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    909 	  "Intel i82545GM 1000BASE-T Ethernet",
    910 	  WM_T_82545_3,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    913 	  "Intel i82545GM 1000BASE-X Ethernet",
    914 	  WM_T_82545_3,		WMP_F_FIBER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    917 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    918 	  WM_T_82545_3,		WMP_F_SERDES },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    921 	  "Intel i82546EB 1000BASE-T Ethernet",
    922 	  WM_T_82546,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    925 	  "Intel i82546EB 1000BASE-T Ethernet",
    926 	  WM_T_82546,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    929 	  "Intel i82545EM 1000BASE-X Ethernet",
    930 	  WM_T_82545,		WMP_F_FIBER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    933 	  "Intel i82546EB 1000BASE-X Ethernet",
    934 	  WM_T_82546,		WMP_F_FIBER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    937 	  "Intel i82546GB 1000BASE-T Ethernet",
    938 	  WM_T_82546_3,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    941 	  "Intel i82546GB 1000BASE-X Ethernet",
    942 	  WM_T_82546_3,		WMP_F_FIBER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    945 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    946 	  WM_T_82546_3,		WMP_F_SERDES },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    949 	  "i82546GB quad-port Gigabit Ethernet",
    950 	  WM_T_82546_3,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    953 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    954 	  WM_T_82546_3,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    957 	  "Intel PRO/1000MT (82546GB)",
    958 	  WM_T_82546_3,		WMP_F_COPPER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    961 	  "Intel i82541EI 1000BASE-T Ethernet",
    962 	  WM_T_82541,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    965 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    966 	  WM_T_82541,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    969 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    970 	  WM_T_82541,		WMP_F_COPPER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    973 	  "Intel i82541ER 1000BASE-T Ethernet",
    974 	  WM_T_82541_2,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    977 	  "Intel i82541GI 1000BASE-T Ethernet",
    978 	  WM_T_82541_2,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    981 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    982 	  WM_T_82541_2,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    985 	  "Intel i82541PI 1000BASE-T Ethernet",
    986 	  WM_T_82541_2,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    989 	  "Intel i82547EI 1000BASE-T Ethernet",
    990 	  WM_T_82547,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    993 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    994 	  WM_T_82547,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    997 	  "Intel i82547GI 1000BASE-T Ethernet",
    998 	  WM_T_82547_2,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1001 	  "Intel PRO/1000 PT (82571EB)",
   1002 	  WM_T_82571,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1005 	  "Intel PRO/1000 PF (82571EB)",
   1006 	  WM_T_82571,		WMP_F_FIBER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1009 	  "Intel PRO/1000 PB (82571EB)",
   1010 	  WM_T_82571,		WMP_F_SERDES },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1013 	  "Intel PRO/1000 QT (82571EB)",
   1014 	  WM_T_82571,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1017 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1018 	  WM_T_82571,		WMP_F_COPPER, },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1021 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1022 	  WM_T_82571,		WMP_F_COPPER, },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1025 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1026 	  WM_T_82571,		WMP_F_SERDES, },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1029 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1030 	  WM_T_82571,		WMP_F_SERDES, },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1033 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1034 	  WM_T_82571,		WMP_F_FIBER, },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1037 	  "Intel i82572EI 1000baseT Ethernet",
   1038 	  WM_T_82572,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1041 	  "Intel i82572EI 1000baseX Ethernet",
   1042 	  WM_T_82572,		WMP_F_FIBER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1045 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1046 	  WM_T_82572,		WMP_F_SERDES },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1049 	  "Intel i82572EI 1000baseT Ethernet",
   1050 	  WM_T_82572,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1053 	  "Intel i82573E",
   1054 	  WM_T_82573,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1057 	  "Intel i82573E IAMT",
   1058 	  WM_T_82573,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1061 	  "Intel i82573L Gigabit Ethernet",
   1062 	  WM_T_82573,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1065 	  "Intel i82574L",
   1066 	  WM_T_82574,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1069 	  "Intel i82574L",
   1070 	  WM_T_82574,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1073 	  "Intel i82583V",
   1074 	  WM_T_82583,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1077 	  "i80003 dual 1000baseT Ethernet",
   1078 	  WM_T_80003,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1081 	  "i80003 dual 1000baseX Ethernet",
   1082 	  WM_T_80003,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1085 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1086 	  WM_T_80003,		WMP_F_SERDES },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1089 	  "Intel i80003 1000baseT Ethernet",
   1090 	  WM_T_80003,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1093 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1094 	  WM_T_80003,		WMP_F_SERDES },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1097 	  "Intel i82801H (M_AMT) LAN Controller",
   1098 	  WM_T_ICH8,		WMP_F_COPPER },
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1100 	  "Intel i82801H (AMT) LAN Controller",
   1101 	  WM_T_ICH8,		WMP_F_COPPER },
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1103 	  "Intel i82801H LAN Controller",
   1104 	  WM_T_ICH8,		WMP_F_COPPER },
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1106 	  "Intel i82801H (IFE) LAN Controller",
   1107 	  WM_T_ICH8,		WMP_F_COPPER },
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1109 	  "Intel i82801H (M) LAN Controller",
   1110 	  WM_T_ICH8,		WMP_F_COPPER },
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1112 	  "Intel i82801H IFE (GT) LAN Controller",
   1113 	  WM_T_ICH8,		WMP_F_COPPER },
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1115 	  "Intel i82801H IFE (G) LAN Controller",
   1116 	  WM_T_ICH8,		WMP_F_COPPER },
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1118 	  "82801I (AMT) LAN Controller",
   1119 	  WM_T_ICH9,		WMP_F_COPPER },
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1121 	  "82801I LAN Controller",
   1122 	  WM_T_ICH9,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1124 	  "82801I (G) LAN Controller",
   1125 	  WM_T_ICH9,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1127 	  "82801I (GT) LAN Controller",
   1128 	  WM_T_ICH9,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1130 	  "82801I (C) LAN Controller",
   1131 	  WM_T_ICH9,		WMP_F_COPPER },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1133 	  "82801I mobile LAN Controller",
   1134 	  WM_T_ICH9,		WMP_F_COPPER },
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1136 	  "82801I mobile (V) LAN Controller",
   1137 	  WM_T_ICH9,		WMP_F_COPPER },
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1139 	  "82801I mobile (AMT) LAN Controller",
   1140 	  WM_T_ICH9,		WMP_F_COPPER },
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1142 	  "82567LM-4 LAN Controller",
   1143 	  WM_T_ICH9,		WMP_F_COPPER },
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1145 	  "82567V-3 LAN Controller",
   1146 	  WM_T_ICH9,		WMP_F_COPPER },
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1148 	  "82567LM-2 LAN Controller",
   1149 	  WM_T_ICH10,		WMP_F_COPPER },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1151 	  "82567LF-2 LAN Controller",
   1152 	  WM_T_ICH10,		WMP_F_COPPER },
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1154 	  "82567LM-3 LAN Controller",
   1155 	  WM_T_ICH10,		WMP_F_COPPER },
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1157 	  "82567LF-3 LAN Controller",
   1158 	  WM_T_ICH10,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1160 	  "82567V-2 LAN Controller",
   1161 	  WM_T_ICH10,		WMP_F_COPPER },
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1163 	  "82567V-3? LAN Controller",
   1164 	  WM_T_ICH10,		WMP_F_COPPER },
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1166 	  "HANKSVILLE LAN Controller",
   1167 	  WM_T_ICH10,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1169 	  "PCH LAN (82577LM) Controller",
   1170 	  WM_T_PCH,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1172 	  "PCH LAN (82577LC) Controller",
   1173 	  WM_T_PCH,		WMP_F_COPPER },
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1175 	  "PCH LAN (82578DM) Controller",
   1176 	  WM_T_PCH,		WMP_F_COPPER },
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1178 	  "PCH LAN (82578DC) Controller",
   1179 	  WM_T_PCH,		WMP_F_COPPER },
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1181 	  "PCH2 LAN (82579LM) Controller",
   1182 	  WM_T_PCH2,		WMP_F_COPPER },
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1184 	  "PCH2 LAN (82579V) Controller",
   1185 	  WM_T_PCH2,		WMP_F_COPPER },
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1187 	  "82575EB dual-1000baseT Ethernet",
   1188 	  WM_T_82575,		WMP_F_COPPER },
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1190 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1191 	  WM_T_82575,		WMP_F_SERDES },
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1193 	  "82575GB quad-1000baseT Ethernet",
   1194 	  WM_T_82575,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1196 	  "82575GB quad-1000baseT Ethernet (PM)",
   1197 	  WM_T_82575,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1199 	  "82576 1000BaseT Ethernet",
   1200 	  WM_T_82576,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1202 	  "82576 1000BaseX Ethernet",
   1203 	  WM_T_82576,		WMP_F_FIBER },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1206 	  "82576 gigabit Ethernet (SERDES)",
   1207 	  WM_T_82576,		WMP_F_SERDES },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1210 	  "82576 quad-1000BaseT Ethernet",
   1211 	  WM_T_82576,		WMP_F_COPPER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1214 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1215 	  WM_T_82576,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1218 	  "82576 gigabit Ethernet",
   1219 	  WM_T_82576,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1222 	  "82576 gigabit Ethernet (SERDES)",
   1223 	  WM_T_82576,		WMP_F_SERDES },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1225 	  "82576 quad-gigabit Ethernet (SERDES)",
   1226 	  WM_T_82576,		WMP_F_SERDES },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1229 	  "82580 1000BaseT Ethernet",
   1230 	  WM_T_82580,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1232 	  "82580 1000BaseX Ethernet",
   1233 	  WM_T_82580,		WMP_F_FIBER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1236 	  "82580 1000BaseT Ethernet (SERDES)",
   1237 	  WM_T_82580,		WMP_F_SERDES },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1240 	  "82580 gigabit Ethernet (SGMII)",
   1241 	  WM_T_82580,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1243 	  "82580 dual-1000BaseT Ethernet",
   1244 	  WM_T_82580,		WMP_F_COPPER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1247 	  "82580 quad-1000BaseX Ethernet",
   1248 	  WM_T_82580,		WMP_F_FIBER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1251 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1252 	  WM_T_82580,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1255 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1256 	  WM_T_82580,		WMP_F_SERDES },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1259 	  "DH89XXCC 1000BASE-KX Ethernet",
   1260 	  WM_T_82580,		WMP_F_SERDES },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1263 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1264 	  WM_T_82580,		WMP_F_SERDES },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1267 	  "I350 Gigabit Network Connection",
   1268 	  WM_T_I350,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1271 	  "I350 Gigabit Fiber Network Connection",
   1272 	  WM_T_I350,		WMP_F_FIBER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1275 	  "I350 Gigabit Backplane Connection",
   1276 	  WM_T_I350,		WMP_F_SERDES },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1279 	  "I350 Quad Port Gigabit Ethernet",
   1280 	  WM_T_I350,		WMP_F_SERDES },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1283 	  "I350 Gigabit Connection",
   1284 	  WM_T_I350,		WMP_F_COPPER },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1287 	  "I354 Gigabit Ethernet (KX)",
   1288 	  WM_T_I354,		WMP_F_SERDES },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1291 	  "I354 Gigabit Ethernet (SGMII)",
   1292 	  WM_T_I354,		WMP_F_COPPER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1295 	  "I354 Gigabit Ethernet (2.5G)",
   1296 	  WM_T_I354,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1299 	  "I210-T1 Ethernet Server Adapter",
   1300 	  WM_T_I210,		WMP_F_COPPER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1303 	  "I210 Ethernet (Copper OEM)",
   1304 	  WM_T_I210,		WMP_F_COPPER },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1307 	  "I210 Ethernet (Copper IT)",
   1308 	  WM_T_I210,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1311 	  "I210 Ethernet (FLASH less)",
   1312 	  WM_T_I210,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1315 	  "I210 Gigabit Ethernet (Fiber)",
   1316 	  WM_T_I210,		WMP_F_FIBER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1319 	  "I210 Gigabit Ethernet (SERDES)",
   1320 	  WM_T_I210,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1323 	  "I210 Gigabit Ethernet (FLASH less)",
   1324 	  WM_T_I210,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1327 	  "I210 Gigabit Ethernet (SGMII)",
   1328 	  WM_T_I210,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1331 	  "I211 Ethernet (COPPER)",
   1332 	  WM_T_I211,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1334 	  "I217 V Ethernet Connection",
   1335 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1337 	  "I217 LM Ethernet Connection",
   1338 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1340 	  "I218 V Ethernet Connection",
   1341 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1343 	  "I218 V Ethernet Connection",
   1344 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1346 	  "I218 V Ethernet Connection",
   1347 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1349 	  "I218 LM Ethernet Connection",
   1350 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1352 	  "I218 LM Ethernet Connection",
   1353 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1355 	  "I218 LM Ethernet Connection",
   1356 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1357 #if 0
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1359 	  "I219 V Ethernet Connection",
   1360 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1362 	  "I219 V Ethernet Connection",
   1363 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1365 	  "I219 V Ethernet Connection",
   1366 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1368 	  "I219 V Ethernet Connection",
   1369 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1371 	  "I219 LM Ethernet Connection",
   1372 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1374 	  "I219 LM Ethernet Connection",
   1375 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1377 	  "I219 LM Ethernet Connection",
   1378 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1380 	  "I219 LM Ethernet Connection",
   1381 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1383 	  "I219 LM Ethernet Connection",
   1384 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1385 #endif
   1386 	{ 0,			0,
   1387 	  NULL,
   1388 	  0,			0 },
   1389 };
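         /*
          * The all-zero entry above is a sentinel: wm_lookup() below stops
          * scanning the table when it reaches an entry whose wmp_name is NULL.
          */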
   1390 
   1391 /*
    1392  * Register read/write functions,
    1393  * other than CSR_{READ|WRITE}().
   1394  */
   1395 
   1396 #if 0 /* Not currently used */
   1397 static inline uint32_t
   1398 wm_io_read(struct wm_softc *sc, int reg)
   1399 {
   1400 
   1401 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1402 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1403 }
   1404 #endif
   1405 
   1406 static inline void
   1407 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1408 {
   1409 
   1410 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1411 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1412 }
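         /*
          * A minimal sketch of the indirect I/O access above: offset 0 of the
          * I/O BAR is the address window (IOADDR) and offset 4 is the data
          * window (IODATA), so every register access is a two-step sequence,
          * e.g.
          *
          *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
          *
          * first latches the register offset and then pushes the value
          * through the data window.
          */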
   1413 
   1414 static inline void
   1415 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1416     uint32_t data)
   1417 {
   1418 	uint32_t regval;
   1419 	int i;
   1420 
   1421 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1422 
   1423 	CSR_WRITE(sc, reg, regval);
   1424 
   1425 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1426 		delay(5);
   1427 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1428 			break;
   1429 	}
   1430 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1431 		aprint_error("%s: WARNING:"
   1432 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1433 		    device_xname(sc->sc_dev), reg);
   1434 	}
   1435 }
   1436 
   1437 static inline void
   1438 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1439 {
   1440 	wa->wa_low = htole32(v & 0xffffffffU);
   1441 	if (sizeof(bus_addr_t) == 8)
   1442 		wa->wa_high = htole32((uint64_t) v >> 32);
   1443 	else
   1444 		wa->wa_high = 0;
   1445 }
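         /*
          * Worked example for wm_set_dma_addr(): with a 64-bit bus_addr_t and
          * v = 0x123456000, the descriptor gets wa_low = htole32(0x23456000)
          * and wa_high = htole32(0x1); with a 32-bit bus_addr_t the shift is
          * never taken and wa_high is simply forced to zero.
          */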
   1446 
   1447 /*
   1448  * Descriptor sync/init functions.
   1449  */
   1450 static inline void
   1451 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1452 {
   1453 	struct wm_softc *sc = txq->txq_sc;
   1454 
   1455 	/* If it will wrap around, sync to the end of the ring. */
   1456 	if ((start + num) > WM_NTXDESC(txq)) {
   1457 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1458 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1459 		    (WM_NTXDESC(txq) - start), ops);
   1460 		num -= (WM_NTXDESC(txq) - start);
   1461 		start = 0;
   1462 	}
   1463 
   1464 	/* Now sync whatever is left. */
   1465 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1466 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1467 }
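         /*
          * Worked example of the wrap-around handling above: in a
          * 256-descriptor ring with start = 250 and num = 10, the first
          * bus_dmamap_sync() covers descriptors 250-255, after which num
          * becomes 4 and start becomes 0, so the second sync covers
          * descriptors 0-3.
          */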
   1468 
   1469 static inline void
   1470 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1471 {
   1472 	struct wm_softc *sc = rxq->rxq_sc;
   1473 
   1474 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1475 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1476 }
   1477 
   1478 static inline void
   1479 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1480 {
   1481 	struct wm_softc *sc = rxq->rxq_sc;
   1482 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1483 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1484 	struct mbuf *m = rxs->rxs_mbuf;
   1485 
   1486 	/*
   1487 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1488 	 * so that the payload after the Ethernet header is aligned
   1489 	 * to a 4-byte boundary.
    1490 	 *
   1491 	 * XXX BRAINDAMAGE ALERT!
   1492 	 * The stupid chip uses the same size for every buffer, which
   1493 	 * is set in the Receive Control register.  We are using the 2K
   1494 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1495 	 * reason, we can't "scoot" packets longer than the standard
   1496 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1497 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1498 	 * the upper layer copy the headers.
   1499 	 */
   1500 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1501 
   1502 	wm_set_dma_addr(&rxd->wrx_addr,
   1503 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1504 	rxd->wrx_len = 0;
   1505 	rxd->wrx_cksum = 0;
   1506 	rxd->wrx_status = 0;
   1507 	rxd->wrx_errors = 0;
   1508 	rxd->wrx_special = 0;
   1509 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1510 
   1511 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1512 }
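         /*
          * The 2-byte "scoot" above is plain alignment arithmetic: an
          * Ethernet header is 14 bytes, so with sc_align_tweak = 2 the IP
          * header lands at buffer offset 16, a 4-byte boundary.  With
          * sc_align_tweak = 0 (large frames on strict-alignment machines) it
          * would land at offset 14 and the upper layers must copy the
          * headers to realign.
          */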
   1513 
   1514 /*
   1515  * Device driver interface functions and commonly used functions.
   1516  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1517  */
   1518 
    1519 /* Look up a device in the supported device table */
   1520 static const struct wm_product *
   1521 wm_lookup(const struct pci_attach_args *pa)
   1522 {
   1523 	const struct wm_product *wmp;
   1524 
   1525 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1526 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1527 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1528 			return wmp;
   1529 	}
   1530 	return NULL;
   1531 }
   1532 
   1533 /* The match function (ca_match) */
   1534 static int
   1535 wm_match(device_t parent, cfdata_t cf, void *aux)
   1536 {
   1537 	struct pci_attach_args *pa = aux;
   1538 
   1539 	if (wm_lookup(pa) != NULL)
   1540 		return 1;
   1541 
   1542 	return 0;
   1543 }
   1544 
   1545 /* The attach function (ca_attach) */
   1546 static void
   1547 wm_attach(device_t parent, device_t self, void *aux)
   1548 {
   1549 	struct wm_softc *sc = device_private(self);
   1550 	struct pci_attach_args *pa = aux;
   1551 	prop_dictionary_t dict;
   1552 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1553 	pci_chipset_tag_t pc = pa->pa_pc;
   1554 	int counts[PCI_INTR_TYPE_SIZE];
   1555 	pci_intr_type_t max_type;
   1556 	const char *eetype, *xname;
   1557 	bus_space_tag_t memt;
   1558 	bus_space_handle_t memh;
   1559 	bus_size_t memsize;
   1560 	int memh_valid;
   1561 	int i, error;
   1562 	const struct wm_product *wmp;
   1563 	prop_data_t ea;
   1564 	prop_number_t pn;
   1565 	uint8_t enaddr[ETHER_ADDR_LEN];
   1566 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1567 	pcireg_t preg, memtype;
   1568 	uint16_t eeprom_data, apme_mask;
   1569 	bool force_clear_smbi;
   1570 	uint32_t link_mode;
   1571 	uint32_t reg;
   1572 
   1573 	sc->sc_dev = self;
   1574 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1575 	sc->sc_stopping = false;
   1576 
   1577 	wmp = wm_lookup(pa);
   1578 #ifdef DIAGNOSTIC
   1579 	if (wmp == NULL) {
   1580 		printf("\n");
   1581 		panic("wm_attach: impossible");
   1582 	}
   1583 #endif
   1584 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1585 
   1586 	sc->sc_pc = pa->pa_pc;
   1587 	sc->sc_pcitag = pa->pa_tag;
   1588 
   1589 	if (pci_dma64_available(pa))
   1590 		sc->sc_dmat = pa->pa_dmat64;
   1591 	else
   1592 		sc->sc_dmat = pa->pa_dmat;
   1593 
   1594 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1595 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1596 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1597 
   1598 	sc->sc_type = wmp->wmp_type;
   1599 	if (sc->sc_type < WM_T_82543) {
   1600 		if (sc->sc_rev < 2) {
   1601 			aprint_error_dev(sc->sc_dev,
   1602 			    "i82542 must be at least rev. 2\n");
   1603 			return;
   1604 		}
   1605 		if (sc->sc_rev < 3)
   1606 			sc->sc_type = WM_T_82542_2_0;
   1607 	}
   1608 
   1609 	/*
   1610 	 * Disable MSI for Errata:
   1611 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1612 	 *
   1613 	 *  82544: Errata 25
   1614 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1615 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1616 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1617 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1618 	 *
   1619 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1620 	 *
   1621 	 *  82571 & 82572: Errata 63
   1622 	 */
   1623 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1624 	    || (sc->sc_type == WM_T_82572))
   1625 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1626 
   1627 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1628 	    || (sc->sc_type == WM_T_82580)
   1629 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1630 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1631 		sc->sc_flags |= WM_F_NEWQUEUE;
   1632 
   1633 	/* Set device properties (mactype) */
   1634 	dict = device_properties(sc->sc_dev);
   1635 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1636 
   1637 	/*
    1638 	 * Map the device.  All devices support memory-mapped access,
   1639 	 * and it is really required for normal operation.
   1640 	 */
   1641 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1642 	switch (memtype) {
   1643 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1644 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1645 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1646 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1647 		break;
   1648 	default:
   1649 		memh_valid = 0;
   1650 		break;
   1651 	}
   1652 
   1653 	if (memh_valid) {
   1654 		sc->sc_st = memt;
   1655 		sc->sc_sh = memh;
   1656 		sc->sc_ss = memsize;
   1657 	} else {
   1658 		aprint_error_dev(sc->sc_dev,
   1659 		    "unable to map device registers\n");
   1660 		return;
   1661 	}
   1662 
   1663 	/*
   1664 	 * In addition, i82544 and later support I/O mapped indirect
   1665 	 * register access.  It is not desirable (nor supported in
   1666 	 * this driver) to use it for normal operation, though it is
   1667 	 * required to work around bugs in some chip versions.
   1668 	 */
   1669 	if (sc->sc_type >= WM_T_82544) {
   1670 		/* First we have to find the I/O BAR. */
   1671 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1672 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1673 			if (memtype == PCI_MAPREG_TYPE_IO)
   1674 				break;
   1675 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1676 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1677 				i += 4;	/* skip high bits, too */
   1678 		}
   1679 		if (i < PCI_MAPREG_END) {
   1680 			/*
    1681 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1682 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1683 			 * That's no problem because newer chips don't have
    1684 			 * this bug.
   1685 			 *
   1686 			 * The i8254x doesn't apparently respond when the
   1687 			 * I/O BAR is 0, which looks somewhat like it's not
   1688 			 * been configured.
   1689 			 */
   1690 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1691 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1692 				aprint_error_dev(sc->sc_dev,
   1693 				    "WARNING: I/O BAR at zero.\n");
   1694 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1695 					0, &sc->sc_iot, &sc->sc_ioh,
   1696 					NULL, &sc->sc_ios) == 0) {
   1697 				sc->sc_flags |= WM_F_IOH_VALID;
   1698 			} else {
   1699 				aprint_error_dev(sc->sc_dev,
   1700 				    "WARNING: unable to map I/O space\n");
   1701 			}
   1702 		}
   1703 
   1704 	}
   1705 
   1706 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1707 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1708 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1709 	if (sc->sc_type < WM_T_82542_2_1)
   1710 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1711 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1712 
   1713 	/* power up chip */
   1714 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1715 	    NULL)) && error != EOPNOTSUPP) {
   1716 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1717 		return;
   1718 	}
   1719 
   1720 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1721 
   1722 	/* Allocation settings */
   1723 	max_type = PCI_INTR_TYPE_MSIX;
   1724 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1725 	counts[PCI_INTR_TYPE_MSI] = 1;
   1726 	counts[PCI_INTR_TYPE_INTX] = 1;
   1727 
   1728 alloc_retry:
   1729 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1730 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1731 		return;
   1732 	}
   1733 
   1734 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1735 		error = wm_setup_msix(sc);
   1736 		if (error) {
   1737 			pci_intr_release(pc, sc->sc_intrs,
   1738 			    counts[PCI_INTR_TYPE_MSIX]);
   1739 
   1740 			/* Setup for MSI: Disable MSI-X */
   1741 			max_type = PCI_INTR_TYPE_MSI;
   1742 			counts[PCI_INTR_TYPE_MSI] = 1;
   1743 			counts[PCI_INTR_TYPE_INTX] = 1;
   1744 			goto alloc_retry;
   1745 		}
    1746 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1747 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1748 		error = wm_setup_legacy(sc);
   1749 		if (error) {
   1750 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1751 			    counts[PCI_INTR_TYPE_MSI]);
   1752 
   1753 			/* The next try is for INTx: Disable MSI */
   1754 			max_type = PCI_INTR_TYPE_INTX;
   1755 			counts[PCI_INTR_TYPE_INTX] = 1;
   1756 			goto alloc_retry;
   1757 		}
   1758 	} else {
   1759 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1760 		error = wm_setup_legacy(sc);
   1761 		if (error) {
   1762 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1763 			    counts[PCI_INTR_TYPE_INTX]);
   1764 			return;
   1765 		}
   1766 	}
   1767 
   1768 	/*
   1769 	 * Check the function ID (unit number of the chip).
   1770 	 */
   1771 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1772 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1773 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1774 	    || (sc->sc_type == WM_T_82580)
   1775 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1776 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1777 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1778 	else
   1779 		sc->sc_funcid = 0;
   1780 
   1781 	/*
   1782 	 * Determine a few things about the bus we're connected to.
   1783 	 */
   1784 	if (sc->sc_type < WM_T_82543) {
   1785 		/* We don't really know the bus characteristics here. */
   1786 		sc->sc_bus_speed = 33;
   1787 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1788 		/*
    1789 		 * CSA (Communication Streaming Architecture) is about as fast
    1790 		 * as a 32-bit 66MHz PCI bus.
   1791 		 */
   1792 		sc->sc_flags |= WM_F_CSA;
   1793 		sc->sc_bus_speed = 66;
   1794 		aprint_verbose_dev(sc->sc_dev,
   1795 		    "Communication Streaming Architecture\n");
   1796 		if (sc->sc_type == WM_T_82547) {
   1797 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1798 			callout_setfunc(&sc->sc_txfifo_ch,
   1799 					wm_82547_txfifo_stall, sc);
   1800 			aprint_verbose_dev(sc->sc_dev,
   1801 			    "using 82547 Tx FIFO stall work-around\n");
   1802 		}
   1803 	} else if (sc->sc_type >= WM_T_82571) {
   1804 		sc->sc_flags |= WM_F_PCIE;
   1805 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1806 		    && (sc->sc_type != WM_T_ICH10)
   1807 		    && (sc->sc_type != WM_T_PCH)
   1808 		    && (sc->sc_type != WM_T_PCH2)
   1809 		    && (sc->sc_type != WM_T_PCH_LPT)
   1810 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1811 			/* ICH* and PCH* have no PCIe capability registers */
   1812 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1813 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1814 				NULL) == 0)
   1815 				aprint_error_dev(sc->sc_dev,
   1816 				    "unable to find PCIe capability\n");
   1817 		}
   1818 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1819 	} else {
   1820 		reg = CSR_READ(sc, WMREG_STATUS);
   1821 		if (reg & STATUS_BUS64)
   1822 			sc->sc_flags |= WM_F_BUS64;
   1823 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1824 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1825 
   1826 			sc->sc_flags |= WM_F_PCIX;
   1827 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1828 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1829 				aprint_error_dev(sc->sc_dev,
   1830 				    "unable to find PCIX capability\n");
   1831 			else if (sc->sc_type != WM_T_82545_3 &&
   1832 				 sc->sc_type != WM_T_82546_3) {
   1833 				/*
   1834 				 * Work around a problem caused by the BIOS
   1835 				 * setting the max memory read byte count
   1836 				 * incorrectly.
   1837 				 */
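         				/*
         				 * Worked example: if the BIOS left
         				 * MMRBC at 4096 bytes (bytecnt = 3)
         				 * while the device caps it at 2048
         				 * (maxb = 2), the code below rewrites
         				 * PCIX_CMD so that 512 << bytecnt
         				 * again matches 512 << maxb = 2048.
         				 */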
   1838 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1839 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1840 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1841 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1842 
   1843 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1844 				    PCIX_CMD_BYTECNT_SHIFT;
   1845 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1846 				    PCIX_STATUS_MAXB_SHIFT;
   1847 				if (bytecnt > maxb) {
   1848 					aprint_verbose_dev(sc->sc_dev,
   1849 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1850 					    512 << bytecnt, 512 << maxb);
   1851 					pcix_cmd = (pcix_cmd &
   1852 					    ~PCIX_CMD_BYTECNT_MASK) |
   1853 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1854 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1855 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1856 					    pcix_cmd);
   1857 				}
   1858 			}
   1859 		}
   1860 		/*
   1861 		 * The quad port adapter is special; it has a PCIX-PCIX
   1862 		 * bridge on the board, and can run the secondary bus at
   1863 		 * a higher speed.
   1864 		 */
   1865 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1866 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1867 								      : 66;
   1868 		} else if (sc->sc_flags & WM_F_PCIX) {
   1869 			switch (reg & STATUS_PCIXSPD_MASK) {
   1870 			case STATUS_PCIXSPD_50_66:
   1871 				sc->sc_bus_speed = 66;
   1872 				break;
   1873 			case STATUS_PCIXSPD_66_100:
   1874 				sc->sc_bus_speed = 100;
   1875 				break;
   1876 			case STATUS_PCIXSPD_100_133:
   1877 				sc->sc_bus_speed = 133;
   1878 				break;
   1879 			default:
   1880 				aprint_error_dev(sc->sc_dev,
   1881 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1882 				    reg & STATUS_PCIXSPD_MASK);
   1883 				sc->sc_bus_speed = 66;
   1884 				break;
   1885 			}
   1886 		} else
   1887 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1888 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1889 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1890 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1891 	}
   1892 
   1893 	/* clear interesting stat counters */
   1894 	CSR_READ(sc, WMREG_COLC);
   1895 	CSR_READ(sc, WMREG_RXERRC);
   1896 
   1897 	/* get PHY control from SMBus to PCIe */
   1898 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1899 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1900 		wm_smbustopci(sc);
   1901 
   1902 	if (sc->sc_type >= WM_T_ICH8)
   1903 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1904 
   1905 	/* Set PHY, NVM mutex related stuff */
   1906 	switch (sc->sc_type) {
   1907 	case WM_T_82542_2_0:
   1908 	case WM_T_82542_2_1:
   1909 	case WM_T_82543:
   1910 	case WM_T_82544:
   1911 		/* Microwire */
   1912 		sc->sc_nvm_wordsize = 64;
   1913 		sc->sc_nvm_addrbits = 6;
   1914 		break;
   1915 	case WM_T_82540:
   1916 	case WM_T_82545:
   1917 	case WM_T_82545_3:
   1918 	case WM_T_82546:
   1919 	case WM_T_82546_3:
   1920 		/* Microwire */
   1921 		reg = CSR_READ(sc, WMREG_EECD);
   1922 		if (reg & EECD_EE_SIZE) {
   1923 			sc->sc_nvm_wordsize = 256;
   1924 			sc->sc_nvm_addrbits = 8;
   1925 		} else {
   1926 			sc->sc_nvm_wordsize = 64;
   1927 			sc->sc_nvm_addrbits = 6;
   1928 		}
   1929 		sc->sc_flags |= WM_F_LOCK_EECD;
   1930 		break;
   1931 	case WM_T_82541:
   1932 	case WM_T_82541_2:
   1933 	case WM_T_82547:
   1934 	case WM_T_82547_2:
   1935 		sc->sc_flags |= WM_F_LOCK_EECD;
   1936 		reg = CSR_READ(sc, WMREG_EECD);
   1937 		if (reg & EECD_EE_TYPE) {
   1938 			/* SPI */
   1939 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1940 			wm_nvm_set_addrbits_size_eecd(sc);
   1941 		} else {
   1942 			/* Microwire */
   1943 			if ((reg & EECD_EE_ABITS) != 0) {
   1944 				sc->sc_nvm_wordsize = 256;
   1945 				sc->sc_nvm_addrbits = 8;
   1946 			} else {
   1947 				sc->sc_nvm_wordsize = 64;
   1948 				sc->sc_nvm_addrbits = 6;
   1949 			}
   1950 		}
   1951 		break;
   1952 	case WM_T_82571:
   1953 	case WM_T_82572:
   1954 		/* SPI */
   1955 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1956 		wm_nvm_set_addrbits_size_eecd(sc);
   1957 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1958 		break;
   1959 	case WM_T_82573:
   1960 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1961 		/* FALLTHROUGH */
   1962 	case WM_T_82574:
   1963 	case WM_T_82583:
   1964 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1965 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1966 			sc->sc_nvm_wordsize = 2048;
   1967 		} else {
   1968 			/* SPI */
   1969 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1970 			wm_nvm_set_addrbits_size_eecd(sc);
   1971 		}
   1972 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1973 		break;
   1974 	case WM_T_82575:
   1975 	case WM_T_82576:
   1976 	case WM_T_82580:
   1977 	case WM_T_I350:
   1978 	case WM_T_I354:
   1979 	case WM_T_80003:
   1980 		/* SPI */
   1981 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1982 		wm_nvm_set_addrbits_size_eecd(sc);
   1983 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1984 		    | WM_F_LOCK_SWSM;
   1985 		break;
   1986 	case WM_T_ICH8:
   1987 	case WM_T_ICH9:
   1988 	case WM_T_ICH10:
   1989 	case WM_T_PCH:
   1990 	case WM_T_PCH2:
   1991 	case WM_T_PCH_LPT:
   1992 		/* FLASH */
   1993 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1994 		sc->sc_nvm_wordsize = 2048;
   1995 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1996 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1997 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1998 			aprint_error_dev(sc->sc_dev,
   1999 			    "can't map FLASH registers\n");
   2000 			goto out;
   2001 		}
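         		/*
         		 * A worked example of the GFPREG decoding below (a
         		 * sketch, assuming 4 KB flash sectors and the usual ICH
         		 * layout with the first sector in the low bits and the
         		 * last sector in bits 31:16): with GFPREG = 0x00020001,
         		 * the flash base is 1 * 4096 = 0x1000 bytes, and the
         		 * bank region is (2 + 1 - 1) = 2 sectors = 8192 bytes,
         		 * which the divisions below split into two banks and
         		 * convert to 16-bit words, leaving 2048 words per bank.
         		 */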
   2002 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2003 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2004 		    ICH_FLASH_SECTOR_SIZE;
   2005 		sc->sc_ich8_flash_bank_size =
   2006 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2007 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2008 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2009 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2010 		sc->sc_flashreg_offset = 0;
   2011 		break;
   2012 	case WM_T_PCH_SPT:
   2013 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2014 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2015 		sc->sc_flasht = sc->sc_st;
   2016 		sc->sc_flashh = sc->sc_sh;
   2017 		sc->sc_ich8_flash_base = 0;
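         		/*
         		 * A sketch of the strap decoding below, assuming
         		 * NVM_SIZE_MULTIPLIER is 4096 bytes: a 5-bit strap
         		 * field of 7 yields (7 + 1) * 4096 = 32768 bytes of
         		 * NVM, halved to 16384 16-bit words by the division
         		 * that follows.
         		 */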
   2018 		sc->sc_nvm_wordsize =
   2019 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2020 			* NVM_SIZE_MULTIPLIER;
    2021 		/* That is the size in bytes; we want words */
   2022 		sc->sc_nvm_wordsize /= 2;
   2023 		/* assume 2 banks */
   2024 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2025 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2026 		break;
   2027 	case WM_T_I210:
   2028 	case WM_T_I211:
   2029 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2030 			wm_nvm_set_addrbits_size_eecd(sc);
   2031 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2032 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   2033 		} else {
   2034 			sc->sc_nvm_wordsize = INVM_SIZE;
   2035 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2036 			sc->sc_flags |= WM_F_LOCK_SWFW;
   2037 		}
   2038 		break;
   2039 	default:
   2040 		break;
   2041 	}
   2042 
   2043 	/* Reset the chip to a known state. */
   2044 	wm_reset(sc);
   2045 
   2046 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82571:
   2049 	case WM_T_82572:
   2050 		reg = CSR_READ(sc, WMREG_SWSM2);
   2051 		if ((reg & SWSM2_LOCK) == 0) {
   2052 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2053 			force_clear_smbi = true;
   2054 		} else
   2055 			force_clear_smbi = false;
   2056 		break;
   2057 	case WM_T_82573:
   2058 	case WM_T_82574:
   2059 	case WM_T_82583:
   2060 		force_clear_smbi = true;
   2061 		break;
   2062 	default:
   2063 		force_clear_smbi = false;
   2064 		break;
   2065 	}
   2066 	if (force_clear_smbi) {
   2067 		reg = CSR_READ(sc, WMREG_SWSM);
   2068 		if ((reg & SWSM_SMBI) != 0)
   2069 			aprint_error_dev(sc->sc_dev,
   2070 			    "Please update the Bootagent\n");
   2071 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2072 	}
   2073 
   2074 	/*
    2075 	 * Defer printing the EEPROM type until after verifying the checksum.
   2076 	 * This allows the EEPROM type to be printed correctly in the case
   2077 	 * that no EEPROM is attached.
   2078 	 */
   2079 	/*
   2080 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2081 	 * this for later, so we can fail future reads from the EEPROM.
   2082 	 */
   2083 	if (wm_nvm_validate_checksum(sc)) {
   2084 		/*
    2085 		 * Read it once more, because some PCI-e parts fail the
    2086 		 * first check due to the link being in a sleep state.
   2087 		 */
   2088 		if (wm_nvm_validate_checksum(sc))
   2089 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2090 	}
   2091 
   2092 	/* Set device properties (macflags) */
   2093 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2094 
   2095 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2096 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2097 	else {
   2098 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2099 		    sc->sc_nvm_wordsize);
   2100 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2101 			aprint_verbose("iNVM");
   2102 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2103 			aprint_verbose("FLASH(HW)");
   2104 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2105 			aprint_verbose("FLASH");
   2106 		else {
   2107 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2108 				eetype = "SPI";
   2109 			else
   2110 				eetype = "MicroWire";
   2111 			aprint_verbose("(%d address bits) %s EEPROM",
   2112 			    sc->sc_nvm_addrbits, eetype);
   2113 		}
   2114 	}
   2115 	wm_nvm_version(sc);
   2116 	aprint_verbose("\n");
   2117 
   2118 	/* Check for I21[01] PLL workaround */
   2119 	if (sc->sc_type == WM_T_I210)
   2120 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2121 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2122 		/* NVM image release 3.25 has a workaround */
   2123 		if ((sc->sc_nvm_ver_major < 3)
   2124 		    || ((sc->sc_nvm_ver_major == 3)
   2125 			&& (sc->sc_nvm_ver_minor < 25))) {
   2126 			aprint_verbose_dev(sc->sc_dev,
   2127 			    "ROM image version %d.%d is older than 3.25\n",
   2128 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2129 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2130 		}
   2131 	}
   2132 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2133 		wm_pll_workaround_i210(sc);
   2134 
   2135 	wm_get_wakeup(sc);
   2136 	switch (sc->sc_type) {
   2137 	case WM_T_82571:
   2138 	case WM_T_82572:
   2139 	case WM_T_82573:
   2140 	case WM_T_82574:
   2141 	case WM_T_82583:
   2142 	case WM_T_80003:
   2143 	case WM_T_ICH8:
   2144 	case WM_T_ICH9:
   2145 	case WM_T_ICH10:
   2146 	case WM_T_PCH:
   2147 	case WM_T_PCH2:
   2148 	case WM_T_PCH_LPT:
   2149 	case WM_T_PCH_SPT:
   2150 		/* Non-AMT based hardware can now take control from firmware */
   2151 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2152 			wm_get_hw_control(sc);
   2153 		break;
   2154 	default:
   2155 		break;
   2156 	}
   2157 
   2158 	/*
    2159 	 * Read the Ethernet address from the EEPROM, unless it was
    2160 	 * already found in the device properties.
   2161 	 */
   2162 	ea = prop_dictionary_get(dict, "mac-address");
   2163 	if (ea != NULL) {
   2164 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2165 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2166 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2167 	} else {
   2168 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2169 			aprint_error_dev(sc->sc_dev,
   2170 			    "unable to read Ethernet address\n");
   2171 			goto out;
   2172 		}
   2173 	}
   2174 
   2175 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2176 	    ether_sprintf(enaddr));
   2177 
   2178 	/*
   2179 	 * Read the config info from the EEPROM, and set up various
   2180 	 * bits in the control registers based on their contents.
   2181 	 */
   2182 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2183 	if (pn != NULL) {
   2184 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2185 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2186 	} else {
   2187 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2188 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2189 			goto out;
   2190 		}
   2191 	}
   2192 
   2193 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2194 	if (pn != NULL) {
   2195 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2196 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2197 	} else {
   2198 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2199 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2200 			goto out;
   2201 		}
   2202 	}
   2203 
   2204 	/* check for WM_F_WOL */
   2205 	switch (sc->sc_type) {
   2206 	case WM_T_82542_2_0:
   2207 	case WM_T_82542_2_1:
   2208 	case WM_T_82543:
   2209 		/* dummy? */
   2210 		eeprom_data = 0;
   2211 		apme_mask = NVM_CFG3_APME;
   2212 		break;
   2213 	case WM_T_82544:
   2214 		apme_mask = NVM_CFG2_82544_APM_EN;
   2215 		eeprom_data = cfg2;
   2216 		break;
   2217 	case WM_T_82546:
   2218 	case WM_T_82546_3:
   2219 	case WM_T_82571:
   2220 	case WM_T_82572:
   2221 	case WM_T_82573:
   2222 	case WM_T_82574:
   2223 	case WM_T_82583:
   2224 	case WM_T_80003:
   2225 	default:
   2226 		apme_mask = NVM_CFG3_APME;
   2227 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2228 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2229 		break;
   2230 	case WM_T_82575:
   2231 	case WM_T_82576:
   2232 	case WM_T_82580:
   2233 	case WM_T_I350:
   2234 	case WM_T_I354: /* XXX ok? */
   2235 	case WM_T_ICH8:
   2236 	case WM_T_ICH9:
   2237 	case WM_T_ICH10:
   2238 	case WM_T_PCH:
   2239 	case WM_T_PCH2:
   2240 	case WM_T_PCH_LPT:
   2241 	case WM_T_PCH_SPT:
   2242 		/* XXX The funcid should be checked on some devices */
   2243 		apme_mask = WUC_APME;
   2244 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2245 		break;
   2246 	}
   2247 
   2248 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2249 	if ((eeprom_data & apme_mask) != 0)
   2250 		sc->sc_flags |= WM_F_WOL;
   2251 #ifdef WM_DEBUG
   2252 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2253 		printf("WOL\n");
   2254 #endif
   2255 
   2256 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2257 		/* Check NVM for autonegotiation */
   2258 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2259 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2260 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2261 		}
   2262 	}
   2263 
   2264 	/*
    2265 	 * XXX need special handling for some multiple-port cards
    2266 	 * to disable a particular port.
   2267 	 */
   2268 
   2269 	if (sc->sc_type >= WM_T_82544) {
   2270 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2271 		if (pn != NULL) {
   2272 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2273 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2274 		} else {
   2275 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2276 				aprint_error_dev(sc->sc_dev,
   2277 				    "unable to read SWDPIN\n");
   2278 				goto out;
   2279 			}
   2280 		}
   2281 	}
   2282 
   2283 	if (cfg1 & NVM_CFG1_ILOS)
   2284 		sc->sc_ctrl |= CTRL_ILOS;
   2285 
   2286 	/*
   2287 	 * XXX
    2288 	 * This code isn't correct because pins 2 and 3 are located
    2289 	 * at different positions on newer chips. Check all datasheets.
    2290 	 *
    2291 	 * Until this problem is resolved, only do this on chips < 82580.
   2292 	 */
   2293 	if (sc->sc_type <= WM_T_82580) {
   2294 		if (sc->sc_type >= WM_T_82544) {
   2295 			sc->sc_ctrl |=
   2296 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2297 			    CTRL_SWDPIO_SHIFT;
   2298 			sc->sc_ctrl |=
   2299 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2300 			    CTRL_SWDPINS_SHIFT;
   2301 		} else {
   2302 			sc->sc_ctrl |=
   2303 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2304 			    CTRL_SWDPIO_SHIFT;
   2305 		}
   2306 	}
   2307 
   2308 	/* XXX For other than 82580? */
   2309 	if (sc->sc_type == WM_T_82580) {
   2310 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2311 		if (nvmword & __BIT(13))
   2312 			sc->sc_ctrl |= CTRL_ILOS;
   2313 	}
   2314 
   2315 #if 0
   2316 	if (sc->sc_type >= WM_T_82544) {
   2317 		if (cfg1 & NVM_CFG1_IPS0)
   2318 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2319 		if (cfg1 & NVM_CFG1_IPS1)
   2320 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2321 		sc->sc_ctrl_ext |=
   2322 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2323 		    CTRL_EXT_SWDPIO_SHIFT;
   2324 		sc->sc_ctrl_ext |=
   2325 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2326 		    CTRL_EXT_SWDPINS_SHIFT;
   2327 	} else {
   2328 		sc->sc_ctrl_ext |=
   2329 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2330 		    CTRL_EXT_SWDPIO_SHIFT;
   2331 	}
   2332 #endif
   2333 
   2334 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2335 #if 0
   2336 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2337 #endif
   2338 
   2339 	if (sc->sc_type == WM_T_PCH) {
   2340 		uint16_t val;
   2341 
   2342 		/* Save the NVM K1 bit setting */
   2343 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2344 
   2345 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2346 			sc->sc_nvm_k1_enabled = 1;
   2347 		else
   2348 			sc->sc_nvm_k1_enabled = 0;
   2349 	}
   2350 
   2351 	/*
    2352 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2353 	 * media structures accordingly.
   2354 	 */
   2355 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2356 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2357 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2358 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2359 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2360 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2361 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2362 	} else if (sc->sc_type < WM_T_82543 ||
   2363 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2364 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2365 			aprint_error_dev(sc->sc_dev,
   2366 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2367 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2368 		}
   2369 		wm_tbi_mediainit(sc);
   2370 	} else {
   2371 		switch (sc->sc_type) {
   2372 		case WM_T_82575:
   2373 		case WM_T_82576:
   2374 		case WM_T_82580:
   2375 		case WM_T_I350:
   2376 		case WM_T_I354:
   2377 		case WM_T_I210:
   2378 		case WM_T_I211:
   2379 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2380 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2381 			switch (link_mode) {
   2382 			case CTRL_EXT_LINK_MODE_1000KX:
   2383 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2384 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2385 				break;
   2386 			case CTRL_EXT_LINK_MODE_SGMII:
   2387 				if (wm_sgmii_uses_mdio(sc)) {
   2388 					aprint_verbose_dev(sc->sc_dev,
   2389 					    "SGMII(MDIO)\n");
   2390 					sc->sc_flags |= WM_F_SGMII;
   2391 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2392 					break;
   2393 				}
   2394 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2395 				/*FALLTHROUGH*/
   2396 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2397 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2398 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2399 					if (link_mode
   2400 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2401 						sc->sc_mediatype
   2402 						    = WM_MEDIATYPE_COPPER;
   2403 						sc->sc_flags |= WM_F_SGMII;
   2404 					} else {
   2405 						sc->sc_mediatype
   2406 						    = WM_MEDIATYPE_SERDES;
   2407 						aprint_verbose_dev(sc->sc_dev,
   2408 						    "SERDES\n");
   2409 					}
   2410 					break;
   2411 				}
   2412 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2413 					aprint_verbose_dev(sc->sc_dev,
   2414 					    "SERDES\n");
   2415 
   2416 				/* Change current link mode setting */
   2417 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2418 				switch (sc->sc_mediatype) {
   2419 				case WM_MEDIATYPE_COPPER:
   2420 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2421 					break;
   2422 				case WM_MEDIATYPE_SERDES:
   2423 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2424 					break;
   2425 				default:
   2426 					break;
   2427 				}
   2428 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2429 				break;
   2430 			case CTRL_EXT_LINK_MODE_GMII:
   2431 			default:
   2432 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2433 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2434 				break;
   2435 			}
   2436 
    2437 			/* The I2C interface is needed only in SGMII mode */
    2438 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2439 				reg |= CTRL_EXT_I2C_ENA;
    2440 			else
    2441 				reg &= ~CTRL_EXT_I2C_ENA;
   2442 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2443 
   2444 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2445 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2446 			else
   2447 				wm_tbi_mediainit(sc);
   2448 			break;
   2449 		default:
   2450 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2451 				aprint_error_dev(sc->sc_dev,
   2452 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2453 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2454 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2455 		}
   2456 	}
   2457 
   2458 	ifp = &sc->sc_ethercom.ec_if;
   2459 	xname = device_xname(sc->sc_dev);
   2460 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2461 	ifp->if_softc = sc;
   2462 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2463 	ifp->if_extflags = IFEF_START_MPSAFE;
   2464 	ifp->if_ioctl = wm_ioctl;
   2465 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2466 		ifp->if_start = wm_nq_start;
   2467 		if (sc->sc_nqueues > 1)
   2468 			ifp->if_transmit = wm_nq_transmit;
   2469 	} else
   2470 		ifp->if_start = wm_start;
   2471 	ifp->if_watchdog = wm_watchdog;
   2472 	ifp->if_init = wm_init;
   2473 	ifp->if_stop = wm_stop;
   2474 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2475 	IFQ_SET_READY(&ifp->if_snd);
   2476 
   2477 	/* Check for jumbo frame */
   2478 	switch (sc->sc_type) {
   2479 	case WM_T_82573:
   2480 		/* XXX limited to 9234 if ASPM is disabled */
   2481 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2482 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2483 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2484 		break;
   2485 	case WM_T_82571:
   2486 	case WM_T_82572:
   2487 	case WM_T_82574:
   2488 	case WM_T_82575:
   2489 	case WM_T_82576:
   2490 	case WM_T_82580:
   2491 	case WM_T_I350:
    2492 	case WM_T_I354: /* XXX ok? */
   2493 	case WM_T_I210:
   2494 	case WM_T_I211:
   2495 	case WM_T_80003:
   2496 	case WM_T_ICH9:
   2497 	case WM_T_ICH10:
   2498 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2499 	case WM_T_PCH_LPT:
   2500 	case WM_T_PCH_SPT:
   2501 		/* XXX limited to 9234 */
   2502 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2503 		break;
   2504 	case WM_T_PCH:
   2505 		/* XXX limited to 4096 */
   2506 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2507 		break;
   2508 	case WM_T_82542_2_0:
   2509 	case WM_T_82542_2_1:
   2510 	case WM_T_82583:
   2511 	case WM_T_ICH8:
   2512 		/* No support for jumbo frame */
   2513 		break;
   2514 	default:
   2515 		/* ETHER_MAX_LEN_JUMBO */
   2516 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2517 		break;
   2518 	}
   2519 
    2520 	/* If we're an i82543 or greater, we can support VLANs. */
   2521 	if (sc->sc_type >= WM_T_82543)
   2522 		sc->sc_ethercom.ec_capabilities |=
   2523 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2524 
   2525 	/*
    2526 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2527 	 * on i82543 and later.
   2528 	 */
   2529 	if (sc->sc_type >= WM_T_82543) {
   2530 		ifp->if_capabilities |=
   2531 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2532 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2533 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2534 		    IFCAP_CSUM_TCPv6_Tx |
   2535 		    IFCAP_CSUM_UDPv6_Tx;
   2536 	}
   2537 
   2538 	/*
    2539 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2540 	 *
   2541 	 *	82541GI (8086:1076) ... no
   2542 	 *	82572EI (8086:10b9) ... yes
   2543 	 */
   2544 	if (sc->sc_type >= WM_T_82571) {
   2545 		ifp->if_capabilities |=
   2546 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2547 	}
   2548 
   2549 	/*
    2550 	 * If we're an i82544 or greater (except i82547), we can do
   2551 	 * TCP segmentation offload.
   2552 	 */
   2553 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2554 		ifp->if_capabilities |= IFCAP_TSOv4;
   2555 	}
   2556 
   2557 	if (sc->sc_type >= WM_T_82571) {
   2558 		ifp->if_capabilities |= IFCAP_TSOv6;
   2559 	}
   2560 
   2561 #ifdef WM_MPSAFE
   2562 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2563 #else
   2564 	sc->sc_core_lock = NULL;
   2565 #endif
   2566 
   2567 	/* Attach the interface. */
   2568 	if_initialize(ifp);
   2569 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2570 	ether_ifattach(ifp, enaddr);
   2571 	if_register(ifp);
   2572 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2573 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2574 			  RND_FLAG_DEFAULT);
   2575 
   2576 #ifdef WM_EVENT_COUNTERS
   2577 	/* Attach event counters. */
   2578 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2579 	    NULL, xname, "linkintr");
   2580 
   2581 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2582 	    NULL, xname, "tx_xoff");
   2583 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2584 	    NULL, xname, "tx_xon");
   2585 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2586 	    NULL, xname, "rx_xoff");
   2587 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2588 	    NULL, xname, "rx_xon");
   2589 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2590 	    NULL, xname, "rx_macctl");
   2591 #endif /* WM_EVENT_COUNTERS */
   2592 
   2593 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2594 		pmf_class_network_register(self, ifp);
   2595 	else
   2596 		aprint_error_dev(self, "couldn't establish power handler\n");
   2597 
   2598 	sc->sc_flags |= WM_F_ATTACHED;
   2599  out:
   2600 	return;
   2601 }
   2602 
   2603 /* The detach function (ca_detach) */
   2604 static int
   2605 wm_detach(device_t self, int flags __unused)
   2606 {
   2607 	struct wm_softc *sc = device_private(self);
   2608 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2609 	int i;
   2610 
   2611 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2612 		return 0;
   2613 
   2614 	/* Stop the interface. Callouts are stopped in it. */
   2615 	wm_stop(ifp, 1);
   2616 
   2617 	pmf_device_deregister(self);
   2618 
   2619 	/* Tell the firmware about the release */
   2620 	WM_CORE_LOCK(sc);
   2621 	wm_release_manageability(sc);
   2622 	wm_release_hw_control(sc);
   2623 	WM_CORE_UNLOCK(sc);
   2624 
   2625 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2626 
   2627 	/* Delete all remaining media. */
   2628 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2629 
   2630 	ether_ifdetach(ifp);
   2631 	if_detach(ifp);
   2632 	if_percpuq_destroy(sc->sc_ipq);
   2633 
   2634 	/* Unload RX dmamaps and free mbufs */
   2635 	for (i = 0; i < sc->sc_nqueues; i++) {
   2636 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2637 		mutex_enter(rxq->rxq_lock);
   2638 		wm_rxdrain(rxq);
   2639 		mutex_exit(rxq->rxq_lock);
   2640 	}
   2641 	/* Must unlock here */
   2642 
   2643 	/* Disestablish the interrupt handler */
   2644 	for (i = 0; i < sc->sc_nintrs; i++) {
   2645 		if (sc->sc_ihs[i] != NULL) {
   2646 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2647 			sc->sc_ihs[i] = NULL;
   2648 		}
   2649 	}
   2650 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2651 
   2652 	wm_free_txrx_queues(sc);
   2653 
   2654 	/* Unmap the registers */
   2655 	if (sc->sc_ss) {
   2656 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2657 		sc->sc_ss = 0;
   2658 	}
   2659 	if (sc->sc_ios) {
   2660 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2661 		sc->sc_ios = 0;
   2662 	}
   2663 	if (sc->sc_flashs) {
   2664 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2665 		sc->sc_flashs = 0;
   2666 	}
   2667 
   2668 	if (sc->sc_core_lock)
   2669 		mutex_obj_free(sc->sc_core_lock);
   2670 	if (sc->sc_ich_nvmmtx)
   2671 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2672 
   2673 	return 0;
   2674 }
   2675 
   2676 static bool
   2677 wm_suspend(device_t self, const pmf_qual_t *qual)
   2678 {
   2679 	struct wm_softc *sc = device_private(self);
   2680 
   2681 	wm_release_manageability(sc);
   2682 	wm_release_hw_control(sc);
   2683 #ifdef WM_WOL
   2684 	wm_enable_wakeup(sc);
   2685 #endif
   2686 
   2687 	return true;
   2688 }
   2689 
   2690 static bool
   2691 wm_resume(device_t self, const pmf_qual_t *qual)
   2692 {
   2693 	struct wm_softc *sc = device_private(self);
   2694 
   2695 	wm_init_manageability(sc);
   2696 
   2697 	return true;
   2698 }
   2699 
   2700 /*
   2701  * wm_watchdog:		[ifnet interface function]
   2702  *
   2703  *	Watchdog timer handler.
   2704  */
   2705 static void
   2706 wm_watchdog(struct ifnet *ifp)
   2707 {
   2708 	int qid;
   2709 	struct wm_softc *sc = ifp->if_softc;
   2710 
   2711 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2712 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2713 
   2714 		wm_watchdog_txq(ifp, txq);
   2715 	}
   2716 
   2717 	/* Reset the interface. */
   2718 	(void) wm_init(ifp);
   2719 
   2720 	/*
    2721 	 * There is still some upper-layer processing that calls
    2722 	 * ifp->if_start(), e.g. ALTQ.
   2723 	 */
   2724 	/* Try to get more packets going. */
   2725 	ifp->if_start(ifp);
   2726 }
   2727 
   2728 static void
   2729 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2730 {
   2731 	struct wm_softc *sc = ifp->if_softc;
   2732 
   2733 	/*
   2734 	 * Since we're using delayed interrupts, sweep up
   2735 	 * before we report an error.
   2736 	 */
   2737 	mutex_enter(txq->txq_lock);
   2738 	wm_txeof(sc, txq);
   2739 	mutex_exit(txq->txq_lock);
   2740 
   2741 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2742 #ifdef WM_DEBUG
   2743 		int i, j;
   2744 		struct wm_txsoft *txs;
   2745 #endif
   2746 		log(LOG_ERR,
   2747 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2748 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2749 		    txq->txq_next);
   2750 		ifp->if_oerrors++;
   2751 #ifdef WM_DEBUG
    2752 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2753 		    i = WM_NEXTTXS(txq, i)) {
    2754 			txs = &txq->txq_soft[i];
    2755 			printf("txs %d tx %d -> %d\n",
    2756 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2757 			for (j = txs->txs_firstdesc; ;
    2758 			    j = WM_NEXTTX(txq, j)) {
    2759 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2760 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2761 				printf("\t %#08x%08x\n",
    2762 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2763 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2764 				if (j == txs->txs_lastdesc)
    2765 					break;
    2766 			}
    2767 		}
   2768 #endif
   2769 	}
   2770 }
   2771 
   2772 /*
   2773  * wm_tick:
   2774  *
   2775  *	One second timer, used to check link status, sweep up
   2776  *	completed transmit jobs, etc.
   2777  */
   2778 static void
   2779 wm_tick(void *arg)
   2780 {
   2781 	struct wm_softc *sc = arg;
   2782 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2783 #ifndef WM_MPSAFE
   2784 	int s = splnet();
   2785 #endif
   2786 
   2787 	WM_CORE_LOCK(sc);
   2788 
   2789 	if (sc->sc_stopping)
   2790 		goto out;
   2791 
   2792 	if (sc->sc_type >= WM_T_82542_2_1) {
   2793 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2794 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2795 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2796 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2797 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2798 	}
   2799 
   2800 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2801 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2802 	    + CSR_READ(sc, WMREG_CRCERRS)
   2803 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2804 	    + CSR_READ(sc, WMREG_SYMERRC)
   2805 	    + CSR_READ(sc, WMREG_RXERRC)
   2806 	    + CSR_READ(sc, WMREG_SEC)
   2807 	    + CSR_READ(sc, WMREG_CEXTERR)
   2808 	    + CSR_READ(sc, WMREG_RLEC);
   2809 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2810 
   2811 	if (sc->sc_flags & WM_F_HAS_MII)
   2812 		mii_tick(&sc->sc_mii);
   2813 	else if ((sc->sc_type >= WM_T_82575)
   2814 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2815 		wm_serdes_tick(sc);
   2816 	else
   2817 		wm_tbi_tick(sc);
   2818 
   2819 out:
   2820 	WM_CORE_UNLOCK(sc);
   2821 #ifndef WM_MPSAFE
   2822 	splx(s);
   2823 #endif
   2824 
   2825 	if (!sc->sc_stopping)
   2826 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2827 }
   2828 
   2829 static int
   2830 wm_ifflags_cb(struct ethercom *ec)
   2831 {
   2832 	struct ifnet *ifp = &ec->ec_if;
   2833 	struct wm_softc *sc = ifp->if_softc;
   2834 	int rc = 0;
   2835 
   2836 	WM_CORE_LOCK(sc);
   2837 
   2838 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2839 	sc->sc_if_flags = ifp->if_flags;
   2840 
   2841 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2842 		rc = ENETRESET;
   2843 		goto out;
   2844 	}
   2845 
   2846 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2847 		wm_set_filter(sc);
   2848 
   2849 	wm_set_vlan(sc);
   2850 
   2851 out:
   2852 	WM_CORE_UNLOCK(sc);
   2853 
   2854 	return rc;
   2855 }
   2856 
   2857 /*
   2858  * wm_ioctl:		[ifnet interface function]
   2859  *
   2860  *	Handle control requests from the operator.
   2861  */
   2862 static int
   2863 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2864 {
   2865 	struct wm_softc *sc = ifp->if_softc;
   2866 	struct ifreq *ifr = (struct ifreq *) data;
   2867 	struct ifaddr *ifa = (struct ifaddr *)data;
   2868 	struct sockaddr_dl *sdl;
   2869 	int s, error;
   2870 
   2871 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2872 		device_xname(sc->sc_dev), __func__));
   2873 
   2874 #ifndef WM_MPSAFE
   2875 	s = splnet();
   2876 #endif
   2877 	switch (cmd) {
   2878 	case SIOCSIFMEDIA:
   2879 	case SIOCGIFMEDIA:
   2880 		WM_CORE_LOCK(sc);
   2881 		/* Flow control requires full-duplex mode. */
   2882 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2883 		    (ifr->ifr_media & IFM_FDX) == 0)
   2884 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2885 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2886 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2887 				/* We can do both TXPAUSE and RXPAUSE. */
   2888 				ifr->ifr_media |=
   2889 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2890 			}
   2891 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2892 		}
   2893 		WM_CORE_UNLOCK(sc);
   2894 #ifdef WM_MPSAFE
   2895 		s = splnet();
   2896 #endif
   2897 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2898 #ifdef WM_MPSAFE
   2899 		splx(s);
   2900 #endif
   2901 		break;
   2902 	case SIOCINITIFADDR:
   2903 		WM_CORE_LOCK(sc);
   2904 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2905 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2906 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2907 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    2908 			/* Unicast address goes into the first RAL slot */
   2909 			wm_set_filter(sc);
   2910 			error = 0;
   2911 			WM_CORE_UNLOCK(sc);
   2912 			break;
   2913 		}
   2914 		WM_CORE_UNLOCK(sc);
   2915 		/*FALLTHROUGH*/
   2916 	default:
   2917 #ifdef WM_MPSAFE
   2918 		s = splnet();
   2919 #endif
   2920 		/* It may call wm_start, so unlock here */
   2921 		error = ether_ioctl(ifp, cmd, data);
   2922 #ifdef WM_MPSAFE
   2923 		splx(s);
   2924 #endif
   2925 		if (error != ENETRESET)
   2926 			break;
   2927 
   2928 		error = 0;
   2929 
   2930 		if (cmd == SIOCSIFCAP) {
   2931 			error = (*ifp->if_init)(ifp);
   2932 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2933 			;
   2934 		else if (ifp->if_flags & IFF_RUNNING) {
   2935 			/*
   2936 			 * Multicast list has changed; set the hardware filter
   2937 			 * accordingly.
   2938 			 */
   2939 			WM_CORE_LOCK(sc);
   2940 			wm_set_filter(sc);
   2941 			WM_CORE_UNLOCK(sc);
   2942 		}
   2943 		break;
   2944 	}
   2945 
   2946 #ifndef WM_MPSAFE
   2947 	splx(s);
   2948 #endif
   2949 	return error;
   2950 }
   2951 
   2952 /* MAC address related */
   2953 
   2954 /*
    2955  * Get the NVM offset of the alternative MAC address and return it.
    2956  * If an error occurs or no valid alternative address exists, return 0.
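          * The word at NVM_OFF_ALT_MAC_ADDR_PTR points at a block of
          * per-function MAC addresses; 0x0000 and 0xffff mark an
          * unprogrammed pointer.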
   2957  */
   2958 static uint16_t
   2959 wm_check_alt_mac_addr(struct wm_softc *sc)
   2960 {
   2961 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2962 	uint16_t offset = NVM_OFF_MACADDR;
   2963 
   2964 	/* Try to read alternative MAC address pointer */
   2965 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2966 		return 0;
   2967 
    2968 	/* Check whether the pointer is valid or not. */
   2969 	if ((offset == 0x0000) || (offset == 0xffff))
   2970 		return 0;
   2971 
   2972 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2973 	/*
    2974 	 * Check whether the alternative MAC address is valid or not.
    2975 	 * Some cards have a non-0xffff pointer but don't actually use
    2976 	 * an alternative MAC address.
    2977 	 *
    2978 	 * Do this by checking that the multicast (group) bit is clear.
   2979 	 */
   2980 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2981 		if (((myea[0] & 0xff) & 0x01) == 0)
   2982 			return offset; /* Found */
   2983 
   2984 	/* Not found */
   2985 	return 0;
   2986 }
   2987 
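         /*
          * wm_read_mac_addr:
          *
          *	Read the station address from the NVM into enaddr[].  NVM
          *	words are little-endian: a word of 0x1100 yields the bytes
          *	0x00, 0x11.  On the odd-numbered function of some dual port
          *	cards the address is derived by toggling the LSB of the
          *	last octet.
          */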
   2988 static int
   2989 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2990 {
   2991 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2992 	uint16_t offset = NVM_OFF_MACADDR;
   2993 	int do_invert = 0;
   2994 
   2995 	switch (sc->sc_type) {
   2996 	case WM_T_82580:
   2997 	case WM_T_I350:
   2998 	case WM_T_I354:
   2999 		/* EEPROM Top Level Partitioning */
   3000 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3001 		break;
   3002 	case WM_T_82571:
   3003 	case WM_T_82575:
   3004 	case WM_T_82576:
   3005 	case WM_T_80003:
   3006 	case WM_T_I210:
   3007 	case WM_T_I211:
   3008 		offset = wm_check_alt_mac_addr(sc);
   3009 		if (offset == 0)
   3010 			if ((sc->sc_funcid & 0x01) == 1)
   3011 				do_invert = 1;
   3012 		break;
   3013 	default:
   3014 		if ((sc->sc_funcid & 0x01) == 1)
   3015 			do_invert = 1;
   3016 		break;
   3017 	}
   3018 
   3019 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   3020 		myea) != 0)
   3021 		goto bad;
   3022 
   3023 	enaddr[0] = myea[0] & 0xff;
   3024 	enaddr[1] = myea[0] >> 8;
   3025 	enaddr[2] = myea[1] & 0xff;
   3026 	enaddr[3] = myea[1] >> 8;
   3027 	enaddr[4] = myea[2] & 0xff;
   3028 	enaddr[5] = myea[2] >> 8;
   3029 
   3030 	/*
   3031 	 * Toggle the LSB of the MAC address on the second port
   3032 	 * of some dual port cards.
   3033 	 */
   3034 	if (do_invert != 0)
   3035 		enaddr[5] ^= 1;
   3036 
   3037 	return 0;
   3038 
   3039  bad:
   3040 	return -1;
   3041 }
   3042 
   3043 /*
   3044  * wm_set_ral:
   3045  *
    3046  *	Set an entry in the receive address list.
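          *	The address is packed little-endian; for example,
          *	00:11:22:33:44:55 becomes RAL_LO = 0x33221100 and
          *	RAL_HI = RAL_AV | 0x00005544.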
   3047  */
   3048 static void
   3049 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3050 {
   3051 	uint32_t ral_lo, ral_hi;
   3052 
   3053 	if (enaddr != NULL) {
   3054 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3055 		    (enaddr[3] << 24);
   3056 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3057 		ral_hi |= RAL_AV;
   3058 	} else {
   3059 		ral_lo = 0;
   3060 		ral_hi = 0;
   3061 	}
   3062 
   3063 	if (sc->sc_type >= WM_T_82544) {
   3064 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3065 		    ral_lo);
   3066 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3067 		    ral_hi);
   3068 	} else {
   3069 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3070 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3071 	}
   3072 }
   3073 
   3074 /*
   3075  * wm_mchash:
   3076  *
   3077  *	Compute the hash of the multicast address for the 4096-bit
   3078  *	multicast filter.
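          *	(ICH/PCH variants use a 1024-bit filter and so a 10-bit
          *	hash.)  Only the last two octets participate; for example,
          *	with sc_mchash_type == 0 on non-ICH parts, 01:00:5e:00:00:01
          *	hashes to (0x00 >> 4) | (0x01 << 4) = 0x010, which selects
          *	bit 16 of multicast-table word 0 in wm_set_filter().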
   3079  */
   3080 static uint32_t
   3081 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3082 {
   3083 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3084 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3085 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3086 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3087 	uint32_t hash;
   3088 
   3089 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3090 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3091 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3092 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3093 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3094 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3095 		return (hash & 0x3ff);
   3096 	}
   3097 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3098 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3099 
   3100 	return (hash & 0xfff);
   3101 }
   3102 
   3103 /*
   3104  * wm_set_filter:
   3105  *
   3106  *	Set up the receive filter.
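          *	The station address goes into RAR[0] and the remaining
          *	receive-address slots are cleared; multicast addresses are
          *	hashed into the MTA.  Promiscuous mode, or a multicast
          *	range the hash filter cannot express, falls back to
          *	accepting all multicasts (RCTL_MPE).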
   3107  */
   3108 static void
   3109 wm_set_filter(struct wm_softc *sc)
   3110 {
   3111 	struct ethercom *ec = &sc->sc_ethercom;
   3112 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3113 	struct ether_multi *enm;
   3114 	struct ether_multistep step;
   3115 	bus_addr_t mta_reg;
   3116 	uint32_t hash, reg, bit;
   3117 	int i, size, ralmax;
   3118 
   3119 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3120 		device_xname(sc->sc_dev), __func__));
   3121 
   3122 	if (sc->sc_type >= WM_T_82544)
   3123 		mta_reg = WMREG_CORDOVA_MTA;
   3124 	else
   3125 		mta_reg = WMREG_MTA;
   3126 
   3127 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3128 
   3129 	if (ifp->if_flags & IFF_BROADCAST)
   3130 		sc->sc_rctl |= RCTL_BAM;
   3131 	if (ifp->if_flags & IFF_PROMISC) {
   3132 		sc->sc_rctl |= RCTL_UPE;
   3133 		goto allmulti;
   3134 	}
   3135 
   3136 	/*
   3137 	 * Set the station address in the first RAL slot, and
   3138 	 * clear the remaining slots.
   3139 	 */
   3140 	if (sc->sc_type == WM_T_ICH8)
    3141 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3142 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3143 	    || (sc->sc_type == WM_T_PCH))
   3144 		size = WM_RAL_TABSIZE_ICH8;
   3145 	else if (sc->sc_type == WM_T_PCH2)
   3146 		size = WM_RAL_TABSIZE_PCH2;
    3147 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3148 		size = WM_RAL_TABSIZE_PCH_LPT;
   3149 	else if (sc->sc_type == WM_T_82575)
   3150 		size = WM_RAL_TABSIZE_82575;
   3151 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3152 		size = WM_RAL_TABSIZE_82576;
   3153 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3154 		size = WM_RAL_TABSIZE_I350;
   3155 	else
   3156 		size = WM_RAL_TABSIZE;
   3157 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3158 
   3159 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3160 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3161 		switch (i) {
   3162 		case 0:
   3163 			/* We can use all entries */
   3164 			ralmax = size;
   3165 			break;
   3166 		case 1:
   3167 			/* Only RAR[0] */
   3168 			ralmax = 1;
   3169 			break;
   3170 		default:
   3171 			/* available SHRA + RAR[0] */
   3172 			ralmax = i + 1;
   3173 		}
   3174 	} else
   3175 		ralmax = size;
   3176 	for (i = 1; i < size; i++) {
   3177 		if (i < ralmax)
   3178 			wm_set_ral(sc, NULL, i);
   3179 	}
   3180 
   3181 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3182 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3183 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3184 	    || (sc->sc_type == WM_T_PCH_SPT))
   3185 		size = WM_ICH8_MC_TABSIZE;
   3186 	else
   3187 		size = WM_MC_TABSIZE;
   3188 	/* Clear out the multicast table. */
   3189 	for (i = 0; i < size; i++)
   3190 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3191 
   3192 	ETHER_FIRST_MULTI(step, ec, enm);
   3193 	while (enm != NULL) {
   3194 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3195 			/*
   3196 			 * We must listen to a range of multicast addresses.
   3197 			 * For now, just accept all multicasts, rather than
   3198 			 * trying to set only those filter bits needed to match
   3199 			 * the range.  (At this time, the only use of address
   3200 			 * ranges is for IP multicast routing, for which the
   3201 			 * range is big enough to require all bits set.)
   3202 			 */
   3203 			goto allmulti;
   3204 		}
   3205 
   3206 		hash = wm_mchash(sc, enm->enm_addrlo);
   3207 
   3208 		reg = (hash >> 5);
   3209 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3210 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3211 		    || (sc->sc_type == WM_T_PCH2)
   3212 		    || (sc->sc_type == WM_T_PCH_LPT)
   3213 		    || (sc->sc_type == WM_T_PCH_SPT))
   3214 			reg &= 0x1f;
   3215 		else
   3216 			reg &= 0x7f;
   3217 		bit = hash & 0x1f;
   3218 
   3219 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3220 		hash |= 1U << bit;
   3221 
   3222 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3223 			/*
    3224 			 * 82544 Errata 9: Certain registers cannot be written
   3225 			 * with particular alignments in PCI-X bus operation
   3226 			 * (FCAH, MTA and VFTA).
   3227 			 */
   3228 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3229 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3230 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3231 		} else
   3232 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3233 
   3234 		ETHER_NEXT_MULTI(step, enm);
   3235 	}
   3236 
   3237 	ifp->if_flags &= ~IFF_ALLMULTI;
   3238 	goto setit;
   3239 
   3240  allmulti:
   3241 	ifp->if_flags |= IFF_ALLMULTI;
   3242 	sc->sc_rctl |= RCTL_MPE;
   3243 
   3244  setit:
   3245 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3246 }
   3247 
   3248 /* Reset and init related */
   3249 
   3250 static void
   3251 wm_set_vlan(struct wm_softc *sc)
   3252 {
   3253 
   3254 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3255 		device_xname(sc->sc_dev), __func__));
   3256 
   3257 	/* Deal with VLAN enables. */
   3258 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3259 		sc->sc_ctrl |= CTRL_VME;
   3260 	else
   3261 		sc->sc_ctrl &= ~CTRL_VME;
   3262 
   3263 	/* Write the control registers. */
   3264 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3265 }
   3266 
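         /*
          * wm_set_pcie_completion_timeout:
          *
          *	If the PCIe completion timeout is still at its default of
          *	zero, set it to 10ms via GCR on devices without a version-2
          *	capability, or to 16ms via the Device Control 2 register
          *	otherwise.  Completion timeout resend is disabled in either
          *	case.
          */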
   3267 static void
   3268 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3269 {
   3270 	uint32_t gcr;
   3271 	pcireg_t ctrl2;
   3272 
   3273 	gcr = CSR_READ(sc, WMREG_GCR);
   3274 
   3275 	/* Only take action if timeout value is defaulted to 0 */
   3276 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3277 		goto out;
   3278 
   3279 	if ((gcr & GCR_CAP_VER2) == 0) {
   3280 		gcr |= GCR_CMPL_TMOUT_10MS;
   3281 		goto out;
   3282 	}
   3283 
   3284 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3285 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3286 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3287 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3288 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3289 
   3290 out:
   3291 	/* Disable completion timeout resend */
   3292 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3293 
   3294 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3295 }
   3296 
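         /*
          * wm_get_auto_rd_done:
          *
          *	On device types that support it, wait up to 10ms for the
          *	NVM auto-read to complete, as indicated by EECD_EE_AUTORD.
          */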
   3297 void
   3298 wm_get_auto_rd_done(struct wm_softc *sc)
   3299 {
   3300 	int i;
   3301 
   3302 	/* wait for eeprom to reload */
   3303 	switch (sc->sc_type) {
   3304 	case WM_T_82571:
   3305 	case WM_T_82572:
   3306 	case WM_T_82573:
   3307 	case WM_T_82574:
   3308 	case WM_T_82583:
   3309 	case WM_T_82575:
   3310 	case WM_T_82576:
   3311 	case WM_T_82580:
   3312 	case WM_T_I350:
   3313 	case WM_T_I354:
   3314 	case WM_T_I210:
   3315 	case WM_T_I211:
   3316 	case WM_T_80003:
   3317 	case WM_T_ICH8:
   3318 	case WM_T_ICH9:
   3319 		for (i = 0; i < 10; i++) {
   3320 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3321 				break;
   3322 			delay(1000);
   3323 		}
   3324 		if (i == 10) {
   3325 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3326 			    "complete\n", device_xname(sc->sc_dev));
   3327 		}
   3328 		break;
   3329 	default:
   3330 		break;
   3331 	}
   3332 }
   3333 
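         /*
          * wm_lan_init_done:
          *
          *	On ICH10 and PCH variants, wait for STATUS_LAN_INIT_DONE
          *	to be set after reset, then clear it.
          */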
   3334 void
   3335 wm_lan_init_done(struct wm_softc *sc)
   3336 {
   3337 	uint32_t reg = 0;
   3338 	int i;
   3339 
   3340 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3341 		device_xname(sc->sc_dev), __func__));
   3342 
   3343 	/* Wait for eeprom to reload */
   3344 	switch (sc->sc_type) {
   3345 	case WM_T_ICH10:
   3346 	case WM_T_PCH:
   3347 	case WM_T_PCH2:
   3348 	case WM_T_PCH_LPT:
   3349 	case WM_T_PCH_SPT:
   3350 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3351 			reg = CSR_READ(sc, WMREG_STATUS);
   3352 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3353 				break;
   3354 			delay(100);
   3355 		}
   3356 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3357 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3358 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3359 		}
   3360 		break;
   3361 	default:
   3362 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3363 		    __func__);
   3364 		break;
   3365 	}
   3366 
   3367 	reg &= ~STATUS_LAN_INIT_DONE;
   3368 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3369 }
   3370 
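         /*
          * wm_get_cfg_done:
          *
          *	Wait until the hardware has finished loading its
          *	configuration after reset.  The mechanism (a fixed delay,
          *	polling EEMNGCTL, or the LAN-init/auto-read wait) depends
          *	on the device type.
          */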
   3371 void
   3372 wm_get_cfg_done(struct wm_softc *sc)
   3373 {
   3374 	int mask;
   3375 	uint32_t reg;
   3376 	int i;
   3377 
   3378 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3379 		device_xname(sc->sc_dev), __func__));
   3380 
   3381 	/* Wait for eeprom to reload */
   3382 	switch (sc->sc_type) {
   3383 	case WM_T_82542_2_0:
   3384 	case WM_T_82542_2_1:
   3385 		/* null */
   3386 		break;
   3387 	case WM_T_82543:
   3388 	case WM_T_82544:
   3389 	case WM_T_82540:
   3390 	case WM_T_82545:
   3391 	case WM_T_82545_3:
   3392 	case WM_T_82546:
   3393 	case WM_T_82546_3:
   3394 	case WM_T_82541:
   3395 	case WM_T_82541_2:
   3396 	case WM_T_82547:
   3397 	case WM_T_82547_2:
   3398 	case WM_T_82573:
   3399 	case WM_T_82574:
   3400 	case WM_T_82583:
   3401 		/* generic */
   3402 		delay(10*1000);
   3403 		break;
   3404 	case WM_T_80003:
   3405 	case WM_T_82571:
   3406 	case WM_T_82572:
   3407 	case WM_T_82575:
   3408 	case WM_T_82576:
   3409 	case WM_T_82580:
   3410 	case WM_T_I350:
   3411 	case WM_T_I354:
   3412 	case WM_T_I210:
   3413 	case WM_T_I211:
   3414 		if (sc->sc_type == WM_T_82571) {
   3415 			/* Only 82571 shares port 0 */
   3416 			mask = EEMNGCTL_CFGDONE_0;
   3417 		} else
   3418 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3419 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3420 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3421 				break;
   3422 			delay(1000);
   3423 		}
   3424 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3425 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3426 				device_xname(sc->sc_dev), __func__));
   3427 		}
   3428 		break;
   3429 	case WM_T_ICH8:
   3430 	case WM_T_ICH9:
   3431 	case WM_T_ICH10:
   3432 	case WM_T_PCH:
   3433 	case WM_T_PCH2:
   3434 	case WM_T_PCH_LPT:
   3435 	case WM_T_PCH_SPT:
   3436 		delay(10*1000);
   3437 		if (sc->sc_type >= WM_T_ICH10)
   3438 			wm_lan_init_done(sc);
   3439 		else
   3440 			wm_get_auto_rd_done(sc);
   3441 
   3442 		reg = CSR_READ(sc, WMREG_STATUS);
   3443 		if ((reg & STATUS_PHYRA) != 0)
   3444 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3445 		break;
   3446 	default:
   3447 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3448 		    __func__);
   3449 		break;
   3450 	}
   3451 }
   3452 
   3453 /* Init hardware bits */
   3454 void
   3455 wm_initialize_hardware_bits(struct wm_softc *sc)
   3456 {
   3457 	uint32_t tarc0, tarc1, reg;
   3458 
   3459 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3460 		device_xname(sc->sc_dev), __func__));
   3461 
    3462 	/* For the 82571 variants, 80003 and ICHs */
   3463 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3464 	    || (sc->sc_type >= WM_T_80003)) {
   3465 
   3466 		/* Transmit Descriptor Control 0 */
   3467 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3468 		reg |= TXDCTL_COUNT_DESC;
   3469 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3470 
   3471 		/* Transmit Descriptor Control 1 */
   3472 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3473 		reg |= TXDCTL_COUNT_DESC;
   3474 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3475 
   3476 		/* TARC0 */
   3477 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3478 		switch (sc->sc_type) {
   3479 		case WM_T_82571:
   3480 		case WM_T_82572:
   3481 		case WM_T_82573:
   3482 		case WM_T_82574:
   3483 		case WM_T_82583:
   3484 		case WM_T_80003:
   3485 			/* Clear bits 30..27 */
   3486 			tarc0 &= ~__BITS(30, 27);
   3487 			break;
   3488 		default:
   3489 			break;
   3490 		}
   3491 
   3492 		switch (sc->sc_type) {
   3493 		case WM_T_82571:
   3494 		case WM_T_82572:
   3495 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3496 
   3497 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3498 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3499 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3500 			/* 8257[12] Errata No.7 */
    3501 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3502 
   3503 			/* TARC1 bit 28 */
   3504 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3505 				tarc1 &= ~__BIT(28);
   3506 			else
   3507 				tarc1 |= __BIT(28);
   3508 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3509 
   3510 			/*
   3511 			 * 8257[12] Errata No.13
    3512 			 * Disable Dynamic Clock Gating.
   3513 			 */
   3514 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3515 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3516 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3517 			break;
   3518 		case WM_T_82573:
   3519 		case WM_T_82574:
   3520 		case WM_T_82583:
   3521 			if ((sc->sc_type == WM_T_82574)
   3522 			    || (sc->sc_type == WM_T_82583))
   3523 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3524 
   3525 			/* Extended Device Control */
   3526 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3527 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3528 			reg |= __BIT(22);	/* Set bit 22 */
   3529 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3530 
   3531 			/* Device Control */
   3532 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3533 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3534 
   3535 			/* PCIe Control Register */
   3536 			/*
   3537 			 * 82573 Errata (unknown).
   3538 			 *
   3539 			 * 82574 Errata 25 and 82583 Errata 12
   3540 			 * "Dropped Rx Packets":
    3541 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3542 			 */
   3543 			reg = CSR_READ(sc, WMREG_GCR);
   3544 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3545 			CSR_WRITE(sc, WMREG_GCR, reg);
   3546 
   3547 			if ((sc->sc_type == WM_T_82574)
   3548 			    || (sc->sc_type == WM_T_82583)) {
   3549 				/*
   3550 				 * Document says this bit must be set for
   3551 				 * proper operation.
   3552 				 */
   3553 				reg = CSR_READ(sc, WMREG_GCR);
   3554 				reg |= __BIT(22);
   3555 				CSR_WRITE(sc, WMREG_GCR, reg);
   3556 
   3557 				/*
    3558 				 * Apply a workaround for a hardware erratum
    3559 				 * documented in the errata docs.  It fixes an
    3560 				 * issue where some error-prone or unreliable
    3561 				 * PCIe completions occur, particularly with
    3562 				 * ASPM enabled.  Without the fix, the issue
    3563 				 * can cause Tx timeouts.
   3564 				 */
   3565 				reg = CSR_READ(sc, WMREG_GCR2);
   3566 				reg |= __BIT(0);
   3567 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3568 			}
   3569 			break;
   3570 		case WM_T_80003:
   3571 			/* TARC0 */
   3572 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3573 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3574 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3575 
   3576 			/* TARC1 bit 28 */
   3577 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3578 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3579 				tarc1 &= ~__BIT(28);
   3580 			else
   3581 				tarc1 |= __BIT(28);
   3582 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3583 			break;
   3584 		case WM_T_ICH8:
   3585 		case WM_T_ICH9:
   3586 		case WM_T_ICH10:
   3587 		case WM_T_PCH:
   3588 		case WM_T_PCH2:
   3589 		case WM_T_PCH_LPT:
   3590 		case WM_T_PCH_SPT:
   3591 			/* TARC0 */
   3592 			if ((sc->sc_type == WM_T_ICH8)
   3593 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3594 				/* Set TARC0 bits 29 and 28 */
   3595 				tarc0 |= __BITS(29, 28);
   3596 			}
   3597 			/* Set TARC0 bits 23,24,26,27 */
   3598 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3599 
   3600 			/* CTRL_EXT */
   3601 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3602 			reg |= __BIT(22);	/* Set bit 22 */
   3603 			/*
   3604 			 * Enable PHY low-power state when MAC is at D3
   3605 			 * w/o WoL
   3606 			 */
   3607 			if (sc->sc_type >= WM_T_PCH)
   3608 				reg |= CTRL_EXT_PHYPDEN;
   3609 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3610 
   3611 			/* TARC1 */
   3612 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3613 			/* bit 28 */
   3614 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3615 				tarc1 &= ~__BIT(28);
   3616 			else
   3617 				tarc1 |= __BIT(28);
   3618 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3619 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3620 
   3621 			/* Device Status */
   3622 			if (sc->sc_type == WM_T_ICH8) {
   3623 				reg = CSR_READ(sc, WMREG_STATUS);
   3624 				reg &= ~__BIT(31);
   3625 				CSR_WRITE(sc, WMREG_STATUS, reg);
    3627 			}
   3628 
   3629 			/* IOSFPC */
   3630 			if (sc->sc_type == WM_T_PCH_SPT) {
   3631 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3632 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3633 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3634 			}
   3635 			/*
    3636 			 * To work around a descriptor data corruption issue
    3637 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3638 			 * filtering capability.
   3639 			 */
   3640 			reg = CSR_READ(sc, WMREG_RFCTL);
   3641 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3642 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3643 			break;
   3644 		default:
   3645 			break;
   3646 		}
   3647 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3648 
   3649 		/*
   3650 		 * 8257[12] Errata No.52 and some others.
   3651 		 * Avoid RSS Hash Value bug.
   3652 		 */
   3653 		switch (sc->sc_type) {
   3654 		case WM_T_82571:
   3655 		case WM_T_82572:
   3656 		case WM_T_82573:
   3657 		case WM_T_80003:
   3658 		case WM_T_ICH8:
   3659 			reg = CSR_READ(sc, WMREG_RFCTL);
    3660 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3661 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3662 			break;
   3663 		default:
   3664 			break;
   3665 		}
   3666 	}
   3667 }
   3668 
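         /*
          * Translate the encoded RXPBS packet buffer size of 82580-class
          * devices via a lookup table; out-of-range encodings map to 0.
          */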
   3669 static uint32_t
   3670 wm_rxpbs_adjust_82580(uint32_t val)
   3671 {
   3672 	uint32_t rv = 0;
   3673 
   3674 	if (val < __arraycount(wm_82580_rxpbs_table))
   3675 		rv = wm_82580_rxpbs_table[val];
   3676 
   3677 	return rv;
   3678 }
   3679 
   3680 /*
   3681  * wm_reset:
   3682  *
    3683  *	Reset the chip (all supported types, not just the i82542).
   3684  */
   3685 static void
   3686 wm_reset(struct wm_softc *sc)
   3687 {
   3688 	int phy_reset = 0;
   3689 	int i, error = 0;
   3690 	uint32_t reg, mask;
   3691 
   3692 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3693 		device_xname(sc->sc_dev), __func__));
   3694 	KASSERT(sc->sc_type != 0);
   3695 
   3696 	/*
   3697 	 * Allocate on-chip memory according to the MTU size.
   3698 	 * The Packet Buffer Allocation register must be written
   3699 	 * before the chip is reset.
   3700 	 */
   3701 	switch (sc->sc_type) {
   3702 	case WM_T_82547:
   3703 	case WM_T_82547_2:
   3704 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3705 		    PBA_22K : PBA_30K;
   3706 		for (i = 0; i < sc->sc_nqueues; i++) {
   3707 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3708 			txq->txq_fifo_head = 0;
   3709 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3710 			txq->txq_fifo_size =
   3711 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3712 			txq->txq_fifo_stall = 0;
   3713 		}
   3714 		break;
   3715 	case WM_T_82571:
   3716 	case WM_T_82572:
    3717 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3718 	case WM_T_80003:
   3719 		sc->sc_pba = PBA_32K;
   3720 		break;
   3721 	case WM_T_82573:
   3722 		sc->sc_pba = PBA_12K;
   3723 		break;
   3724 	case WM_T_82574:
   3725 	case WM_T_82583:
   3726 		sc->sc_pba = PBA_20K;
   3727 		break;
   3728 	case WM_T_82576:
   3729 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3730 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3731 		break;
   3732 	case WM_T_82580:
   3733 	case WM_T_I350:
   3734 	case WM_T_I354:
   3735 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3736 		break;
   3737 	case WM_T_I210:
   3738 	case WM_T_I211:
   3739 		sc->sc_pba = PBA_34K;
   3740 		break;
   3741 	case WM_T_ICH8:
   3742 		/* Workaround for a bit corruption issue in FIFO memory */
   3743 		sc->sc_pba = PBA_8K;
   3744 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3745 		break;
   3746 	case WM_T_ICH9:
   3747 	case WM_T_ICH10:
   3748 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3749 		    PBA_14K : PBA_10K;
   3750 		break;
   3751 	case WM_T_PCH:
   3752 	case WM_T_PCH2:
   3753 	case WM_T_PCH_LPT:
   3754 	case WM_T_PCH_SPT:
   3755 		sc->sc_pba = PBA_26K;
   3756 		break;
   3757 	default:
   3758 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3759 		    PBA_40K : PBA_48K;
   3760 		break;
   3761 	}
   3762 	/*
    3763 	 * Only old or non-multiqueue devices have the PBA register.
   3764 	 * XXX Need special handling for 82575.
   3765 	 */
   3766 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3767 	    || (sc->sc_type == WM_T_82575))
   3768 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3769 
   3770 	/* Prevent the PCI-E bus from sticking */
   3771 	if (sc->sc_flags & WM_F_PCIE) {
   3772 		int timeout = 800;
   3773 
   3774 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3775 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3776 
   3777 		while (timeout--) {
   3778 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3779 			    == 0)
   3780 				break;
   3781 			delay(100);
   3782 		}
   3783 	}
   3784 
    3785 	/* Set the completion timeout for the interface */
   3786 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3787 	    || (sc->sc_type == WM_T_82580)
   3788 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3789 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3790 		wm_set_pcie_completion_timeout(sc);
   3791 
   3792 	/* Clear interrupt */
   3793 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3794 	if (sc->sc_nintrs > 1) {
   3795 		if (sc->sc_type != WM_T_82574) {
   3796 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3797 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3798 		} else {
   3799 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3800 		}
   3801 	}
   3802 
   3803 	/* Stop the transmit and receive processes. */
   3804 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3805 	sc->sc_rctl &= ~RCTL_EN;
   3806 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3807 	CSR_WRITE_FLUSH(sc);
   3808 
   3809 	/* XXX set_tbi_sbp_82543() */
   3810 
   3811 	delay(10*1000);
   3812 
   3813 	/* Must acquire the MDIO ownership before MAC reset */
   3814 	switch (sc->sc_type) {
   3815 	case WM_T_82573:
   3816 	case WM_T_82574:
   3817 	case WM_T_82583:
   3818 		error = wm_get_hw_semaphore_82573(sc);
   3819 		break;
   3820 	default:
   3821 		break;
   3822 	}
   3823 
   3824 	/*
   3825 	 * 82541 Errata 29? & 82547 Errata 28?
   3826 	 * See also the description about PHY_RST bit in CTRL register
   3827 	 * in 8254x_GBe_SDM.pdf.
   3828 	 */
   3829 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3830 		CSR_WRITE(sc, WMREG_CTRL,
   3831 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3832 		CSR_WRITE_FLUSH(sc);
   3833 		delay(5000);
   3834 	}
   3835 
   3836 	switch (sc->sc_type) {
   3837 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3838 	case WM_T_82541:
   3839 	case WM_T_82541_2:
   3840 	case WM_T_82547:
   3841 	case WM_T_82547_2:
   3842 		/*
   3843 		 * On some chipsets, a reset through a memory-mapped write
   3844 		 * cycle can cause the chip to reset before completing the
    3845 		 * write cycle.  This causes a major headache that can be
   3846 		 * avoided by issuing the reset via indirect register writes
   3847 		 * through I/O space.
   3848 		 *
   3849 		 * So, if we successfully mapped the I/O BAR at attach time,
   3850 		 * use that.  Otherwise, try our luck with a memory-mapped
   3851 		 * reset.
   3852 		 */
   3853 		if (sc->sc_flags & WM_F_IOH_VALID)
   3854 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3855 		else
   3856 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3857 		break;
   3858 	case WM_T_82545_3:
   3859 	case WM_T_82546_3:
   3860 		/* Use the shadow control register on these chips. */
   3861 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3862 		break;
   3863 	case WM_T_80003:
   3864 		mask = swfwphysem[sc->sc_funcid];
   3865 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3866 		wm_get_swfw_semaphore(sc, mask);
   3867 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3868 		wm_put_swfw_semaphore(sc, mask);
   3869 		break;
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 	case WM_T_ICH10:
   3873 	case WM_T_PCH:
   3874 	case WM_T_PCH2:
   3875 	case WM_T_PCH_LPT:
   3876 	case WM_T_PCH_SPT:
   3877 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3878 		if (wm_phy_resetisblocked(sc) == false) {
   3879 			/*
   3880 			 * Gate automatic PHY configuration by hardware on
   3881 			 * non-managed 82579
   3882 			 */
   3883 			if ((sc->sc_type == WM_T_PCH2)
   3884 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3885 				== 0))
   3886 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3887 
   3888 			reg |= CTRL_PHY_RESET;
   3889 			phy_reset = 1;
   3890 		} else
   3891 			printf("XXX reset is blocked!!!\n");
   3892 		wm_get_swfwhw_semaphore(sc);
   3893 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3894 		/* Don't insert a completion barrier during reset */
   3895 		delay(20*1000);
   3896 		wm_put_swfwhw_semaphore(sc);
   3897 		break;
   3898 	case WM_T_82580:
   3899 	case WM_T_I350:
   3900 	case WM_T_I354:
   3901 	case WM_T_I210:
   3902 	case WM_T_I211:
   3903 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3904 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3905 			CSR_WRITE_FLUSH(sc);
   3906 		delay(5000);
   3907 		break;
   3908 	case WM_T_82542_2_0:
   3909 	case WM_T_82542_2_1:
   3910 	case WM_T_82543:
   3911 	case WM_T_82540:
   3912 	case WM_T_82545:
   3913 	case WM_T_82546:
   3914 	case WM_T_82571:
   3915 	case WM_T_82572:
   3916 	case WM_T_82573:
   3917 	case WM_T_82574:
   3918 	case WM_T_82575:
   3919 	case WM_T_82576:
   3920 	case WM_T_82583:
   3921 	default:
   3922 		/* Everything else can safely use the documented method. */
   3923 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3924 		break;
   3925 	}
   3926 
   3927 	/* Must release the MDIO ownership after MAC reset */
   3928 	switch (sc->sc_type) {
   3929 	case WM_T_82573:
   3930 	case WM_T_82574:
   3931 	case WM_T_82583:
   3932 		if (error == 0)
   3933 			wm_put_hw_semaphore_82573(sc);
   3934 		break;
   3935 	default:
   3936 		break;
   3937 	}
   3938 
   3939 	if (phy_reset != 0) {
   3940 		wm_get_cfg_done(sc);
   3941 		delay(10 * 1000);
   3942 		if (sc->sc_type >= WM_T_PCH) {
   3943 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3944 			    BM_PORT_GEN_CFG);
   3945 			reg &= ~BM_WUC_HOST_WU_BIT;
   3946 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   3947 			    BM_PORT_GEN_CFG, reg);
   3948 		}
   3949 	}
   3950 
   3951 	/* reload EEPROM */
   3952 	switch (sc->sc_type) {
   3953 	case WM_T_82542_2_0:
   3954 	case WM_T_82542_2_1:
   3955 	case WM_T_82543:
   3956 	case WM_T_82544:
   3957 		delay(10);
   3958 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3959 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3960 		CSR_WRITE_FLUSH(sc);
   3961 		delay(2000);
   3962 		break;
   3963 	case WM_T_82540:
   3964 	case WM_T_82545:
   3965 	case WM_T_82545_3:
   3966 	case WM_T_82546:
   3967 	case WM_T_82546_3:
   3968 		delay(5*1000);
   3969 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3970 		break;
   3971 	case WM_T_82541:
   3972 	case WM_T_82541_2:
   3973 	case WM_T_82547:
   3974 	case WM_T_82547_2:
   3975 		delay(20000);
   3976 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3977 		break;
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82573:
   3981 	case WM_T_82574:
   3982 	case WM_T_82583:
   3983 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3984 			delay(10);
   3985 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3986 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3987 			CSR_WRITE_FLUSH(sc);
   3988 		}
   3989 		/* check EECD_EE_AUTORD */
   3990 		wm_get_auto_rd_done(sc);
   3991 		/*
    3992 		 * PHY configuration from the NVM just starts after EECD_AUTO_RD
   3993 		 * is set.
   3994 		 */
   3995 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3996 		    || (sc->sc_type == WM_T_82583))
   3997 			delay(25*1000);
   3998 		break;
   3999 	case WM_T_82575:
   4000 	case WM_T_82576:
   4001 	case WM_T_82580:
   4002 	case WM_T_I350:
   4003 	case WM_T_I354:
   4004 	case WM_T_I210:
   4005 	case WM_T_I211:
   4006 	case WM_T_80003:
   4007 		/* check EECD_EE_AUTORD */
   4008 		wm_get_auto_rd_done(sc);
   4009 		break;
   4010 	case WM_T_ICH8:
   4011 	case WM_T_ICH9:
   4012 	case WM_T_ICH10:
   4013 	case WM_T_PCH:
   4014 	case WM_T_PCH2:
   4015 	case WM_T_PCH_LPT:
   4016 	case WM_T_PCH_SPT:
   4017 		break;
   4018 	default:
   4019 		panic("%s: unknown type\n", __func__);
   4020 	}
   4021 
   4022 	/* Check whether EEPROM is present or not */
   4023 	switch (sc->sc_type) {
   4024 	case WM_T_82575:
   4025 	case WM_T_82576:
   4026 	case WM_T_82580:
   4027 	case WM_T_I350:
   4028 	case WM_T_I354:
   4029 	case WM_T_ICH8:
   4030 	case WM_T_ICH9:
   4031 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4032 			/* Not found */
   4033 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4034 			if (sc->sc_type == WM_T_82575)
   4035 				wm_reset_init_script_82575(sc);
   4036 		}
   4037 		break;
   4038 	default:
   4039 		break;
   4040 	}
   4041 
   4042 	if ((sc->sc_type == WM_T_82580)
   4043 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4044 		/* clear global device reset status bit */
   4045 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4046 	}
   4047 
   4048 	/* Clear any pending interrupt events. */
   4049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4050 	reg = CSR_READ(sc, WMREG_ICR);
   4051 	if (sc->sc_nintrs > 1) {
   4052 		if (sc->sc_type != WM_T_82574) {
   4053 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4054 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4055 		} else
   4056 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4057 	}
   4058 
   4059 	/* reload sc_ctrl */
   4060 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4061 
   4062 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4063 		wm_set_eee_i350(sc);
   4064 
   4065 	/* dummy read from WUC */
   4066 	if (sc->sc_type == WM_T_PCH)
   4067 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4068 	/*
   4069 	 * For PCH, this write will make sure that any noise will be detected
   4070 	 * as a CRC error and be dropped rather than show up as a bad packet
   4071 	 * to the DMA engine
   4072 	 */
   4073 	if (sc->sc_type == WM_T_PCH)
   4074 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4075 
   4076 	if (sc->sc_type >= WM_T_82544)
   4077 		CSR_WRITE(sc, WMREG_WUC, 0);
   4078 
   4079 	wm_reset_mdicnfg_82580(sc);
   4080 
   4081 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4082 		wm_pll_workaround_i210(sc);
   4083 }
   4084 
   4085 /*
   4086  * wm_add_rxbuf:
   4087  *
    4088  *	Add a receive buffer to the indicated descriptor.
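          *	A fresh mbuf cluster is allocated and loaded into the
          *	slot's DMA map; the hardware descriptor is re-initialized
          *	unless this is a NEWQUEUE device whose receiver is not yet
          *	enabled.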
   4089  */
   4090 static int
   4091 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4092 {
   4093 	struct wm_softc *sc = rxq->rxq_sc;
   4094 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4095 	struct mbuf *m;
   4096 	int error;
   4097 
   4098 	KASSERT(mutex_owned(rxq->rxq_lock));
   4099 
   4100 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4101 	if (m == NULL)
   4102 		return ENOBUFS;
   4103 
   4104 	MCLGET(m, M_DONTWAIT);
   4105 	if ((m->m_flags & M_EXT) == 0) {
   4106 		m_freem(m);
   4107 		return ENOBUFS;
   4108 	}
   4109 
   4110 	if (rxs->rxs_mbuf != NULL)
   4111 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4112 
   4113 	rxs->rxs_mbuf = m;
   4114 
   4115 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4116 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4117 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4118 	if (error) {
   4119 		/* XXX XXX XXX */
   4120 		aprint_error_dev(sc->sc_dev,
   4121 		    "unable to load rx DMA map %d, error = %d\n",
   4122 		    idx, error);
   4123 		panic("wm_add_rxbuf");
   4124 	}
   4125 
   4126 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4127 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4128 
   4129 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4130 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4131 			wm_init_rxdesc(rxq, idx);
   4132 	} else
   4133 		wm_init_rxdesc(rxq, idx);
   4134 
   4135 	return 0;
   4136 }
   4137 
   4138 /*
   4139  * wm_rxdrain:
   4140  *
   4141  *	Drain the receive queue.
   4142  */
   4143 static void
   4144 wm_rxdrain(struct wm_rxqueue *rxq)
   4145 {
   4146 	struct wm_softc *sc = rxq->rxq_sc;
   4147 	struct wm_rxsoft *rxs;
   4148 	int i;
   4149 
   4150 	KASSERT(mutex_owned(rxq->rxq_lock));
   4151 
   4152 	for (i = 0; i < WM_NRXDESC; i++) {
   4153 		rxs = &rxq->rxq_soft[i];
   4154 		if (rxs->rxs_mbuf != NULL) {
   4155 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4156 			m_freem(rxs->rxs_mbuf);
   4157 			rxs->rxs_mbuf = NULL;
   4158 		}
   4159 	}
   4160 }
   4161 
   4162 
   4163 /*
   4164  * XXX copy from FreeBSD's sys/net/rss_config.c
   4165  */
   4166 /*
   4167  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4168  * effectiveness may be limited by algorithm choice and available entropy
   4169  * during the boot.
   4170  *
   4171  * XXXRW: And that we don't randomize it yet!
   4172  *
   4173  * This is the default Microsoft RSS specification key which is also
   4174  * the Chelsio T5 firmware default key.
   4175  */
   4176 #define RSS_KEYSIZE 40
   4177 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4178 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4179 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4180 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4181 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4182 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4183 };
   4184 
   4185 /*
    4186  * The caller must pass an array of size sizeof(wm_rss_key).
   4187  *
   4188  * XXX
    4189  * As if_ixgbe may use this function, it should not be an
    4190  * if_wm-specific function.
   4191  */
   4192 static void
   4193 wm_rss_getkey(uint8_t *key)
   4194 {
   4195 
   4196 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4197 }
   4198 
   4199 /*
    4200  * Set up the registers for RSS.
    4201  *
    4202  * XXX VMDq is not supported yet.
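          *
          * The redirection table (RETA) is filled round-robin over the
          * sc_nqueues queues, the 40-byte key is loaded into the RSSRK
          * registers, and MRQC enables hashing of IPv4/IPv6 TCP and UDP.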
   4203  */
   4204 static void
   4205 wm_init_rss(struct wm_softc *sc)
   4206 {
   4207 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4208 	int i;
   4209 
   4210 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4211 
   4212 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4213 		int qid, reta_ent;
   4214 
   4215 		qid  = i % sc->sc_nqueues;
    4216 		switch (sc->sc_type) {
   4217 		case WM_T_82574:
   4218 			reta_ent = __SHIFTIN(qid,
   4219 			    RETA_ENT_QINDEX_MASK_82574);
   4220 			break;
   4221 		case WM_T_82575:
   4222 			reta_ent = __SHIFTIN(qid,
   4223 			    RETA_ENT_QINDEX1_MASK_82575);
   4224 			break;
   4225 		default:
   4226 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4227 			break;
   4228 		}
   4229 
   4230 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4231 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4232 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4233 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4234 	}
   4235 
   4236 	wm_rss_getkey((uint8_t *)rss_key);
   4237 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4238 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4239 
   4240 	if (sc->sc_type == WM_T_82574)
   4241 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4242 	else
   4243 		mrqc = MRQC_ENABLE_RSS_MQ;
   4244 
    4245 	/* XXX
    4246 	 * The same as FreeBSD's igb.
    4247 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4248 	 */
   4249 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4250 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4251 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4252 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4253 
   4254 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4255 }
   4256 
   4257 /*
    4258  * Adjust the TX and RX queue numbers that the system actually uses.
    4259  *
    4260  * The numbers are affected by the parameters below:
    4261  *     - The number of hardware queues
    4262  *     - The number of MSI-X vectors (= "nvectors" argument)
    4263  *     - ncpu
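          *
          * For example, an 82576 (16 hardware queue pairs) given five
          * MSI-X vectors on an 8-CPU machine ends up with sc_nqueues =
          * min(16, 5 - 1, 8) = 4; one vector is reserved for the link
          * interrupt.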
   4264  */
   4265 static void
   4266 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4267 {
   4268 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4269 
   4270 	if (nvectors < 2) {
   4271 		sc->sc_nqueues = 1;
   4272 		return;
   4273 	}
   4274 
    4275 	switch (sc->sc_type) {
   4276 	case WM_T_82572:
   4277 		hw_ntxqueues = 2;
   4278 		hw_nrxqueues = 2;
   4279 		break;
   4280 	case WM_T_82574:
   4281 		hw_ntxqueues = 2;
   4282 		hw_nrxqueues = 2;
   4283 		break;
   4284 	case WM_T_82575:
   4285 		hw_ntxqueues = 4;
   4286 		hw_nrxqueues = 4;
   4287 		break;
   4288 	case WM_T_82576:
   4289 		hw_ntxqueues = 16;
   4290 		hw_nrxqueues = 16;
   4291 		break;
   4292 	case WM_T_82580:
   4293 	case WM_T_I350:
   4294 	case WM_T_I354:
   4295 		hw_ntxqueues = 8;
   4296 		hw_nrxqueues = 8;
   4297 		break;
   4298 	case WM_T_I210:
   4299 		hw_ntxqueues = 4;
   4300 		hw_nrxqueues = 4;
   4301 		break;
   4302 	case WM_T_I211:
   4303 		hw_ntxqueues = 2;
   4304 		hw_nrxqueues = 2;
   4305 		break;
   4306 		/*
    4307 		 * As the Ethernet controllers below do not support MSI-X,
    4308 		 * this driver does not use multiqueue on them:
   4309 		 *     - WM_T_80003
   4310 		 *     - WM_T_ICH8
   4311 		 *     - WM_T_ICH9
   4312 		 *     - WM_T_ICH10
   4313 		 *     - WM_T_PCH
   4314 		 *     - WM_T_PCH2
   4315 		 *     - WM_T_PCH_LPT
   4316 		 */
   4317 	default:
   4318 		hw_ntxqueues = 1;
   4319 		hw_nrxqueues = 1;
   4320 		break;
   4321 	}
   4322 
   4323 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4324 
   4325 	/*
    4326 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4327 	 * limit the number of queues actually used.
   4328 	 */
   4329 	if (nvectors < hw_nqueues + 1) {
   4330 		sc->sc_nqueues = nvectors - 1;
   4331 	} else {
   4332 		sc->sc_nqueues = hw_nqueues;
   4333 	}
   4334 
   4335 	/*
    4336 	 * As more queues than CPUs cannot improve scaling, we limit
    4337 	 * the number of queues actually used.
   4338 	 */
   4339 	if (ncpu < sc->sc_nqueues)
   4340 		sc->sc_nqueues = ncpu;
   4341 }
   4342 
   4343 /*
    4344  * Both single-interrupt MSI and INTx can use this function.
   4345  */
   4346 static int
   4347 wm_setup_legacy(struct wm_softc *sc)
   4348 {
   4349 	pci_chipset_tag_t pc = sc->sc_pc;
   4350 	const char *intrstr = NULL;
   4351 	char intrbuf[PCI_INTRSTR_LEN];
   4352 	int error;
   4353 
   4354 	error = wm_alloc_txrx_queues(sc);
   4355 	if (error) {
   4356 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4357 		    error);
   4358 		return ENOMEM;
   4359 	}
   4360 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4361 	    sizeof(intrbuf));
   4362 #ifdef WM_MPSAFE
   4363 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4364 #endif
   4365 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4366 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4367 	if (sc->sc_ihs[0] == NULL) {
    4368 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4369 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4370 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4371 		return ENOMEM;
   4372 	}
   4373 
   4374 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4375 	sc->sc_nintrs = 1;
   4376 	return 0;
   4377 }
   4378 
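         /*
          * wm_setup_msix:
          *
          *	Allocate the queues, then establish one MSI-X handler per
          *	Tx/Rx queue pair (distributed round-robin across CPUs) and
          *	one for link-status interrupts, which keeps the default
          *	affinity.
          */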
   4379 static int
   4380 wm_setup_msix(struct wm_softc *sc)
   4381 {
   4382 	void *vih;
   4383 	kcpuset_t *affinity;
   4384 	int qidx, error, intr_idx, txrx_established;
   4385 	pci_chipset_tag_t pc = sc->sc_pc;
   4386 	const char *intrstr = NULL;
   4387 	char intrbuf[PCI_INTRSTR_LEN];
   4388 	char intr_xname[INTRDEVNAMEBUF];
   4389 
   4390 	if (sc->sc_nqueues < ncpu) {
   4391 		/*
   4392 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4393 		 * interrupts starts from CPU#1.
   4394 		 */
   4395 		sc->sc_affinity_offset = 1;
   4396 	} else {
   4397 		/*
    4398 		 * In this case, this device uses all CPUs, so we unify the
    4399 		 * affinitized cpu_index with the MSI-X vector number for readability.
   4400 		 */
   4401 		sc->sc_affinity_offset = 0;
   4402 	}
   4403 
   4404 	error = wm_alloc_txrx_queues(sc);
   4405 	if (error) {
   4406 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4407 		    error);
   4408 		return ENOMEM;
   4409 	}
   4410 
   4411 	kcpuset_create(&affinity, false);
   4412 	intr_idx = 0;
   4413 
   4414 	/*
   4415 	 * TX and RX
   4416 	 */
   4417 	txrx_established = 0;
   4418 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4419 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4420 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4421 
   4422 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4423 		    sizeof(intrbuf));
   4424 #ifdef WM_MPSAFE
   4425 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4426 		    PCI_INTR_MPSAFE, true);
   4427 #endif
   4428 		memset(intr_xname, 0, sizeof(intr_xname));
   4429 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4430 		    device_xname(sc->sc_dev), qidx);
   4431 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4432 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4433 		if (vih == NULL) {
   4434 			aprint_error_dev(sc->sc_dev,
   4435 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4436 			    intrstr ? " at " : "",
   4437 			    intrstr ? intrstr : "");
   4438 
   4439 			goto fail;
   4440 		}
   4441 		kcpuset_zero(affinity);
   4442 		/* Round-robin affinity */
   4443 		kcpuset_set(affinity, affinity_to);
   4444 		error = interrupt_distribute(vih, affinity, NULL);
   4445 		if (error == 0) {
   4446 			aprint_normal_dev(sc->sc_dev,
   4447 			    "for TX and RX interrupting at %s affinity to %u\n",
   4448 			    intrstr, affinity_to);
   4449 		} else {
   4450 			aprint_normal_dev(sc->sc_dev,
   4451 			    "for TX and RX interrupting at %s\n", intrstr);
   4452 		}
   4453 		sc->sc_ihs[intr_idx] = vih;
    4454 		wmq->wmq_id = qidx;
   4455 		wmq->wmq_intr_idx = intr_idx;
   4456 
   4457 		txrx_established++;
   4458 		intr_idx++;
   4459 	}
   4460 
   4461 	/*
   4462 	 * LINK
   4463 	 */
   4464 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4465 	    sizeof(intrbuf));
   4466 #ifdef WM_MPSAFE
   4467 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4468 #endif
   4469 	memset(intr_xname, 0, sizeof(intr_xname));
   4470 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4471 	    device_xname(sc->sc_dev));
   4472 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4473 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4474 	if (vih == NULL) {
   4475 		aprint_error_dev(sc->sc_dev,
   4476 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4477 		    intrstr ? " at " : "",
   4478 		    intrstr ? intrstr : "");
   4479 
   4480 		goto fail;
   4481 	}
    4482 	/* Keep the default affinity for the LINK interrupt */
   4483 	aprint_normal_dev(sc->sc_dev,
   4484 	    "for LINK interrupting at %s\n", intrstr);
   4485 	sc->sc_ihs[intr_idx] = vih;
   4486 	sc->sc_link_intr_idx = intr_idx;
   4487 
   4488 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4489 	kcpuset_destroy(affinity);
   4490 	return 0;
   4491 
   4492  fail:
   4493 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4494 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4495 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4496 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4497 	}
   4498 
   4499 	kcpuset_destroy(affinity);
   4500 	return ENOMEM;
   4501 }
   4502 
   4503 /*
   4504  * wm_init:		[ifnet interface function]
   4505  *
   4506  *	Initialize the interface.
   4507  */
   4508 static int
   4509 wm_init(struct ifnet *ifp)
   4510 {
   4511 	struct wm_softc *sc = ifp->if_softc;
   4512 	int ret;
   4513 
   4514 	WM_CORE_LOCK(sc);
   4515 	ret = wm_init_locked(ifp);
   4516 	WM_CORE_UNLOCK(sc);
   4517 
   4518 	return ret;
   4519 }
   4520 
   4521 static int
   4522 wm_init_locked(struct ifnet *ifp)
   4523 {
   4524 	struct wm_softc *sc = ifp->if_softc;
   4525 	int i, j, trynum, error = 0;
   4526 	uint32_t reg;
   4527 
   4528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4529 		device_xname(sc->sc_dev), __func__));
   4530 	KASSERT(WM_CORE_LOCKED(sc));
   4531 
   4532 	/*
    4533 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4534 	 * There is a small but measurable benefit to avoiding the adjustment
    4535 	 * of the descriptor so that the headers are aligned, for normal MTU,
   4536 	 * on such platforms.  One possibility is that the DMA itself is
   4537 	 * slightly more efficient if the front of the entire packet (instead
   4538 	 * of the front of the headers) is aligned.
   4539 	 *
   4540 	 * Note we must always set align_tweak to 0 if we are using
   4541 	 * jumbo frames.
   4542 	 */
   4543 #ifdef __NO_STRICT_ALIGNMENT
   4544 	sc->sc_align_tweak = 0;
   4545 #else
   4546 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4547 		sc->sc_align_tweak = 0;
   4548 	else
   4549 		sc->sc_align_tweak = 2;
   4550 #endif /* __NO_STRICT_ALIGNMENT */
   4551 
   4552 	/* Cancel any pending I/O. */
   4553 	wm_stop_locked(ifp, 0);
   4554 
   4555 	/* update statistics before reset */
   4556 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4557 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4558 
   4559 	/* Reset the chip to a known state. */
   4560 	wm_reset(sc);
   4561 
   4562 	switch (sc->sc_type) {
   4563 	case WM_T_82571:
   4564 	case WM_T_82572:
   4565 	case WM_T_82573:
   4566 	case WM_T_82574:
   4567 	case WM_T_82583:
   4568 	case WM_T_80003:
   4569 	case WM_T_ICH8:
   4570 	case WM_T_ICH9:
   4571 	case WM_T_ICH10:
   4572 	case WM_T_PCH:
   4573 	case WM_T_PCH2:
   4574 	case WM_T_PCH_LPT:
   4575 	case WM_T_PCH_SPT:
   4576 		/* AMT based hardware can now take control from firmware */
   4577 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4578 			wm_get_hw_control(sc);
   4579 		break;
   4580 	default:
   4581 		break;
   4582 	}
   4583 
   4584 	/* Init hardware bits */
   4585 	wm_initialize_hardware_bits(sc);
   4586 
   4587 	/* Reset the PHY. */
   4588 	if (sc->sc_flags & WM_F_HAS_MII)
   4589 		wm_gmii_reset(sc);
   4590 
   4591 	/* Calculate (E)ITR value */
   4592 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4593 		sc->sc_itr = 450;	/* For EITR */
   4594 	} else if (sc->sc_type >= WM_T_82543) {
   4595 		/*
   4596 		 * Set up the interrupt throttling register (units of 256ns)
   4597 		 * Note that a footnote in Intel's documentation says this
   4598 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4599 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4600 		 * that that is also true for the 1024ns units of the other
   4601 		 * interrupt-related timer registers -- so, really, we ought
   4602 		 * to divide this value by 4 when the link speed is low.
   4603 		 *
   4604 		 * XXX implement this division at link speed change!
   4605 		 */
   4606 
   4607 		/*
   4608 		 * For N interrupts/sec, set this value to:
   4609 		 * 1000000000 / (N * 256).  Note that we set the
   4610 		 * absolute and packet timer values to this value
   4611 		 * divided by 4 to get "simple timer" behavior.
   4612 		 */
   4613 
   4614 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4615 	}
   4616 
   4617 	error = wm_init_txrx_queues(sc);
   4618 	if (error)
   4619 		goto out;
   4620 
   4621 	/*
   4622 	 * Clear out the VLAN table -- we don't use it (yet).
   4623 	 */
   4624 	CSR_WRITE(sc, WMREG_VET, 0);
   4625 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4626 		trynum = 10; /* Due to hw errata */
   4627 	else
   4628 		trynum = 1;
   4629 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4630 		for (j = 0; j < trynum; j++)
   4631 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4632 
   4633 	/*
   4634 	 * Set up flow-control parameters.
   4635 	 *
   4636 	 * XXX Values could probably stand some tuning.
   4637 	 */
   4638 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4639 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4640 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4641 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4642 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4643 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4644 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4645 	}
   4646 
   4647 	sc->sc_fcrtl = FCRTL_DFLT;
   4648 	if (sc->sc_type < WM_T_82543) {
   4649 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4650 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4651 	} else {
   4652 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4653 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4654 	}
   4655 
   4656 	if (sc->sc_type == WM_T_80003)
   4657 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4658 	else
   4659 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4660 
   4661 	/* Writes the control register. */
   4662 	wm_set_vlan(sc);
   4663 
   4664 	if (sc->sc_flags & WM_F_HAS_MII) {
   4665 		int val;
   4666 
   4667 		switch (sc->sc_type) {
   4668 		case WM_T_80003:
   4669 		case WM_T_ICH8:
   4670 		case WM_T_ICH9:
   4671 		case WM_T_ICH10:
   4672 		case WM_T_PCH:
   4673 		case WM_T_PCH2:
   4674 		case WM_T_PCH_LPT:
   4675 		case WM_T_PCH_SPT:
   4676 			/*
   4677 			 * Set the mac to wait the maximum time between each
   4678 			 * iteration and increase the max iterations when
   4679 			 * polling the phy; this fixes erroneous timeouts at
   4680 			 * 10Mbps.
   4681 			 */
   4682 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4683 			    0xFFFF);
   4684 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4685 			val |= 0x3F;
   4686 			wm_kmrn_writereg(sc,
   4687 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4688 			break;
   4689 		default:
   4690 			break;
   4691 		}
   4692 
   4693 		if (sc->sc_type == WM_T_80003) {
   4694 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4695 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4696 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4697 
    4698 			/* Bypass RX and TX FIFOs */
   4699 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4700 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4701 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4702 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4703 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4704 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4705 		}
   4706 	}
   4707 #if 0
   4708 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4709 #endif
   4710 
   4711 	/* Set up checksum offload parameters. */
   4712 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4713 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4714 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4715 		reg |= RXCSUM_IPOFL;
   4716 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4717 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4718 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4719 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4720 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4721 
   4722 	/* Set up MSI-X */
   4723 	if (sc->sc_nintrs > 1) {
   4724 		uint32_t ivar;
   4725 		struct wm_queue *wmq;
   4726 		int qid, qintr_idx;
   4727 
   4728 		if (sc->sc_type == WM_T_82575) {
   4729 			/* Interrupt control */
   4730 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4731 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4732 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4733 
   4734 			/* TX and RX */
   4735 			for (i = 0; i < sc->sc_nqueues; i++) {
   4736 				wmq = &sc->sc_queue[i];
   4737 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4738 				    EITR_TX_QUEUE(wmq->wmq_id)
   4739 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4740 			}
   4741 			/* Link status */
   4742 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4743 			    EITR_OTHER);
   4744 		} else if (sc->sc_type == WM_T_82574) {
   4745 			/* Interrupt control */
   4746 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4747 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4748 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4749 
   4750 			ivar = 0;
   4751 			/* TX and RX */
   4752 			for (i = 0; i < sc->sc_nqueues; i++) {
   4753 				wmq = &sc->sc_queue[i];
   4754 				qid = wmq->wmq_id;
   4755 				qintr_idx = wmq->wmq_intr_idx;
   4756 
   4757 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4758 				    IVAR_TX_MASK_Q_82574(qid));
   4759 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4760 				    IVAR_RX_MASK_Q_82574(qid));
   4761 			}
   4762 			/* Link status */
   4763 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4764 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4765 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4766 		} else {
   4767 			/* Interrupt control */
   4768 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4769 			    | GPIE_EIAME | GPIE_PBA);
   4770 
   4771 			switch (sc->sc_type) {
   4772 			case WM_T_82580:
   4773 			case WM_T_I350:
   4774 			case WM_T_I354:
   4775 			case WM_T_I210:
   4776 			case WM_T_I211:
   4777 				/* TX and RX */
   4778 				for (i = 0; i < sc->sc_nqueues; i++) {
   4779 					wmq = &sc->sc_queue[i];
   4780 					qid = wmq->wmq_id;
   4781 					qintr_idx = wmq->wmq_intr_idx;
   4782 
   4783 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4784 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4785 					ivar |= __SHIFTIN((qintr_idx
   4786 						| IVAR_VALID),
   4787 					    IVAR_TX_MASK_Q(qid));
   4788 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4789 					ivar |= __SHIFTIN((qintr_idx
   4790 						| IVAR_VALID),
   4791 					    IVAR_RX_MASK_Q(qid));
   4792 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4793 				}
   4794 				break;
   4795 			case WM_T_82576:
   4796 				/* TX and RX */
   4797 				for (i = 0; i < sc->sc_nqueues; i++) {
   4798 					wmq = &sc->sc_queue[i];
   4799 					qid = wmq->wmq_id;
   4800 					qintr_idx = wmq->wmq_intr_idx;
   4801 
   4802 					ivar = CSR_READ(sc,
   4803 					    WMREG_IVAR_Q_82576(qid));
   4804 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4805 					ivar |= __SHIFTIN((qintr_idx
   4806 						| IVAR_VALID),
   4807 					    IVAR_TX_MASK_Q_82576(qid));
   4808 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4809 					ivar |= __SHIFTIN((qintr_idx
   4810 						| IVAR_VALID),
   4811 					    IVAR_RX_MASK_Q_82576(qid));
   4812 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4813 					    ivar);
   4814 				}
   4815 				break;
   4816 			default:
   4817 				break;
   4818 			}
   4819 
   4820 			/* Link status */
   4821 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4822 			    IVAR_MISC_OTHER);
   4823 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4824 		}
   4825 
   4826 		if (sc->sc_nqueues > 1) {
   4827 			wm_init_rss(sc);
   4828 
    4829 			/*
    4830 			 * NOTE: Receive Full-Packet Checksum Offload
    4831 			 * is mutually exclusive with Multiqueue.  However,
    4832 			 * this is not the same as the TCP/IP checksum
    4833 			 * offloads, which still work.
    4834 			 */
   4835 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4836 			reg |= RXCSUM_PCSD;
   4837 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4838 		}
   4839 	}
   4840 
   4841 	/* Set up the interrupt registers. */
   4842 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4843 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4844 	    ICR_RXO | ICR_RXT0;
   4845 	if (sc->sc_nintrs > 1) {
   4846 		uint32_t mask;
   4847 		struct wm_queue *wmq;
   4848 
   4849 		switch (sc->sc_type) {
   4850 		case WM_T_82574:
   4851 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4852 			    WMREG_EIAC_82574_MSIX_MASK);
   4853 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4854 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4855 			break;
   4856 		default:
   4857 			if (sc->sc_type == WM_T_82575) {
   4858 				mask = 0;
   4859 				for (i = 0; i < sc->sc_nqueues; i++) {
   4860 					wmq = &sc->sc_queue[i];
   4861 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4862 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4863 				}
   4864 				mask |= EITR_OTHER;
   4865 			} else {
   4866 				mask = 0;
   4867 				for (i = 0; i < sc->sc_nqueues; i++) {
   4868 					wmq = &sc->sc_queue[i];
   4869 					mask |= 1 << wmq->wmq_intr_idx;
   4870 				}
   4871 				mask |= 1 << sc->sc_link_intr_idx;
   4872 			}
   4873 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4874 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4875 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4876 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4877 			break;
   4878 		}
   4879 	} else
   4880 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4881 
   4882 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4883 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4884 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4885 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4886 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4887 		reg |= KABGTXD_BGSQLBIAS;
   4888 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4889 	}
   4890 
   4891 	/* Set up the inter-packet gap. */
   4892 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4893 
   4894 	if (sc->sc_type >= WM_T_82543) {
   4895 		/*
    4896 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
   4897 		 * the multi queue function with MSI-X.
   4898 		 */
   4899 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4900 			int qidx;
   4901 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4902 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4903 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4904 				    sc->sc_itr);
   4905 			}
   4906 			/*
    4907 			 * Link interrupts occur much less frequently than
    4908 			 * TX and RX interrupts, so we don't tune the
    4909 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4910 			 * FreeBSD's if_igb does.
   4911 			 */
   4912 		} else
   4913 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4914 	}
   4915 
   4916 	/* Set the VLAN ethernetype. */
   4917 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4918 
   4919 	/*
   4920 	 * Set up the transmit control register; we start out with
    4921 	 * a collision distance suitable for FDX, but update it when
   4922 	 * we resolve the media type.
   4923 	 */
   4924 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4925 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4926 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4927 	if (sc->sc_type >= WM_T_82571)
   4928 		sc->sc_tctl |= TCTL_MULR;
   4929 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4930 
   4931 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4932 		/* Write TDT after TCTL.EN is set. See the documentation. */
   4933 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4934 	}
   4935 
   4936 	if (sc->sc_type == WM_T_80003) {
   4937 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4938 		reg &= ~TCTL_EXT_GCEX_MASK;
   4939 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4940 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4941 	}
   4942 
   4943 	/* Set the media. */
   4944 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4945 		goto out;
   4946 
   4947 	/* Configure for OS presence */
   4948 	wm_init_manageability(sc);
   4949 
   4950 	/*
   4951 	 * Set up the receive control register; we actually program
   4952 	 * the register when we set the receive filter.  Use multicast
   4953 	 * address offset type 0.
   4954 	 *
   4955 	 * Only the i82544 has the ability to strip the incoming
   4956 	 * CRC, so we don't enable that feature.
   4957 	 */
   4958 	sc->sc_mchash_type = 0;
   4959 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4960 	    | RCTL_MO(sc->sc_mchash_type);
   4961 
   4962 	/*
   4963 	 * The I350 has a bug where it always strips the CRC whether
    4964 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   4965 	 */
   4966 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4967 	    || (sc->sc_type == WM_T_I210))
   4968 		sc->sc_rctl |= RCTL_SECRC;
   4969 
   4970 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4971 	    && (ifp->if_mtu > ETHERMTU)) {
   4972 		sc->sc_rctl |= RCTL_LPE;
   4973 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4974 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4975 	}
   4976 
   4977 	if (MCLBYTES == 2048) {
   4978 		sc->sc_rctl |= RCTL_2k;
   4979 	} else {
   4980 		if (sc->sc_type >= WM_T_82543) {
   4981 			switch (MCLBYTES) {
   4982 			case 4096:
   4983 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4984 				break;
   4985 			case 8192:
   4986 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4987 				break;
   4988 			case 16384:
   4989 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4990 				break;
   4991 			default:
   4992 				panic("wm_init: MCLBYTES %d unsupported",
   4993 				    MCLBYTES);
   4994 				break;
   4995 			}
   4996 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4997 	}
   4998 
   4999 	/* Set the receive filter. */
   5000 	wm_set_filter(sc);
   5001 
   5002 	/* Enable ECC */
   5003 	switch (sc->sc_type) {
   5004 	case WM_T_82571:
   5005 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5006 		reg |= PBA_ECC_CORR_EN;
   5007 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5008 		break;
   5009 	case WM_T_PCH_LPT:
   5010 	case WM_T_PCH_SPT:
   5011 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5012 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5013 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5014 
   5015 		reg = CSR_READ(sc, WMREG_CTRL);
   5016 		reg |= CTRL_MEHE;
   5017 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5018 		break;
   5019 	default:
   5020 		break;
   5021 	}
   5022 
    5023 	/* On 82575 and later, set RDT only if RX is enabled */
   5024 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5025 		int qidx;
   5026 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5027 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5028 			for (i = 0; i < WM_NRXDESC; i++) {
   5029 				mutex_enter(rxq->rxq_lock);
   5030 				wm_init_rxdesc(rxq, i);
   5031 				mutex_exit(rxq->rxq_lock);
   5032 
   5033 			}
   5034 		}
   5035 	}
   5036 
   5037 	sc->sc_stopping = false;
   5038 
   5039 	/* Start the one second link check clock. */
   5040 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5041 
   5042 	/* ...all done! */
   5043 	ifp->if_flags |= IFF_RUNNING;
   5044 	ifp->if_flags &= ~IFF_OACTIVE;
   5045 
   5046  out:
   5047 	sc->sc_if_flags = ifp->if_flags;
   5048 	if (error)
   5049 		log(LOG_ERR, "%s: interface not running\n",
   5050 		    device_xname(sc->sc_dev));
   5051 	return error;
   5052 }
   5053 
   5054 /*
   5055  * wm_stop:		[ifnet interface function]
   5056  *
   5057  *	Stop transmission on the interface.
   5058  */
   5059 static void
   5060 wm_stop(struct ifnet *ifp, int disable)
   5061 {
   5062 	struct wm_softc *sc = ifp->if_softc;
   5063 
   5064 	WM_CORE_LOCK(sc);
   5065 	wm_stop_locked(ifp, disable);
   5066 	WM_CORE_UNLOCK(sc);
   5067 }
   5068 
   5069 static void
   5070 wm_stop_locked(struct ifnet *ifp, int disable)
   5071 {
   5072 	struct wm_softc *sc = ifp->if_softc;
   5073 	struct wm_txsoft *txs;
   5074 	int i, qidx;
   5075 
   5076 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5077 		device_xname(sc->sc_dev), __func__));
   5078 	KASSERT(WM_CORE_LOCKED(sc));
   5079 
   5080 	sc->sc_stopping = true;
   5081 
   5082 	/* Stop the one second clock. */
   5083 	callout_stop(&sc->sc_tick_ch);
   5084 
   5085 	/* Stop the 82547 Tx FIFO stall check timer. */
   5086 	if (sc->sc_type == WM_T_82547)
   5087 		callout_stop(&sc->sc_txfifo_ch);
   5088 
   5089 	if (sc->sc_flags & WM_F_HAS_MII) {
   5090 		/* Down the MII. */
   5091 		mii_down(&sc->sc_mii);
   5092 	} else {
   5093 #if 0
   5094 		/* Should we clear PHY's status properly? */
   5095 		wm_reset(sc);
   5096 #endif
   5097 	}
   5098 
   5099 	/* Stop the transmit and receive processes. */
   5100 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5101 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5102 	sc->sc_rctl &= ~RCTL_EN;
   5103 
   5104 	/*
   5105 	 * Clear the interrupt mask to ensure the device cannot assert its
   5106 	 * interrupt line.
   5107 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5108 	 * service any currently pending or shared interrupt.
   5109 	 */
   5110 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5111 	sc->sc_icr = 0;
   5112 	if (sc->sc_nintrs > 1) {
   5113 		if (sc->sc_type != WM_T_82574) {
   5114 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5115 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5116 		} else
   5117 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5118 	}
   5119 
   5120 	/* Release any queued transmit buffers. */
   5121 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5122 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5123 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5124 		mutex_enter(txq->txq_lock);
   5125 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5126 			txs = &txq->txq_soft[i];
   5127 			if (txs->txs_mbuf != NULL) {
   5128 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5129 				m_freem(txs->txs_mbuf);
   5130 				txs->txs_mbuf = NULL;
   5131 			}
   5132 		}
   5133 		if (sc->sc_type == WM_T_PCH_SPT) {
   5134 			pcireg_t preg;
   5135 			uint32_t reg;
   5136 			int nexttx;
   5137 
   5138 			/* First, disable MULR fix in FEXTNVM11 */
   5139 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5140 			reg |= FEXTNVM11_DIS_MULRFIX;
   5141 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5142 
   5143 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5144 			    WM_PCI_DESCRING_STATUS);
   5145 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5146 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5147 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5148 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5149 			    && (reg != 0)) {
   5150 				/* TX */
   5151 				printf("XXX need TX flush (reg = %08x)\n",
   5152 				    preg);
   5153 				wm_init_tx_descs(sc, txq);
   5154 				wm_init_tx_regs(sc, wmq, txq);
   5155 				nexttx = txq->txq_next;
   5156 				wm_set_dma_addr(
   5157 					&txq->txq_descs[nexttx].wtx_addr,
   5158 					WM_CDTXADDR(txq, nexttx));
   5159 				txq->txq_descs[nexttx].wtx_cmdlen
   5160 				    = htole32(WTX_CMD_IFCS | 512);
   5161 				wm_cdtxsync(txq, nexttx, 1,
   5162 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5163 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5164 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5165 				CSR_WRITE_FLUSH(sc);
   5166 				delay(250);
   5167 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5168 			}
   5169 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5170 			    WM_PCI_DESCRING_STATUS);
   5171 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5172 				/* RX */
   5173 				printf("XXX need RX flush\n");
   5174 			}
   5175 		}
   5176 		mutex_exit(txq->txq_lock);
   5177 	}
   5178 
   5179 	/* Mark the interface as down and cancel the watchdog timer. */
   5180 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5181 	ifp->if_timer = 0;
   5182 
   5183 	if (disable) {
   5184 		for (i = 0; i < sc->sc_nqueues; i++) {
   5185 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5186 			mutex_enter(rxq->rxq_lock);
   5187 			wm_rxdrain(rxq);
   5188 			mutex_exit(rxq->rxq_lock);
   5189 		}
   5190 	}
   5191 
   5192 #if 0 /* notyet */
   5193 	if (sc->sc_type >= WM_T_82544)
   5194 		CSR_WRITE(sc, WMREG_WUC, 0);
   5195 #endif
   5196 }
   5197 
   5198 static void
   5199 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5200 {
   5201 	struct mbuf *m;
   5202 	int i;
   5203 
   5204 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5205 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5206 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5207 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5208 		    m->m_data, m->m_len, m->m_flags);
   5209 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5210 	    i, i == 1 ? "" : "s");
   5211 }
   5212 
   5213 /*
   5214  * wm_82547_txfifo_stall:
   5215  *
   5216  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5217  *	reset the FIFO pointers, and restart packet transmission.
   5218  */
   5219 static void
   5220 wm_82547_txfifo_stall(void *arg)
   5221 {
   5222 	struct wm_softc *sc = arg;
   5223 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5224 
   5225 	mutex_enter(txq->txq_lock);
   5226 
   5227 	if (sc->sc_stopping)
   5228 		goto out;
   5229 
   5230 	if (txq->txq_fifo_stall) {
   5231 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5232 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5233 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5234 			/*
   5235 			 * Packets have drained.  Stop transmitter, reset
   5236 			 * FIFO pointers, restart transmitter, and kick
   5237 			 * the packet queue.
   5238 			 */
   5239 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5240 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5241 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5242 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5243 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5244 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5245 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5246 			CSR_WRITE_FLUSH(sc);
   5247 
   5248 			txq->txq_fifo_head = 0;
   5249 			txq->txq_fifo_stall = 0;
   5250 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5251 		} else {
   5252 			/*
   5253 			 * Still waiting for packets to drain; try again in
   5254 			 * another tick.
   5255 			 */
   5256 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5257 		}
   5258 	}
   5259 
   5260 out:
   5261 	mutex_exit(txq->txq_lock);
   5262 }
   5263 
   5264 /*
   5265  * wm_82547_txfifo_bugchk:
   5266  *
   5267  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5268  *	prevent enqueueing a packet that would wrap around the end
    5269  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5270  *
   5271  *	We do this by checking the amount of space before the end
   5272  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5273  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5274  *	the internal FIFO pointers to the beginning, and restart
   5275  *	transmission on the interface.
   5276  */
   5277 #define	WM_FIFO_HDR		0x10
   5278 #define	WM_82547_PAD_LEN	0x3e0
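         /*
          * Worked example (illustrative, not from the errata): a 1514-byte
          * frame occupies len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) =
          * 0x600 bytes of FIFO.  If only space = 0x100 bytes remain before
          * the end of the FIFO, then len >= WM_82547_PAD_LEN + space
          * (0x600 >= 0x4e0), so the packet is held back and the stall
          * callout is scheduled instead of risking a FIFO wrap.
          */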
   5279 static int
   5280 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5281 {
   5282 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5283 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5284 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5285 
   5286 	/* Just return if already stalled. */
   5287 	if (txq->txq_fifo_stall)
   5288 		return 1;
   5289 
   5290 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5291 		/* Stall only occurs in half-duplex mode. */
   5292 		goto send_packet;
   5293 	}
   5294 
   5295 	if (len >= WM_82547_PAD_LEN + space) {
   5296 		txq->txq_fifo_stall = 1;
   5297 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5298 		return 1;
   5299 	}
   5300 
   5301  send_packet:
   5302 	txq->txq_fifo_head += len;
   5303 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5304 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5305 
   5306 	return 0;
   5307 }
   5308 
   5309 static int
   5310 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5311 {
   5312 	int error;
   5313 
   5314 	/*
   5315 	 * Allocate the control data structures, and create and load the
   5316 	 * DMA map for it.
   5317 	 *
   5318 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5319 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5320 	 * both sets within the same 4G segment.
   5321 	 */
   5322 	if (sc->sc_type < WM_T_82544)
   5323 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5324 	else
   5325 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5326 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5327 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5328 	else
   5329 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5330 
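         	/*
         	 * Note: the 0x100000000ULL "boundary" argument to
         	 * bus_dmamem_alloc() below is what enforces the 4G
         	 * constraint described above: the allocated segment will
         	 * not cross a 4G boundary.
         	 */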
   5331 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5332 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5333 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5334 		aprint_error_dev(sc->sc_dev,
   5335 		    "unable to allocate TX control data, error = %d\n",
   5336 		    error);
   5337 		goto fail_0;
   5338 	}
   5339 
   5340 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5341 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5342 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5343 		aprint_error_dev(sc->sc_dev,
   5344 		    "unable to map TX control data, error = %d\n", error);
   5345 		goto fail_1;
   5346 	}
   5347 
   5348 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5349 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5350 		aprint_error_dev(sc->sc_dev,
   5351 		    "unable to create TX control data DMA map, error = %d\n",
   5352 		    error);
   5353 		goto fail_2;
   5354 	}
   5355 
   5356 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5357 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5358 		aprint_error_dev(sc->sc_dev,
   5359 		    "unable to load TX control data DMA map, error = %d\n",
   5360 		    error);
   5361 		goto fail_3;
   5362 	}
   5363 
   5364 	return 0;
   5365 
   5366  fail_3:
   5367 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5368  fail_2:
   5369 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5370 	    WM_TXDESCS_SIZE(txq));
   5371  fail_1:
   5372 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5373  fail_0:
   5374 	return error;
   5375 }
   5376 
   5377 static void
   5378 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5379 {
   5380 
   5381 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5382 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5383 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5384 	    WM_TXDESCS_SIZE(txq));
   5385 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5386 }
   5387 
   5388 static int
   5389 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5390 {
   5391 	int error;
   5392 
   5393 	/*
   5394 	 * Allocate the control data structures, and create and load the
   5395 	 * DMA map for it.
   5396 	 *
   5397 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5398 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5399 	 * both sets within the same 4G segment.
   5400 	 */
   5401 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5402 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5403 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5404 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5405 		aprint_error_dev(sc->sc_dev,
   5406 		    "unable to allocate RX control data, error = %d\n",
   5407 		    error);
   5408 		goto fail_0;
   5409 	}
   5410 
   5411 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5412 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5413 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5414 		aprint_error_dev(sc->sc_dev,
   5415 		    "unable to map RX control data, error = %d\n", error);
   5416 		goto fail_1;
   5417 	}
   5418 
   5419 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5420 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5421 		aprint_error_dev(sc->sc_dev,
   5422 		    "unable to create RX control data DMA map, error = %d\n",
   5423 		    error);
   5424 		goto fail_2;
   5425 	}
   5426 
   5427 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5428 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5429 		aprint_error_dev(sc->sc_dev,
   5430 		    "unable to load RX control data DMA map, error = %d\n",
   5431 		    error);
   5432 		goto fail_3;
   5433 	}
   5434 
   5435 	return 0;
   5436 
   5437  fail_3:
   5438 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5439  fail_2:
   5440 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5441 	    rxq->rxq_desc_size);
   5442  fail_1:
   5443 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5444  fail_0:
   5445 	return error;
   5446 }
   5447 
   5448 static void
   5449 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5450 {
   5451 
   5452 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5453 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5454 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5455 	    rxq->rxq_desc_size);
   5456 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5457 }
   5458 
   5459 
   5460 static int
   5461 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5462 {
   5463 	int i, error;
   5464 
   5465 	/* Create the transmit buffer DMA maps. */
   5466 	WM_TXQUEUELEN(txq) =
   5467 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5468 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5469 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5470 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5471 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5472 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5473 			aprint_error_dev(sc->sc_dev,
   5474 			    "unable to create Tx DMA map %d, error = %d\n",
   5475 			    i, error);
   5476 			goto fail;
   5477 		}
   5478 	}
   5479 
   5480 	return 0;
   5481 
   5482  fail:
   5483 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5484 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5485 			bus_dmamap_destroy(sc->sc_dmat,
   5486 			    txq->txq_soft[i].txs_dmamap);
   5487 	}
   5488 	return error;
   5489 }
   5490 
   5491 static void
   5492 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5493 {
   5494 	int i;
   5495 
   5496 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5497 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5498 			bus_dmamap_destroy(sc->sc_dmat,
   5499 			    txq->txq_soft[i].txs_dmamap);
   5500 	}
   5501 }
   5502 
   5503 static int
   5504 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5505 {
   5506 	int i, error;
   5507 
   5508 	/* Create the receive buffer DMA maps. */
   5509 	for (i = 0; i < WM_NRXDESC; i++) {
   5510 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5511 			    MCLBYTES, 0, 0,
   5512 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5513 			aprint_error_dev(sc->sc_dev,
   5514 			    "unable to create Rx DMA map %d error = %d\n",
   5515 			    i, error);
   5516 			goto fail;
   5517 		}
   5518 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5519 	}
   5520 
   5521 	return 0;
   5522 
   5523  fail:
   5524 	for (i = 0; i < WM_NRXDESC; i++) {
   5525 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5526 			bus_dmamap_destroy(sc->sc_dmat,
   5527 			    rxq->rxq_soft[i].rxs_dmamap);
   5528 	}
   5529 	return error;
   5530 }
   5531 
   5532 static void
   5533 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5534 {
   5535 	int i;
   5536 
   5537 	for (i = 0; i < WM_NRXDESC; i++) {
   5538 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5539 			bus_dmamap_destroy(sc->sc_dmat,
   5540 			    rxq->rxq_soft[i].rxs_dmamap);
   5541 	}
   5542 }
   5543 
   5544 /*
    5545  * wm_alloc_txrx_queues:
    5546  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5547  */
   5548 static int
   5549 wm_alloc_txrx_queues(struct wm_softc *sc)
   5550 {
   5551 	int i, error, tx_done, rx_done;
   5552 
   5553 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5554 	    KM_SLEEP);
   5555 	if (sc->sc_queue == NULL) {
    5556 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5557 		error = ENOMEM;
   5558 		goto fail_0;
   5559 	}
   5560 
   5561 	/*
   5562 	 * For transmission
   5563 	 */
   5564 	error = 0;
   5565 	tx_done = 0;
   5566 	for (i = 0; i < sc->sc_nqueues; i++) {
   5567 #ifdef WM_EVENT_COUNTERS
   5568 		int j;
   5569 		const char *xname;
   5570 #endif
   5571 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5572 		txq->txq_sc = sc;
   5573 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5574 
   5575 		error = wm_alloc_tx_descs(sc, txq);
   5576 		if (error)
   5577 			break;
   5578 		error = wm_alloc_tx_buffer(sc, txq);
   5579 		if (error) {
   5580 			wm_free_tx_descs(sc, txq);
   5581 			break;
   5582 		}
   5583 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5584 		if (txq->txq_interq == NULL) {
   5585 			wm_free_tx_descs(sc, txq);
   5586 			wm_free_tx_buffer(sc, txq);
   5587 			error = ENOMEM;
   5588 			break;
   5589 		}
   5590 
   5591 #ifdef WM_EVENT_COUNTERS
   5592 		xname = device_xname(sc->sc_dev);
   5593 
   5594 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5595 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5596 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5597 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5598 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5599 
   5600 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5601 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5602 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5603 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5604 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5605 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5606 
   5607 		for (j = 0; j < WM_NTXSEGS; j++) {
   5608 			snprintf(txq->txq_txseg_evcnt_names[j],
   5609 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5610 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5611 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5612 		}
   5613 
   5614 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5615 
   5616 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5617 #endif /* WM_EVENT_COUNTERS */
   5618 
   5619 		tx_done++;
   5620 	}
   5621 	if (error)
   5622 		goto fail_1;
   5623 
   5624 	/*
    5625 	 * For receive
   5626 	 */
   5627 	error = 0;
   5628 	rx_done = 0;
   5629 	for (i = 0; i < sc->sc_nqueues; i++) {
   5630 #ifdef WM_EVENT_COUNTERS
   5631 		const char *xname;
   5632 #endif
   5633 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5634 		rxq->rxq_sc = sc;
   5635 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5636 
   5637 		error = wm_alloc_rx_descs(sc, rxq);
   5638 		if (error)
   5639 			break;
   5640 
   5641 		error = wm_alloc_rx_buffer(sc, rxq);
   5642 		if (error) {
   5643 			wm_free_rx_descs(sc, rxq);
   5644 			break;
   5645 		}
   5646 
   5647 #ifdef WM_EVENT_COUNTERS
   5648 		xname = device_xname(sc->sc_dev);
   5649 
   5650 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5651 
   5652 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5653 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5654 #endif /* WM_EVENT_COUNTERS */
   5655 
   5656 		rx_done++;
   5657 	}
   5658 	if (error)
   5659 		goto fail_2;
   5660 
   5661 	return 0;
   5662 
   5663  fail_2:
   5664 	for (i = 0; i < rx_done; i++) {
   5665 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5666 		wm_free_rx_buffer(sc, rxq);
   5667 		wm_free_rx_descs(sc, rxq);
   5668 		if (rxq->rxq_lock)
   5669 			mutex_obj_free(rxq->rxq_lock);
   5670 	}
   5671  fail_1:
   5672 	for (i = 0; i < tx_done; i++) {
   5673 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5674 		pcq_destroy(txq->txq_interq);
   5675 		wm_free_tx_buffer(sc, txq);
   5676 		wm_free_tx_descs(sc, txq);
   5677 		if (txq->txq_lock)
   5678 			mutex_obj_free(txq->txq_lock);
   5679 	}
   5680 
   5681 	kmem_free(sc->sc_queue,
   5682 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5683  fail_0:
   5684 	return error;
   5685 }
   5686 
   5687 /*
    5688  * wm_free_txrx_queues:
    5689  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   5690  */
   5691 static void
   5692 wm_free_txrx_queues(struct wm_softc *sc)
   5693 {
   5694 	int i;
   5695 
   5696 	for (i = 0; i < sc->sc_nqueues; i++) {
   5697 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5698 		wm_free_rx_buffer(sc, rxq);
   5699 		wm_free_rx_descs(sc, rxq);
   5700 		if (rxq->rxq_lock)
   5701 			mutex_obj_free(rxq->rxq_lock);
   5702 	}
   5703 
   5704 	for (i = 0; i < sc->sc_nqueues; i++) {
   5705 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5706 		wm_free_tx_buffer(sc, txq);
   5707 		wm_free_tx_descs(sc, txq);
   5708 		if (txq->txq_lock)
   5709 			mutex_obj_free(txq->txq_lock);
   5710 	}
   5711 
   5712 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5713 }
   5714 
   5715 static void
   5716 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5717 {
   5718 
   5719 	KASSERT(mutex_owned(txq->txq_lock));
   5720 
   5721 	/* Initialize the transmit descriptor ring. */
   5722 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5723 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5724 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5725 	txq->txq_free = WM_NTXDESC(txq);
   5726 	txq->txq_next = 0;
   5727 }
   5728 
   5729 static void
   5730 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5731     struct wm_txqueue *txq)
   5732 {
   5733 
   5734 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5735 		device_xname(sc->sc_dev), __func__));
   5736 	KASSERT(mutex_owned(txq->txq_lock));
   5737 
   5738 	if (sc->sc_type < WM_T_82543) {
   5739 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5740 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5741 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5742 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5743 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5744 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5745 	} else {
   5746 		int qid = wmq->wmq_id;
   5747 
   5748 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5749 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5750 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5751 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5752 
   5753 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5754 			/*
   5755 			 * Don't write TDT before TCTL.EN is set.
    5756 			 * See the documentation.
   5757 			 */
   5758 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5759 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5760 			    | TXDCTL_WTHRESH(0));
   5761 		else {
    5762 			/* ITR / 4, for "simple timer" behavior */
   5763 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5764 			if (sc->sc_type >= WM_T_82540) {
    5765 				/* Should be the same value */
   5766 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5767 			}
   5768 
   5769 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5770 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5771 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5772 		}
   5773 	}
   5774 }
   5775 
   5776 static void
   5777 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5778 {
   5779 	int i;
   5780 
   5781 	KASSERT(mutex_owned(txq->txq_lock));
   5782 
   5783 	/* Initialize the transmit job descriptors. */
   5784 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5785 		txq->txq_soft[i].txs_mbuf = NULL;
   5786 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5787 	txq->txq_snext = 0;
   5788 	txq->txq_sdirty = 0;
   5789 }
   5790 
   5791 static void
   5792 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5793     struct wm_txqueue *txq)
   5794 {
   5795 
   5796 	KASSERT(mutex_owned(txq->txq_lock));
   5797 
   5798 	/*
   5799 	 * Set up some register offsets that are different between
   5800 	 * the i82542 and the i82543 and later chips.
   5801 	 */
   5802 	if (sc->sc_type < WM_T_82543)
   5803 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5804 	else
   5805 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5806 
   5807 	wm_init_tx_descs(sc, txq);
   5808 	wm_init_tx_regs(sc, wmq, txq);
   5809 	wm_init_tx_buffer(sc, txq);
   5810 }
   5811 
   5812 static void
   5813 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5814     struct wm_rxqueue *rxq)
   5815 {
   5816 
   5817 	KASSERT(mutex_owned(rxq->rxq_lock));
   5818 
   5819 	/*
   5820 	 * Initialize the receive descriptor and receive job
   5821 	 * descriptor rings.
   5822 	 */
   5823 	if (sc->sc_type < WM_T_82543) {
   5824 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5825 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5826 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5827 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5828 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5829 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5830 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5831 
   5832 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5833 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5834 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5835 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5836 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5837 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5838 	} else {
   5839 		int qid = wmq->wmq_id;
   5840 
   5841 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5842 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5843 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5844 
   5845 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5846 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5847 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
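         			/*
         			 * SRRCTL.BSIZEPKT is expressed in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so e.g.
         			 * MCLBYTES of 2048 with a 1 KB unit would program
         			 * a 2 KB receive buffer (illustrative; see the
         			 * datasheet for the exact field layout).
         			 */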
   5848 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5849 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5850 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5851 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5852 			    | RXDCTL_WTHRESH(1));
   5853 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5854 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5855 		} else {
   5856 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5857 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5858 			/* ITR / 4 */
   5859 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5860 			/* MUST be the same value */
   5861 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5862 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5863 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5864 		}
   5865 	}
   5866 }
   5867 
   5868 static int
   5869 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5870 {
   5871 	struct wm_rxsoft *rxs;
   5872 	int error, i;
   5873 
   5874 	KASSERT(mutex_owned(rxq->rxq_lock));
   5875 
   5876 	for (i = 0; i < WM_NRXDESC; i++) {
   5877 		rxs = &rxq->rxq_soft[i];
   5878 		if (rxs->rxs_mbuf == NULL) {
   5879 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5880 				log(LOG_ERR, "%s: unable to allocate or map "
   5881 				    "rx buffer %d, error = %d\n",
   5882 				    device_xname(sc->sc_dev), i, error);
   5883 				/*
   5884 				 * XXX Should attempt to run with fewer receive
   5885 				 * XXX buffers instead of just failing.
   5886 				 */
   5887 				wm_rxdrain(rxq);
   5888 				return ENOMEM;
   5889 			}
   5890 		} else {
   5891 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5892 				wm_init_rxdesc(rxq, i);
   5893 			/*
    5894 			 * For 82575 and newer devices, the RX descriptors
    5895 			 * must be initialized after RCTL.EN is set in
    5896 			 * wm_set_filter().
   5897 			 */
   5898 		}
   5899 	}
   5900 	rxq->rxq_ptr = 0;
   5901 	rxq->rxq_discard = 0;
   5902 	WM_RXCHAIN_RESET(rxq);
   5903 
   5904 	return 0;
   5905 }
   5906 
   5907 static int
   5908 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5909     struct wm_rxqueue *rxq)
   5910 {
   5911 
   5912 	KASSERT(mutex_owned(rxq->rxq_lock));
   5913 
   5914 	/*
   5915 	 * Set up some register offsets that are different between
   5916 	 * the i82542 and the i82543 and later chips.
   5917 	 */
   5918 	if (sc->sc_type < WM_T_82543)
   5919 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5920 	else
   5921 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5922 
   5923 	wm_init_rx_regs(sc, wmq, rxq);
   5924 	return wm_init_rx_buffer(sc, rxq);
   5925 }
   5926 
   5927 /*
    5928  * wm_init_txrx_queues:
    5929  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   5930  */
   5931 static int
   5932 wm_init_txrx_queues(struct wm_softc *sc)
   5933 {
   5934 	int i, error = 0;
   5935 
   5936 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5937 		device_xname(sc->sc_dev), __func__));
   5938 
   5939 	for (i = 0; i < sc->sc_nqueues; i++) {
   5940 		struct wm_queue *wmq = &sc->sc_queue[i];
   5941 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5942 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5943 
   5944 		mutex_enter(txq->txq_lock);
   5945 		wm_init_tx_queue(sc, wmq, txq);
   5946 		mutex_exit(txq->txq_lock);
   5947 
   5948 		mutex_enter(rxq->rxq_lock);
   5949 		error = wm_init_rx_queue(sc, wmq, rxq);
   5950 		mutex_exit(rxq->rxq_lock);
   5951 		if (error)
   5952 			break;
   5953 	}
   5954 
   5955 	return error;
   5956 }
   5957 
   5958 /*
   5959  * wm_tx_offload:
   5960  *
   5961  *	Set up TCP/IP checksumming parameters for the
   5962  *	specified packet.
   5963  */
   5964 static int
   5965 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5966     uint8_t *fieldsp)
   5967 {
   5968 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5969 	struct mbuf *m0 = txs->txs_mbuf;
   5970 	struct livengood_tcpip_ctxdesc *t;
   5971 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5972 	uint32_t ipcse;
   5973 	struct ether_header *eh;
   5974 	int offset, iphl;
   5975 	uint8_t fields;
   5976 
   5977 	/*
   5978 	 * XXX It would be nice if the mbuf pkthdr had offset
   5979 	 * fields for the protocol headers.
   5980 	 */
   5981 
   5982 	eh = mtod(m0, struct ether_header *);
   5983 	switch (htons(eh->ether_type)) {
   5984 	case ETHERTYPE_IP:
   5985 	case ETHERTYPE_IPV6:
   5986 		offset = ETHER_HDR_LEN;
   5987 		break;
   5988 
   5989 	case ETHERTYPE_VLAN:
   5990 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5991 		break;
   5992 
   5993 	default:
   5994 		/*
   5995 		 * Don't support this protocol or encapsulation.
   5996 		 */
   5997 		*fieldsp = 0;
   5998 		*cmdp = 0;
   5999 		return 0;
   6000 	}
   6001 
   6002 	if ((m0->m_pkthdr.csum_flags &
   6003 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6004 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6005 	} else {
   6006 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6007 	}
   6008 	ipcse = offset + iphl - 1;
   6009 
   6010 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6011 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6012 	seg = 0;
   6013 	fields = 0;
   6014 
   6015 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6016 		int hlen = offset + iphl;
   6017 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6018 
   6019 		if (__predict_false(m0->m_len <
   6020 				    (hlen + sizeof(struct tcphdr)))) {
   6021 			/*
   6022 			 * TCP/IP headers are not in the first mbuf; we need
   6023 			 * to do this the slow and painful way.  Let's just
   6024 			 * hope this doesn't happen very often.
   6025 			 */
   6026 			struct tcphdr th;
   6027 
   6028 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6029 
   6030 			m_copydata(m0, hlen, sizeof(th), &th);
   6031 			if (v4) {
   6032 				struct ip ip;
   6033 
   6034 				m_copydata(m0, offset, sizeof(ip), &ip);
   6035 				ip.ip_len = 0;
   6036 				m_copyback(m0,
   6037 				    offset + offsetof(struct ip, ip_len),
   6038 				    sizeof(ip.ip_len), &ip.ip_len);
   6039 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6040 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6041 			} else {
   6042 				struct ip6_hdr ip6;
   6043 
   6044 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6045 				ip6.ip6_plen = 0;
   6046 				m_copyback(m0,
   6047 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6048 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6049 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6050 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6051 			}
   6052 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6053 			    sizeof(th.th_sum), &th.th_sum);
   6054 
   6055 			hlen += th.th_off << 2;
   6056 		} else {
   6057 			/*
   6058 			 * TCP/IP headers are in the first mbuf; we can do
   6059 			 * this the easy way.
   6060 			 */
   6061 			struct tcphdr *th;
   6062 
   6063 			if (v4) {
   6064 				struct ip *ip =
   6065 				    (void *)(mtod(m0, char *) + offset);
   6066 				th = (void *)(mtod(m0, char *) + hlen);
   6067 
   6068 				ip->ip_len = 0;
   6069 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6070 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6071 			} else {
   6072 				struct ip6_hdr *ip6 =
   6073 				    (void *)(mtod(m0, char *) + offset);
   6074 				th = (void *)(mtod(m0, char *) + hlen);
   6075 
   6076 				ip6->ip6_plen = 0;
   6077 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6078 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6079 			}
   6080 			hlen += th->th_off << 2;
   6081 		}
   6082 
   6083 		if (v4) {
   6084 			WM_Q_EVCNT_INCR(txq, txtso);
   6085 			cmdlen |= WTX_TCPIP_CMD_IP;
   6086 		} else {
   6087 			WM_Q_EVCNT_INCR(txq, txtso6);
   6088 			ipcse = 0;
   6089 		}
   6090 		cmd |= WTX_TCPIP_CMD_TSE;
   6091 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6092 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6093 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6094 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6095 	}
   6096 
   6097 	/*
   6098 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6099 	 * offload feature, if we load the context descriptor, we
   6100 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6101 	 */
   6102 
   6103 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6104 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6105 	    WTX_TCPIP_IPCSE(ipcse);
   6106 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6107 		WM_Q_EVCNT_INCR(txq, txipsum);
   6108 		fields |= WTX_IXSM;
   6109 	}
   6110 
   6111 	offset += iphl;
   6112 
   6113 	if (m0->m_pkthdr.csum_flags &
   6114 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6115 		WM_Q_EVCNT_INCR(txq, txtusum);
   6116 		fields |= WTX_TXSM;
   6117 		tucs = WTX_TCPIP_TUCSS(offset) |
   6118 		    WTX_TCPIP_TUCSO(offset +
   6119 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6120 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6121 	} else if ((m0->m_pkthdr.csum_flags &
   6122 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6123 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6124 		fields |= WTX_TXSM;
   6125 		tucs = WTX_TCPIP_TUCSS(offset) |
   6126 		    WTX_TCPIP_TUCSO(offset +
   6127 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6128 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6129 	} else {
   6130 		/* Just initialize it to a valid TCP context. */
   6131 		tucs = WTX_TCPIP_TUCSS(offset) |
   6132 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6133 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6134 	}
   6135 
   6136 	/* Fill in the context descriptor. */
   6137 	t = (struct livengood_tcpip_ctxdesc *)
   6138 	    &txq->txq_descs[txq->txq_next];
   6139 	t->tcpip_ipcs = htole32(ipcs);
   6140 	t->tcpip_tucs = htole32(tucs);
   6141 	t->tcpip_cmdlen = htole32(cmdlen);
   6142 	t->tcpip_seg = htole32(seg);
   6143 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6144 
   6145 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6146 	txs->txs_ndesc++;
   6147 
   6148 	*cmdp = cmd;
   6149 	*fieldsp = fields;
   6150 
   6151 	return 0;
   6152 }
   6153 
   6154 /*
   6155  * wm_start:		[ifnet interface function]
   6156  *
   6157  *	Start packet transmission on the interface.
   6158  */
   6159 static void
   6160 wm_start(struct ifnet *ifp)
   6161 {
   6162 	struct wm_softc *sc = ifp->if_softc;
   6163 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6164 
   6165 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6166 
   6167 	mutex_enter(txq->txq_lock);
   6168 	if (!sc->sc_stopping)
   6169 		wm_start_locked(ifp);
   6170 	mutex_exit(txq->txq_lock);
   6171 }
   6172 
   6173 static void
   6174 wm_start_locked(struct ifnet *ifp)
   6175 {
   6176 	struct wm_softc *sc = ifp->if_softc;
   6177 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6178 	struct mbuf *m0;
   6179 	struct m_tag *mtag;
   6180 	struct wm_txsoft *txs;
   6181 	bus_dmamap_t dmamap;
   6182 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6183 	bus_addr_t curaddr;
   6184 	bus_size_t seglen, curlen;
   6185 	uint32_t cksumcmd;
   6186 	uint8_t cksumfields;
   6187 
   6188 	KASSERT(mutex_owned(txq->txq_lock));
   6189 
   6190 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6191 		return;
   6192 
   6193 	/* Remember the previous number of free descriptors. */
   6194 	ofree = txq->txq_free;
   6195 
   6196 	/*
   6197 	 * Loop through the send queue, setting up transmit descriptors
   6198 	 * until we drain the queue, or use up all available transmit
   6199 	 * descriptors.
   6200 	 */
   6201 	for (;;) {
   6202 		m0 = NULL;
   6203 
   6204 		/* Get a work queue entry. */
   6205 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6206 			wm_txeof(sc, txq);
   6207 			if (txq->txq_sfree == 0) {
   6208 				DPRINTF(WM_DEBUG_TX,
   6209 				    ("%s: TX: no free job descriptors\n",
   6210 					device_xname(sc->sc_dev)));
   6211 				WM_Q_EVCNT_INCR(txq, txsstall);
   6212 				break;
   6213 			}
   6214 		}
   6215 
   6216 		/* Grab a packet off the queue. */
   6217 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6218 		if (m0 == NULL)
   6219 			break;
   6220 
   6221 		DPRINTF(WM_DEBUG_TX,
   6222 		    ("%s: TX: have packet to transmit: %p\n",
   6223 		    device_xname(sc->sc_dev), m0));
   6224 
   6225 		txs = &txq->txq_soft[txq->txq_snext];
   6226 		dmamap = txs->txs_dmamap;
   6227 
   6228 		use_tso = (m0->m_pkthdr.csum_flags &
   6229 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6230 
   6231 		/*
   6232 		 * So says the Linux driver:
   6233 		 * The controller does a simple calculation to make sure
   6234 		 * there is enough room in the FIFO before initiating the
   6235 		 * DMA for each buffer.  The calc is:
   6236 		 *	4 = ceil(buffer len / MSS)
   6237 		 * To make sure we don't overrun the FIFO, adjust the max
   6238 		 * buffer len if the MSS drops.
   6239 		 */
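         		/*
         		 * Example (illustrative): with an MSS of 1448,
         		 * segsz << 2 = 5792, so each DMA segment is capped
         		 * at 5792 bytes (assuming WTX_MAX_LEN is larger).
         		 */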
   6240 		dmamap->dm_maxsegsz =
   6241 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6242 		    ? m0->m_pkthdr.segsz << 2
   6243 		    : WTX_MAX_LEN;
   6244 
   6245 		/*
   6246 		 * Load the DMA map.  If this fails, the packet either
   6247 		 * didn't fit in the allotted number of segments, or we
   6248 		 * were short on resources.  For the too-many-segments
   6249 		 * case, we simply report an error and drop the packet,
   6250 		 * since we can't sanely copy a jumbo packet to a single
   6251 		 * buffer.
   6252 		 */
   6253 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6254 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6255 		if (error) {
   6256 			if (error == EFBIG) {
   6257 				WM_Q_EVCNT_INCR(txq, txdrop);
   6258 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6259 				    "DMA segments, dropping...\n",
   6260 				    device_xname(sc->sc_dev));
   6261 				wm_dump_mbuf_chain(sc, m0);
   6262 				m_freem(m0);
   6263 				continue;
   6264 			}
   6265 			/*  Short on resources, just stop for now. */
   6266 			DPRINTF(WM_DEBUG_TX,
   6267 			    ("%s: TX: dmamap load failed: %d\n",
   6268 			    device_xname(sc->sc_dev), error));
   6269 			break;
   6270 		}
   6271 
   6272 		segs_needed = dmamap->dm_nsegs;
   6273 		if (use_tso) {
   6274 			/* For sentinel descriptor; see below. */
   6275 			segs_needed++;
   6276 		}
   6277 
   6278 		/*
   6279 		 * Ensure we have enough descriptors free to describe
   6280 		 * the packet.  Note, we always reserve one descriptor
   6281 		 * at the end of the ring due to the semantics of the
   6282 		 * TDT register, plus one more in the event we need
   6283 		 * to load offload context.
   6284 		 */
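         		/*
         		 * Example (illustrative): a packet mapped to 5 DMA
         		 * segments with TSO needs segs_needed = 6 (one extra
         		 * for the sentinel descriptor); with txq_free = 7 we
         		 * would stop here, since 6 > 7 - 2 leaves no room for
         		 * the reserved TDT slot plus a context descriptor.
         		 */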
   6285 		if (segs_needed > txq->txq_free - 2) {
   6286 			/*
   6287 			 * Not enough free descriptors to transmit this
   6288 			 * packet.  We haven't committed anything yet,
   6289 			 * so just unload the DMA map, put the packet
    6290 			 * back on the queue, and punt.  Notify the upper
   6291 			 * layer that there are no more slots left.
   6292 			 */
   6293 			DPRINTF(WM_DEBUG_TX,
   6294 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6295 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6296 			    segs_needed, txq->txq_free - 1));
   6297 			ifp->if_flags |= IFF_OACTIVE;
   6298 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6299 			WM_Q_EVCNT_INCR(txq, txdstall);
   6300 			break;
   6301 		}
   6302 
   6303 		/*
   6304 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6305 		 * once we know we can transmit the packet, since we
   6306 		 * do some internal FIFO space accounting here.
   6307 		 */
   6308 		if (sc->sc_type == WM_T_82547 &&
   6309 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6310 			DPRINTF(WM_DEBUG_TX,
   6311 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6312 			    device_xname(sc->sc_dev)));
   6313 			ifp->if_flags |= IFF_OACTIVE;
   6314 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6315 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6316 			break;
   6317 		}
   6318 
   6319 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6320 
   6321 		DPRINTF(WM_DEBUG_TX,
   6322 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6323 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6324 
   6325 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6326 
   6327 		/*
   6328 		 * Store a pointer to the packet so that we can free it
   6329 		 * later.
   6330 		 *
   6331 		 * Initially, we consider the number of descriptors the
    6332 		 * packet uses to be the number of DMA segments.  This may be
   6333 		 * incremented by 1 if we do checksum offload (a descriptor
   6334 		 * is used to set the checksum context).
   6335 		 */
   6336 		txs->txs_mbuf = m0;
   6337 		txs->txs_firstdesc = txq->txq_next;
   6338 		txs->txs_ndesc = segs_needed;
   6339 
   6340 		/* Set up offload parameters for this packet. */
   6341 		if (m0->m_pkthdr.csum_flags &
   6342 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6343 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6344 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6345 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6346 					  &cksumfields) != 0) {
   6347 				/* Error message already displayed. */
   6348 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6349 				continue;
   6350 			}
   6351 		} else {
   6352 			cksumcmd = 0;
   6353 			cksumfields = 0;
   6354 		}
   6355 
   6356 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6357 
   6358 		/* Sync the DMA map. */
   6359 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6360 		    BUS_DMASYNC_PREWRITE);
   6361 
   6362 		/* Initialize the transmit descriptor. */
   6363 		for (nexttx = txq->txq_next, seg = 0;
   6364 		     seg < dmamap->dm_nsegs; seg++) {
   6365 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6366 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6367 			     seglen != 0;
   6368 			     curaddr += curlen, seglen -= curlen,
   6369 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6370 				curlen = seglen;
   6371 
   6372 				/*
   6373 				 * So says the Linux driver:
   6374 				 * Work around for premature descriptor
   6375 				 * write-backs in TSO mode.  Append a
   6376 				 * 4-byte sentinel descriptor.
   6377 				 */
   6378 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6379 				    curlen > 8)
   6380 					curlen -= 4;
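         				/*
         				 * The 4 bytes trimmed here are emitted
         				 * on the next loop iteration as a small
         				 * final descriptor; this is the sentinel
         				 * that segs_needed was incremented for
         				 * above.
         				 */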
   6381 
   6382 				wm_set_dma_addr(
   6383 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6384 				txq->txq_descs[nexttx].wtx_cmdlen
   6385 				    = htole32(cksumcmd | curlen);
   6386 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6387 				    = 0;
   6388 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6389 				    = cksumfields;
    6390 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6391 				lasttx = nexttx;
   6392 
   6393 				DPRINTF(WM_DEBUG_TX,
   6394 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6395 				     "len %#04zx\n",
   6396 				    device_xname(sc->sc_dev), nexttx,
   6397 				    (uint64_t)curaddr, curlen));
   6398 			}
   6399 		}
   6400 
   6401 		KASSERT(lasttx != -1);
   6402 
   6403 		/*
   6404 		 * Set up the command byte on the last descriptor of
   6405 		 * the packet.  If we're in the interrupt delay window,
   6406 		 * delay the interrupt.
   6407 		 */
   6408 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6409 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6410 
   6411 		/*
   6412 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6413 		 * up the descriptor to encapsulate the packet for us.
   6414 		 *
   6415 		 * This is only valid on the last descriptor of the packet.
   6416 		 */
   6417 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6418 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6419 			    htole32(WTX_CMD_VLE);
   6420 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6421 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6422 		}
   6423 
   6424 		txs->txs_lastdesc = lasttx;
   6425 
   6426 		DPRINTF(WM_DEBUG_TX,
   6427 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6428 		    device_xname(sc->sc_dev),
   6429 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6430 
   6431 		/* Sync the descriptors we're using. */
   6432 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6433 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6434 
   6435 		/* Give the packet to the chip. */
   6436 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6437 
   6438 		DPRINTF(WM_DEBUG_TX,
   6439 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6440 
   6441 		DPRINTF(WM_DEBUG_TX,
   6442 		    ("%s: TX: finished transmitting packet, job %d\n",
   6443 		    device_xname(sc->sc_dev), txq->txq_snext));
   6444 
   6445 		/* Advance the tx pointer. */
   6446 		txq->txq_free -= txs->txs_ndesc;
   6447 		txq->txq_next = nexttx;
   6448 
   6449 		txq->txq_sfree--;
   6450 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6451 
   6452 		/* Pass the packet to any BPF listeners. */
   6453 		bpf_mtap(ifp, m0);
   6454 	}
   6455 
   6456 	if (m0 != NULL) {
   6457 		ifp->if_flags |= IFF_OACTIVE;
   6458 		WM_Q_EVCNT_INCR(txq, txdrop);
   6459 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6460 			__func__));
   6461 		m_freem(m0);
   6462 	}
   6463 
   6464 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6465 		/* No more slots; notify upper layer. */
   6466 		ifp->if_flags |= IFF_OACTIVE;
   6467 	}
   6468 
   6469 	if (txq->txq_free != ofree) {
   6470 		/* Set a watchdog timer in case the chip flakes out. */
   6471 		ifp->if_timer = 5;
   6472 	}
   6473 }
   6474 
   6475 /*
   6476  * wm_nq_tx_offload:
   6477  *
   6478  *	Set up TCP/IP checksumming parameters for the
   6479  *	specified packet, for NEWQUEUE devices
   6480  */
   6481 static int
   6482 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6483     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6484 {
   6485 	struct mbuf *m0 = txs->txs_mbuf;
   6486 	struct m_tag *mtag;
   6487 	uint32_t vl_len, mssidx, cmdc;
   6488 	struct ether_header *eh;
   6489 	int offset, iphl;
   6490 
   6491 	/*
   6492 	 * XXX It would be nice if the mbuf pkthdr had offset
   6493 	 * fields for the protocol headers.
   6494 	 */
   6495 	*cmdlenp = 0;
   6496 	*fieldsp = 0;
   6497 
   6498 	eh = mtod(m0, struct ether_header *);
   6499 	switch (htons(eh->ether_type)) {
   6500 	case ETHERTYPE_IP:
   6501 	case ETHERTYPE_IPV6:
   6502 		offset = ETHER_HDR_LEN;
   6503 		break;
   6504 
   6505 	case ETHERTYPE_VLAN:
   6506 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6507 		break;
   6508 
   6509 	default:
   6510 		/* Don't support this protocol or encapsulation. */
   6511 		*do_csum = false;
   6512 		return 0;
   6513 	}
   6514 	*do_csum = true;
   6515 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6516 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6517 
   6518 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6519 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6520 
   6521 	if ((m0->m_pkthdr.csum_flags &
   6522 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6523 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6524 	} else {
   6525 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6526 	}
   6527 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6528 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6529 
   6530 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6531 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6532 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6533 		*cmdlenp |= NQTX_CMD_VLE;
   6534 	}
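         	/*
         	 * vl_len now packs the context descriptor's VLAN tag,
         	 * MACLEN (L2 header length) and IPLEN (L3 header length)
         	 * fields, per the NQTXC_VLLEN_* shifts above.
         	 */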
   6535 
   6536 	mssidx = 0;
   6537 
   6538 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6539 		int hlen = offset + iphl;
   6540 		int tcp_hlen;
   6541 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6542 
   6543 		if (__predict_false(m0->m_len <
   6544 				    (hlen + sizeof(struct tcphdr)))) {
   6545 			/*
   6546 			 * TCP/IP headers are not in the first mbuf; we need
   6547 			 * to do this the slow and painful way.  Let's just
   6548 			 * hope this doesn't happen very often.
   6549 			 */
   6550 			struct tcphdr th;
   6551 
   6552 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6553 
   6554 			m_copydata(m0, hlen, sizeof(th), &th);
   6555 			if (v4) {
   6556 				struct ip ip;
   6557 
   6558 				m_copydata(m0, offset, sizeof(ip), &ip);
   6559 				ip.ip_len = 0;
   6560 				m_copyback(m0,
   6561 				    offset + offsetof(struct ip, ip_len),
   6562 				    sizeof(ip.ip_len), &ip.ip_len);
   6563 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6564 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6565 			} else {
   6566 				struct ip6_hdr ip6;
   6567 
   6568 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6569 				ip6.ip6_plen = 0;
   6570 				m_copyback(m0,
   6571 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6572 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6573 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6574 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6575 			}
   6576 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6577 			    sizeof(th.th_sum), &th.th_sum);
   6578 
   6579 			tcp_hlen = th.th_off << 2;
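         			/*
         			 * th_off counts 32-bit words, so << 2 yields
         			 * the TCP header length in bytes.
         			 */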
   6580 		} else {
   6581 			/*
   6582 			 * TCP/IP headers are in the first mbuf; we can do
   6583 			 * this the easy way.
   6584 			 */
   6585 			struct tcphdr *th;
   6586 
   6587 			if (v4) {
   6588 				struct ip *ip =
   6589 				    (void *)(mtod(m0, char *) + offset);
   6590 				th = (void *)(mtod(m0, char *) + hlen);
   6591 
   6592 				ip->ip_len = 0;
   6593 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6594 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6595 			} else {
   6596 				struct ip6_hdr *ip6 =
   6597 				    (void *)(mtod(m0, char *) + offset);
   6598 				th = (void *)(mtod(m0, char *) + hlen);
   6599 
   6600 				ip6->ip6_plen = 0;
   6601 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6602 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6603 			}
   6604 			tcp_hlen = th->th_off << 2;
   6605 		}
   6606 		hlen += tcp_hlen;
   6607 		*cmdlenp |= NQTX_CMD_TSE;
   6608 
   6609 		if (v4) {
   6610 			WM_Q_EVCNT_INCR(txq, txtso);
   6611 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6612 		} else {
   6613 			WM_Q_EVCNT_INCR(txq, txtso6);
   6614 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6615 		}
    6616 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    6617 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6618 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6619 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6620 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6621 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6622 	} else {
   6623 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6624 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6625 	}
   6626 
   6627 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6628 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6629 		cmdc |= NQTXC_CMD_IP4;
   6630 	}
   6631 
   6632 	if (m0->m_pkthdr.csum_flags &
   6633 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6634 		WM_Q_EVCNT_INCR(txq, txtusum);
   6635 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6636 			cmdc |= NQTXC_CMD_TCP;
   6637 		} else {
   6638 			cmdc |= NQTXC_CMD_UDP;
   6639 		}
   6640 		cmdc |= NQTXC_CMD_IP4;
   6641 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6642 	}
   6643 	if (m0->m_pkthdr.csum_flags &
   6644 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6645 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6646 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6647 			cmdc |= NQTXC_CMD_TCP;
   6648 		} else {
   6649 			cmdc |= NQTXC_CMD_UDP;
   6650 		}
   6651 		cmdc |= NQTXC_CMD_IP6;
   6652 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6653 	}
   6654 
   6655 	/* Fill in the context descriptor. */
   6656 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6657 	    htole32(vl_len);
   6658 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6659 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6660 	    htole32(cmdc);
   6661 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6662 	    htole32(mssidx);
   6663 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6664 	DPRINTF(WM_DEBUG_TX,
   6665 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6666 	    txq->txq_next, 0, vl_len));
   6667 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6668 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6669 	txs->txs_ndesc++;
   6670 	return 0;
   6671 }
   6672 
   6673 /*
   6674  * wm_nq_start:		[ifnet interface function]
   6675  *
   6676  *	Start packet transmission on the interface for NEWQUEUE devices
   6677  */
   6678 static void
   6679 wm_nq_start(struct ifnet *ifp)
   6680 {
   6681 	struct wm_softc *sc = ifp->if_softc;
   6682 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6683 
   6684 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6685 
   6686 	mutex_enter(txq->txq_lock);
   6687 	if (!sc->sc_stopping)
   6688 		wm_nq_start_locked(ifp);
   6689 	mutex_exit(txq->txq_lock);
   6690 }
   6691 
   6692 static void
   6693 wm_nq_start_locked(struct ifnet *ifp)
   6694 {
   6695 	struct wm_softc *sc = ifp->if_softc;
   6696 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6697 
   6698 	wm_nq_send_common_locked(ifp, txq, false);
   6699 }
   6700 
   6701 static inline int
   6702 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6703 {
   6704 	struct wm_softc *sc = ifp->if_softc;
   6705 	u_int cpuid = cpu_index(curcpu());
   6706 
   6707 	/*
    6708 	 * Currently, a simple distribution strategy.
    6709 	 * TODO:
    6710 	 * distribute by flowid (RSS hash value).
   6711 	 */
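         	/* e.g. with 4 queues, cpuid 6 and offset 0 map to 6 % 4 = 2. */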
   6712 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6713 }
   6714 
   6715 static int
   6716 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6717 {
   6718 	int qid;
   6719 	struct wm_softc *sc = ifp->if_softc;
   6720 	struct wm_txqueue *txq;
   6721 
   6722 	qid = wm_nq_select_txqueue(ifp, m);
   6723 	txq = &sc->sc_queue[qid].wmq_txq;
   6724 
   6725 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6726 		m_freem(m);
   6727 		WM_Q_EVCNT_INCR(txq, txdrop);
   6728 		return ENOBUFS;
   6729 	}
   6730 
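         	/*
         	 * If the lock is contended we return without transmitting;
         	 * the packet stays on txq_interq and is drained later, e.g.
         	 * by wm_txrxintr_msix() calling wm_nq_transmit_locked().
         	 */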
   6731 	if (mutex_tryenter(txq->txq_lock)) {
   6732 		/* XXXX should be per TX queue */
   6733 		ifp->if_obytes += m->m_pkthdr.len;
   6734 		if (m->m_flags & M_MCAST)
   6735 			ifp->if_omcasts++;
   6736 
   6737 		if (!sc->sc_stopping)
   6738 			wm_nq_transmit_locked(ifp, txq);
   6739 		mutex_exit(txq->txq_lock);
   6740 	}
   6741 
   6742 	return 0;
   6743 }
   6744 
   6745 static void
   6746 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6747 {
   6748 
   6749 	wm_nq_send_common_locked(ifp, txq, true);
   6750 }
   6751 
   6752 static void
   6753 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6754     bool is_transmit)
   6755 {
   6756 	struct wm_softc *sc = ifp->if_softc;
   6757 	struct mbuf *m0;
   6758 	struct m_tag *mtag;
   6759 	struct wm_txsoft *txs;
   6760 	bus_dmamap_t dmamap;
   6761 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6762 	bool do_csum, sent;
   6763 
   6764 	KASSERT(mutex_owned(txq->txq_lock));
   6765 
   6766 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6767 		return;
   6768 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6769 		return;
   6770 
   6771 	sent = false;
   6772 
   6773 	/*
   6774 	 * Loop through the send queue, setting up transmit descriptors
   6775 	 * until we drain the queue, or use up all available transmit
   6776 	 * descriptors.
   6777 	 */
   6778 	for (;;) {
   6779 		m0 = NULL;
   6780 
   6781 		/* Get a work queue entry. */
   6782 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6783 			wm_txeof(sc, txq);
   6784 			if (txq->txq_sfree == 0) {
   6785 				DPRINTF(WM_DEBUG_TX,
   6786 				    ("%s: TX: no free job descriptors\n",
   6787 					device_xname(sc->sc_dev)));
   6788 				WM_Q_EVCNT_INCR(txq, txsstall);
   6789 				break;
   6790 			}
   6791 		}
   6792 
   6793 		/* Grab a packet off the queue. */
   6794 		if (is_transmit)
   6795 			m0 = pcq_get(txq->txq_interq);
   6796 		else
   6797 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6798 		if (m0 == NULL)
   6799 			break;
   6800 
   6801 		DPRINTF(WM_DEBUG_TX,
   6802 		    ("%s: TX: have packet to transmit: %p\n",
   6803 		    device_xname(sc->sc_dev), m0));
   6804 
   6805 		txs = &txq->txq_soft[txq->txq_snext];
   6806 		dmamap = txs->txs_dmamap;
   6807 
   6808 		/*
   6809 		 * Load the DMA map.  If this fails, the packet either
   6810 		 * didn't fit in the allotted number of segments, or we
   6811 		 * were short on resources.  For the too-many-segments
   6812 		 * case, we simply report an error and drop the packet,
   6813 		 * since we can't sanely copy a jumbo packet to a single
   6814 		 * buffer.
   6815 		 */
   6816 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6817 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6818 		if (error) {
   6819 			if (error == EFBIG) {
   6820 				WM_Q_EVCNT_INCR(txq, txdrop);
   6821 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6822 				    "DMA segments, dropping...\n",
   6823 				    device_xname(sc->sc_dev));
   6824 				wm_dump_mbuf_chain(sc, m0);
   6825 				m_freem(m0);
   6826 				continue;
   6827 			}
   6828 			/* Short on resources, just stop for now. */
   6829 			DPRINTF(WM_DEBUG_TX,
   6830 			    ("%s: TX: dmamap load failed: %d\n",
   6831 			    device_xname(sc->sc_dev), error));
   6832 			break;
   6833 		}
   6834 
   6835 		segs_needed = dmamap->dm_nsegs;
   6836 
   6837 		/*
   6838 		 * Ensure we have enough descriptors free to describe
   6839 		 * the packet.  Note, we always reserve one descriptor
   6840 		 * at the end of the ring due to the semantics of the
   6841 		 * TDT register, plus one more in the event we need
   6842 		 * to load offload context.
   6843 		 */
   6844 		if (segs_needed > txq->txq_free - 2) {
   6845 			/*
   6846 			 * Not enough free descriptors to transmit this
   6847 			 * packet.  We haven't committed anything yet,
   6848 			 * so just unload the DMA map, put the packet
    6849 			 * back on the queue, and punt.  Notify the upper
   6850 			 * layer that there are no more slots left.
   6851 			 */
   6852 			DPRINTF(WM_DEBUG_TX,
   6853 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6854 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6855 			    segs_needed, txq->txq_free - 1));
   6856 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6857 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6858 			WM_Q_EVCNT_INCR(txq, txdstall);
   6859 			break;
   6860 		}
   6861 
   6862 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6863 
   6864 		DPRINTF(WM_DEBUG_TX,
   6865 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6866 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6867 
   6868 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6869 
   6870 		/*
   6871 		 * Store a pointer to the packet so that we can free it
   6872 		 * later.
   6873 		 *
   6874 		 * Initially, we consider the number of descriptors the
    6875 		 * packet uses to be the number of DMA segments.  This may be
   6876 		 * incremented by 1 if we do checksum offload (a descriptor
   6877 		 * is used to set the checksum context).
   6878 		 */
   6879 		txs->txs_mbuf = m0;
   6880 		txs->txs_firstdesc = txq->txq_next;
   6881 		txs->txs_ndesc = segs_needed;
   6882 
   6883 		/* Set up offload parameters for this packet. */
   6884 		uint32_t cmdlen, fields, dcmdlen;
   6885 		if (m0->m_pkthdr.csum_flags &
   6886 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6887 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6888 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6889 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6890 			    &do_csum) != 0) {
   6891 				/* Error message already displayed. */
   6892 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6893 				continue;
   6894 			}
   6895 		} else {
   6896 			do_csum = false;
   6897 			cmdlen = 0;
   6898 			fields = 0;
   6899 		}
   6900 
   6901 		/* Sync the DMA map. */
   6902 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6903 		    BUS_DMASYNC_PREWRITE);
   6904 
   6905 		/* Initialize the first transmit descriptor. */
   6906 		nexttx = txq->txq_next;
   6907 		if (!do_csum) {
   6908 			/* setup a legacy descriptor */
   6909 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6910 			    dmamap->dm_segs[0].ds_addr);
   6911 			txq->txq_descs[nexttx].wtx_cmdlen =
   6912 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6913 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6914 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6915 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6916 			    NULL) {
   6917 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6918 				    htole32(WTX_CMD_VLE);
   6919 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6920 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6921 			} else {
    6922 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6923 			}
   6924 			dcmdlen = 0;
   6925 		} else {
   6926 			/* setup an advanced data descriptor */
   6927 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6928 			    htole64(dmamap->dm_segs[0].ds_addr);
   6929 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6930 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6931 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6932 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6933 			    htole32(fields);
   6934 			DPRINTF(WM_DEBUG_TX,
   6935 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6936 			    device_xname(sc->sc_dev), nexttx,
   6937 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6938 			DPRINTF(WM_DEBUG_TX,
   6939 			    ("\t 0x%08x%08x\n", fields,
   6940 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6941 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6942 		}
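         		/*
         		 * dcmdlen carries the DTYP/DEXT flags (or 0 for the
         		 * legacy format) so that the continuation descriptors
         		 * below match the format of the first descriptor.
         		 */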
   6943 
   6944 		lasttx = nexttx;
   6945 		nexttx = WM_NEXTTX(txq, nexttx);
   6946 		/*
    6947 		 * Fill in the remaining descriptors. The legacy and
    6948 		 * advanced formats are the same from here on.
   6949 		 */
   6950 		for (seg = 1; seg < dmamap->dm_nsegs;
   6951 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6952 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6953 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6954 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6955 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6956 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6957 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6958 			lasttx = nexttx;
   6959 
   6960 			DPRINTF(WM_DEBUG_TX,
   6961 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6962 			     "len %#04zx\n",
   6963 			    device_xname(sc->sc_dev), nexttx,
   6964 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6965 			    dmamap->dm_segs[seg].ds_len));
   6966 		}
   6967 
   6968 		KASSERT(lasttx != -1);
   6969 
   6970 		/*
   6971 		 * Set up the command byte on the last descriptor of
   6972 		 * the packet.  If we're in the interrupt delay window,
   6973 		 * delay the interrupt.
   6974 		 */
   6975 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6976 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6977 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6978 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6979 
   6980 		txs->txs_lastdesc = lasttx;
   6981 
   6982 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6983 		    device_xname(sc->sc_dev),
   6984 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6985 
   6986 		/* Sync the descriptors we're using. */
   6987 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6988 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6989 
   6990 		/* Give the packet to the chip. */
   6991 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6992 		sent = true;
   6993 
   6994 		DPRINTF(WM_DEBUG_TX,
   6995 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6996 
   6997 		DPRINTF(WM_DEBUG_TX,
   6998 		    ("%s: TX: finished transmitting packet, job %d\n",
   6999 		    device_xname(sc->sc_dev), txq->txq_snext));
   7000 
   7001 		/* Advance the tx pointer. */
   7002 		txq->txq_free -= txs->txs_ndesc;
   7003 		txq->txq_next = nexttx;
   7004 
   7005 		txq->txq_sfree--;
   7006 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7007 
   7008 		/* Pass the packet to any BPF listeners. */
   7009 		bpf_mtap(ifp, m0);
   7010 	}
   7011 
   7012 	if (m0 != NULL) {
   7013 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7014 		WM_Q_EVCNT_INCR(txq, txdrop);
   7015 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7016 			__func__));
   7017 		m_freem(m0);
   7018 	}
   7019 
   7020 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7021 		/* No more slots; notify upper layer. */
   7022 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7023 	}
   7024 
   7025 	if (sent) {
   7026 		/* Set a watchdog timer in case the chip flakes out. */
   7027 		ifp->if_timer = 5;
   7028 	}
   7029 }
   7030 
   7031 /* Interrupt */
   7032 
   7033 /*
   7034  * wm_txeof:
   7035  *
   7036  *	Helper; handle transmit interrupts.
   7037  */
   7038 static int
   7039 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7040 {
   7041 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7042 	struct wm_txsoft *txs;
   7043 	bool processed = false;
   7044 	int count = 0;
   7045 	int i;
   7046 	uint8_t status;
   7047 
   7048 	KASSERT(mutex_owned(txq->txq_lock));
   7049 
   7050 	if (sc->sc_stopping)
   7051 		return 0;
   7052 
   7053 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7054 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7055 	else
   7056 		ifp->if_flags &= ~IFF_OACTIVE;
   7057 
   7058 	/*
   7059 	 * Go through the Tx list and free mbufs for those
   7060 	 * frames which have been transmitted.
   7061 	 */
   7062 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7063 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7064 		txs = &txq->txq_soft[i];
   7065 
   7066 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7067 			device_xname(sc->sc_dev), i));
   7068 
   7069 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7070 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7071 
   7072 		status =
   7073 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
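         		/*
         		 * If the descriptor-done bit is not yet set, re-sync
         		 * the descriptor for a later read and stop scanning.
         		 */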
   7074 		if ((status & WTX_ST_DD) == 0) {
   7075 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7076 			    BUS_DMASYNC_PREREAD);
   7077 			break;
   7078 		}
   7079 
   7080 		processed = true;
   7081 		count++;
   7082 		DPRINTF(WM_DEBUG_TX,
   7083 		    ("%s: TX: job %d done: descs %d..%d\n",
   7084 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7085 		    txs->txs_lastdesc));
   7086 
   7087 		/*
   7088 		 * XXX We should probably be using the statistics
   7089 		 * XXX registers, but I don't know if they exist
   7090 		 * XXX on chips before the i82544.
   7091 		 */
   7092 
   7093 #ifdef WM_EVENT_COUNTERS
   7094 		if (status & WTX_ST_TU)
   7095 			WM_Q_EVCNT_INCR(txq, tu);
   7096 #endif /* WM_EVENT_COUNTERS */
   7097 
   7098 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7099 			ifp->if_oerrors++;
   7100 			if (status & WTX_ST_LC)
   7101 				log(LOG_WARNING, "%s: late collision\n",
   7102 				    device_xname(sc->sc_dev));
   7103 			else if (status & WTX_ST_EC) {
   7104 				ifp->if_collisions += 16;
   7105 				log(LOG_WARNING, "%s: excessive collisions\n",
   7106 				    device_xname(sc->sc_dev));
   7107 			}
   7108 		} else
   7109 			ifp->if_opackets++;
   7110 
   7111 		txq->txq_free += txs->txs_ndesc;
   7112 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7113 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7114 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7115 		m_freem(txs->txs_mbuf);
   7116 		txs->txs_mbuf = NULL;
   7117 	}
   7118 
   7119 	/* Update the dirty transmit buffer pointer. */
   7120 	txq->txq_sdirty = i;
   7121 	DPRINTF(WM_DEBUG_TX,
   7122 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7123 
   7124 	if (count != 0)
   7125 		rnd_add_uint32(&sc->rnd_source, count);
   7126 
   7127 	/*
   7128 	 * If there are no more pending transmissions, cancel the watchdog
   7129 	 * timer.
   7130 	 */
   7131 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7132 		ifp->if_timer = 0;
   7133 
   7134 	return processed;
   7135 }
   7136 
   7137 /*
   7138  * wm_rxeof:
   7139  *
   7140  *	Helper; handle receive interrupts.
   7141  */
   7142 static void
   7143 wm_rxeof(struct wm_rxqueue *rxq)
   7144 {
   7145 	struct wm_softc *sc = rxq->rxq_sc;
   7146 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7147 	struct wm_rxsoft *rxs;
   7148 	struct mbuf *m;
   7149 	int i, len;
   7150 	int count = 0;
   7151 	uint8_t status, errors;
   7152 	uint16_t vlantag;
   7153 
   7154 	KASSERT(mutex_owned(rxq->rxq_lock));
   7155 
   7156 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7157 		rxs = &rxq->rxq_soft[i];
   7158 
   7159 		DPRINTF(WM_DEBUG_RX,
   7160 		    ("%s: RX: checking descriptor %d\n",
   7161 		    device_xname(sc->sc_dev), i));
   7162 
    7163 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7164 
   7165 		status = rxq->rxq_descs[i].wrx_status;
   7166 		errors = rxq->rxq_descs[i].wrx_errors;
   7167 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7168 		vlantag = rxq->rxq_descs[i].wrx_special;
   7169 
   7170 		if ((status & WRX_ST_DD) == 0) {
   7171 			/* We have processed all of the receive descriptors. */
   7172 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7173 			break;
   7174 		}
   7175 
   7176 		count++;
   7177 		if (__predict_false(rxq->rxq_discard)) {
   7178 			DPRINTF(WM_DEBUG_RX,
   7179 			    ("%s: RX: discarding contents of descriptor %d\n",
   7180 			    device_xname(sc->sc_dev), i));
   7181 			wm_init_rxdesc(rxq, i);
   7182 			if (status & WRX_ST_EOP) {
   7183 				/* Reset our state. */
   7184 				DPRINTF(WM_DEBUG_RX,
   7185 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7186 				    device_xname(sc->sc_dev)));
   7187 				rxq->rxq_discard = 0;
   7188 			}
   7189 			continue;
   7190 		}
   7191 
   7192 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7193 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7194 
   7195 		m = rxs->rxs_mbuf;
   7196 
   7197 		/*
   7198 		 * Add a new receive buffer to the ring, unless of
   7199 		 * course the length is zero. Treat the latter as a
   7200 		 * failed mapping.
   7201 		 */
   7202 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7203 			/*
   7204 			 * Failed, throw away what we've done so
   7205 			 * far, and discard the rest of the packet.
   7206 			 */
   7207 			ifp->if_ierrors++;
   7208 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7209 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7210 			wm_init_rxdesc(rxq, i);
   7211 			if ((status & WRX_ST_EOP) == 0)
   7212 				rxq->rxq_discard = 1;
   7213 			if (rxq->rxq_head != NULL)
   7214 				m_freem(rxq->rxq_head);
   7215 			WM_RXCHAIN_RESET(rxq);
   7216 			DPRINTF(WM_DEBUG_RX,
   7217 			    ("%s: RX: Rx buffer allocation failed, "
   7218 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7219 			    rxq->rxq_discard ? " (discard)" : ""));
   7220 			continue;
   7221 		}
   7222 
   7223 		m->m_len = len;
   7224 		rxq->rxq_len += len;
   7225 		DPRINTF(WM_DEBUG_RX,
   7226 		    ("%s: RX: buffer at %p len %d\n",
   7227 		    device_xname(sc->sc_dev), m->m_data, len));
   7228 
   7229 		/* If this is not the end of the packet, keep looking. */
   7230 		if ((status & WRX_ST_EOP) == 0) {
   7231 			WM_RXCHAIN_LINK(rxq, m);
   7232 			DPRINTF(WM_DEBUG_RX,
   7233 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7234 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7235 			continue;
   7236 		}
   7237 
   7238 		/*
    7239 		 * Okay, we have the entire packet now.  The chip includes
    7240 		 * the FCS except on I350, I354 and I21[01] (not all chips
    7241 		 * can be configured to strip it), so we need to trim it.
    7242 		 * We may need to adjust the length of the previous mbuf
    7243 		 * in the chain if the current mbuf is too short to hold
    7244 		 * the whole FCS.
    7245 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7246 		 * register is always set on I350, so we don't trim there.
   7247 		 */
   7248 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7249 		    && (sc->sc_type != WM_T_I210)
   7250 		    && (sc->sc_type != WM_T_I211)) {
   7251 			if (m->m_len < ETHER_CRC_LEN) {
   7252 				rxq->rxq_tail->m_len
   7253 				    -= (ETHER_CRC_LEN - m->m_len);
   7254 				m->m_len = 0;
   7255 			} else
   7256 				m->m_len -= ETHER_CRC_LEN;
   7257 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7258 		} else
   7259 			len = rxq->rxq_len;
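         		/*
         		 * e.g. if the final mbuf held only 2 of the 4 FCS
         		 * bytes, the remaining 2 were trimmed from the
         		 * previous mbuf in the chain above.
         		 */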
   7260 
   7261 		WM_RXCHAIN_LINK(rxq, m);
   7262 
   7263 		*rxq->rxq_tailp = NULL;
   7264 		m = rxq->rxq_head;
   7265 
   7266 		WM_RXCHAIN_RESET(rxq);
   7267 
   7268 		DPRINTF(WM_DEBUG_RX,
   7269 		    ("%s: RX: have entire packet, len -> %d\n",
   7270 		    device_xname(sc->sc_dev), len));
   7271 
   7272 		/* If an error occurred, update stats and drop the packet. */
   7273 		if (errors &
   7274 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7275 			if (errors & WRX_ER_SE)
   7276 				log(LOG_WARNING, "%s: symbol error\n",
   7277 				    device_xname(sc->sc_dev));
   7278 			else if (errors & WRX_ER_SEQ)
   7279 				log(LOG_WARNING, "%s: receive sequence error\n",
   7280 				    device_xname(sc->sc_dev));
   7281 			else if (errors & WRX_ER_CE)
   7282 				log(LOG_WARNING, "%s: CRC error\n",
   7283 				    device_xname(sc->sc_dev));
   7284 			m_freem(m);
   7285 			continue;
   7286 		}
   7287 
   7288 		/* No errors.  Receive the packet. */
   7289 		m_set_rcvif(m, ifp);
   7290 		m->m_pkthdr.len = len;
   7291 
   7292 		/*
   7293 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7294 		 * for us.  Associate the tag with the packet.
   7295 		 */
   7296 		/* XXXX should check for i350 and i354 */
   7297 		if ((status & WRX_ST_VP) != 0) {
   7298 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7299 		}
   7300 
   7301 		/* Set up checksum info for this packet. */
   7302 		if ((status & WRX_ST_IXSM) == 0) {
   7303 			if (status & WRX_ST_IPCS) {
   7304 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7305 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7306 				if (errors & WRX_ER_IPE)
   7307 					m->m_pkthdr.csum_flags |=
   7308 					    M_CSUM_IPv4_BAD;
   7309 			}
   7310 			if (status & WRX_ST_TCPCS) {
   7311 				/*
   7312 				 * Note: we don't know if this was TCP or UDP,
   7313 				 * so we just set both bits, and expect the
   7314 				 * upper layers to deal.
   7315 				 */
   7316 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7317 				m->m_pkthdr.csum_flags |=
   7318 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7319 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7320 				if (errors & WRX_ER_TCPE)
   7321 					m->m_pkthdr.csum_flags |=
   7322 					    M_CSUM_TCP_UDP_BAD;
   7323 			}
   7324 		}
   7325 
   7326 		ifp->if_ipackets++;
   7327 
   7328 		mutex_exit(rxq->rxq_lock);
   7329 
   7330 		/* Pass this up to any BPF listeners. */
   7331 		bpf_mtap(ifp, m);
   7332 
   7333 		/* Pass it on. */
   7334 		if_percpuq_enqueue(sc->sc_ipq, m);
   7335 
   7336 		mutex_enter(rxq->rxq_lock);
   7337 
   7338 		if (sc->sc_stopping)
   7339 			break;
   7340 	}
   7341 
   7342 	/* Update the receive pointer. */
   7343 	rxq->rxq_ptr = i;
   7344 	if (count != 0)
   7345 		rnd_add_uint32(&sc->rnd_source, count);
   7346 
   7347 	DPRINTF(WM_DEBUG_RX,
   7348 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7349 }
   7350 
   7351 /*
   7352  * wm_linkintr_gmii:
   7353  *
   7354  *	Helper; handle link interrupts for GMII.
   7355  */
   7356 static void
   7357 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7358 {
   7359 
   7360 	KASSERT(WM_CORE_LOCKED(sc));
   7361 
   7362 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7363 		__func__));
   7364 
   7365 	if (icr & ICR_LSC) {
   7366 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7367 
   7368 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7369 			wm_gig_downshift_workaround_ich8lan(sc);
   7370 
   7371 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7372 			device_xname(sc->sc_dev)));
   7373 		mii_pollstat(&sc->sc_mii);
   7374 		if (sc->sc_type == WM_T_82543) {
   7375 			int miistatus, active;
   7376 
   7377 			/*
    7378 			 * With 82543, we need to force the MAC's speed
    7379 			 * and duplex to match the PHY's speed and duplex
    7380 			 * configuration.
   7381 			 */
   7382 			miistatus = sc->sc_mii.mii_media_status;
   7383 
   7384 			if (miistatus & IFM_ACTIVE) {
   7385 				active = sc->sc_mii.mii_media_active;
   7386 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7387 				switch (IFM_SUBTYPE(active)) {
   7388 				case IFM_10_T:
   7389 					sc->sc_ctrl |= CTRL_SPEED_10;
   7390 					break;
   7391 				case IFM_100_TX:
   7392 					sc->sc_ctrl |= CTRL_SPEED_100;
   7393 					break;
   7394 				case IFM_1000_T:
   7395 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7396 					break;
   7397 				default:
   7398 					/*
   7399 					 * fiber?
    7400 					 * Fiber?
    7401 					 * Should not get here.
   7402 					printf("unknown media (%x)\n", active);
   7403 					break;
   7404 				}
   7405 				if (active & IFM_FDX)
   7406 					sc->sc_ctrl |= CTRL_FD;
   7407 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7408 			}
   7409 		} else if ((sc->sc_type == WM_T_ICH8)
   7410 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7411 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7412 		} else if (sc->sc_type == WM_T_PCH) {
   7413 			wm_k1_gig_workaround_hv(sc,
   7414 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7415 		}
   7416 
   7417 		if ((sc->sc_phytype == WMPHY_82578)
   7418 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7419 			== IFM_1000_T)) {
   7420 
   7421 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7422 				delay(200*1000); /* XXX too big */
   7423 
   7424 				/* Link stall fix for link up */
   7425 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7426 				    HV_MUX_DATA_CTRL,
   7427 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7428 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7429 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7430 				    HV_MUX_DATA_CTRL,
   7431 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7432 			}
   7433 		}
   7434 	} else if (icr & ICR_RXSEQ) {
   7435 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7436 			device_xname(sc->sc_dev)));
   7437 	}
   7438 }
   7439 
   7440 /*
   7441  * wm_linkintr_tbi:
   7442  *
   7443  *	Helper; handle link interrupts for TBI mode.
   7444  */
   7445 static void
   7446 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7447 {
   7448 	uint32_t status;
   7449 
   7450 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7451 		__func__));
   7452 
   7453 	status = CSR_READ(sc, WMREG_STATUS);
   7454 	if (icr & ICR_LSC) {
   7455 		if (status & STATUS_LU) {
   7456 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7457 			    device_xname(sc->sc_dev),
   7458 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7459 			/*
   7460 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7461 			 * so we should update sc->sc_ctrl
   7462 			 */
   7463 
   7464 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7465 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7466 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7467 			if (status & STATUS_FD)
   7468 				sc->sc_tctl |=
   7469 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7470 			else
   7471 				sc->sc_tctl |=
   7472 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7473 			if (sc->sc_ctrl & CTRL_TFCE)
   7474 				sc->sc_fcrtl |= FCRTL_XONE;
   7475 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7476 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7477 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7478 				      sc->sc_fcrtl);
   7479 			sc->sc_tbi_linkup = 1;
   7480 		} else {
   7481 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7482 			    device_xname(sc->sc_dev)));
   7483 			sc->sc_tbi_linkup = 0;
   7484 		}
   7485 		/* Update LED */
   7486 		wm_tbi_serdes_set_linkled(sc);
   7487 	} else if (icr & ICR_RXSEQ) {
   7488 		DPRINTF(WM_DEBUG_LINK,
   7489 		    ("%s: LINK: Receive sequence error\n",
   7490 		    device_xname(sc->sc_dev)));
   7491 	}
   7492 }
   7493 
   7494 /*
   7495  * wm_linkintr_serdes:
   7496  *
    7497  *	Helper; handle link interrupts for SERDES mode.
   7498  */
   7499 static void
   7500 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7501 {
   7502 	struct mii_data *mii = &sc->sc_mii;
   7503 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7504 	uint32_t pcs_adv, pcs_lpab, reg;
   7505 
   7506 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7507 		__func__));
   7508 
   7509 	if (icr & ICR_LSC) {
   7510 		/* Check PCS */
   7511 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7512 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7513 			mii->mii_media_status |= IFM_ACTIVE;
   7514 			sc->sc_tbi_linkup = 1;
   7515 		} else {
   7516 			mii->mii_media_status |= IFM_NONE;
   7517 			sc->sc_tbi_linkup = 0;
   7518 			wm_tbi_serdes_set_linkled(sc);
   7519 			return;
   7520 		}
   7521 		mii->mii_media_active |= IFM_1000_SX;
   7522 		if ((reg & PCS_LSTS_FDX) != 0)
   7523 			mii->mii_media_active |= IFM_FDX;
   7524 		else
   7525 			mii->mii_media_active |= IFM_HDX;
   7526 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7527 			/* Check flow */
   7528 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7529 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7530 				DPRINTF(WM_DEBUG_LINK,
   7531 				    ("XXX LINKOK but not ACOMP\n"));
   7532 				return;
   7533 			}
   7534 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7535 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7536 			DPRINTF(WM_DEBUG_LINK,
   7537 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7538 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7539 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7540 				mii->mii_media_active |= IFM_FLOW
   7541 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7542 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7543 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7544 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7545 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7546 				mii->mii_media_active |= IFM_FLOW
   7547 				    | IFM_ETH_TXPAUSE;
   7548 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7549 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7550 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7551 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7552 				mii->mii_media_active |= IFM_FLOW
   7553 				    | IFM_ETH_RXPAUSE;
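         			/*
         			 * The chain above implements the usual 802.3
         			 * pause resolution: symmetric pause on both
         			 * sides enables flow control in both
         			 * directions; otherwise the asymmetric-pause
         			 * bits select a single direction, if any.
         			 */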
   7554 		}
   7555 		/* Update LED */
   7556 		wm_tbi_serdes_set_linkled(sc);
   7557 	} else {
   7558 		DPRINTF(WM_DEBUG_LINK,
   7559 		    ("%s: LINK: Receive sequence error\n",
   7560 		    device_xname(sc->sc_dev)));
   7561 	}
   7562 }
   7563 
   7564 /*
   7565  * wm_linkintr:
   7566  *
   7567  *	Helper; handle link interrupts.
   7568  */
   7569 static void
   7570 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7571 {
   7572 
   7573 	KASSERT(WM_CORE_LOCKED(sc));
   7574 
   7575 	if (sc->sc_flags & WM_F_HAS_MII)
   7576 		wm_linkintr_gmii(sc, icr);
   7577 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7578 	    && (sc->sc_type >= WM_T_82575))
   7579 		wm_linkintr_serdes(sc, icr);
   7580 	else
   7581 		wm_linkintr_tbi(sc, icr);
   7582 }
   7583 
   7584 /*
   7585  * wm_intr_legacy:
   7586  *
   7587  *	Interrupt service routine for INTx and MSI.
   7588  */
   7589 static int
   7590 wm_intr_legacy(void *arg)
   7591 {
   7592 	struct wm_softc *sc = arg;
   7593 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7594 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7595 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7596 	uint32_t icr, rndval = 0;
   7597 	int handled = 0;
   7598 
   7599 	DPRINTF(WM_DEBUG_TX,
   7600 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7601 	while (1 /* CONSTCOND */) {
   7602 		icr = CSR_READ(sc, WMREG_ICR);
   7603 		if ((icr & sc->sc_icr) == 0)
   7604 			break;
   7605 		if (rndval == 0)
   7606 			rndval = icr;
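         		/*
         		 * Only the first nonzero ICR value is kept; it is
         		 * fed to rnd_add_uint32() as an entropy sample once
         		 * the loop exits.
         		 */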
   7607 
   7608 		mutex_enter(rxq->rxq_lock);
   7609 
   7610 		if (sc->sc_stopping) {
   7611 			mutex_exit(rxq->rxq_lock);
   7612 			break;
   7613 		}
   7614 
   7615 		handled = 1;
   7616 
   7617 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7618 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7619 			DPRINTF(WM_DEBUG_RX,
   7620 			    ("%s: RX: got Rx intr 0x%08x\n",
   7621 			    device_xname(sc->sc_dev),
   7622 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7623 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7624 		}
   7625 #endif
   7626 		wm_rxeof(rxq);
   7627 
   7628 		mutex_exit(rxq->rxq_lock);
   7629 		mutex_enter(txq->txq_lock);
   7630 
   7631 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7632 		if (icr & ICR_TXDW) {
   7633 			DPRINTF(WM_DEBUG_TX,
   7634 			    ("%s: TX: got TXDW interrupt\n",
   7635 			    device_xname(sc->sc_dev)));
   7636 			WM_Q_EVCNT_INCR(txq, txdw);
   7637 		}
   7638 #endif
   7639 		wm_txeof(sc, txq);
   7640 
   7641 		mutex_exit(txq->txq_lock);
   7642 		WM_CORE_LOCK(sc);
   7643 
   7644 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7645 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7646 			wm_linkintr(sc, icr);
   7647 		}
   7648 
   7649 		WM_CORE_UNLOCK(sc);
   7650 
   7651 		if (icr & ICR_RXO) {
   7652 #if defined(WM_DEBUG)
   7653 			log(LOG_WARNING, "%s: Receive overrun\n",
   7654 			    device_xname(sc->sc_dev));
   7655 #endif /* defined(WM_DEBUG) */
   7656 		}
   7657 	}
   7658 
   7659 	rnd_add_uint32(&sc->rnd_source, rndval);
   7660 
   7661 	if (handled) {
   7662 		/* Try to get more packets going. */
   7663 		ifp->if_start(ifp);
   7664 	}
   7665 
   7666 	return handled;
   7667 }
   7668 
   7669 static int
   7670 wm_txrxintr_msix(void *arg)
   7671 {
   7672 	struct wm_queue *wmq = arg;
   7673 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7674 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7675 	struct wm_softc *sc = txq->txq_sc;
   7676 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7677 
   7678 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7679 
   7680 	DPRINTF(WM_DEBUG_TX,
   7681 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7682 
   7683 	if (sc->sc_type == WM_T_82574)
    7684 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7685 	else if (sc->sc_type == WM_T_82575)
    7686 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7687 	else
   7688 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
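         	/*
         	 * The writes above mask this queue's interrupt while it is
         	 * serviced; the matching IMS/EIMS writes at the end of this
         	 * function unmask it again.
         	 */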
   7689 
   7690 	if (!sc->sc_stopping) {
   7691 		mutex_enter(txq->txq_lock);
   7692 
   7693 		WM_Q_EVCNT_INCR(txq, txdw);
   7694 		wm_txeof(sc, txq);
   7695 
   7696 		/* Try to get more packets going. */
   7697 		if (pcq_peek(txq->txq_interq) != NULL)
   7698 			wm_nq_transmit_locked(ifp, txq);
   7699 		/*
    7700 		 * There is still some upper-layer processing that calls
    7701 		 * ifp->if_start(), e.g. ALTQ.
   7702 		 */
   7703 		if (wmq->wmq_id == 0) {
   7704 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7705 				wm_nq_start_locked(ifp);
   7706 		}
   7707 		mutex_exit(txq->txq_lock);
   7708 	}
   7709 
   7710 	DPRINTF(WM_DEBUG_RX,
   7711 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7712 
   7713 	if (!sc->sc_stopping) {
   7714 		mutex_enter(rxq->rxq_lock);
   7715 		WM_Q_EVCNT_INCR(rxq, rxintr);
   7716 		wm_rxeof(rxq);
   7717 		mutex_exit(rxq->rxq_lock);
   7718 	}
   7719 
   7720 	if (sc->sc_type == WM_T_82574)
    7721 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7722 	else if (sc->sc_type == WM_T_82575)
    7723 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7724 	else
   7725 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7726 
   7727 	return 1;
   7728 }
   7729 
   7730 /*
   7731  * wm_linkintr_msix:
   7732  *
   7733  *	Interrupt service routine for link status change for MSI-X.
   7734  */
   7735 static int
   7736 wm_linkintr_msix(void *arg)
   7737 {
   7738 	struct wm_softc *sc = arg;
   7739 	uint32_t reg;
   7740 
   7741 	DPRINTF(WM_DEBUG_LINK,
   7742 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7743 
   7744 	reg = CSR_READ(sc, WMREG_ICR);
   7745 	WM_CORE_LOCK(sc);
   7746 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7747 		goto out;
   7748 
   7749 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7750 	wm_linkintr(sc, ICR_LSC);
   7751 
   7752 out:
   7753 	WM_CORE_UNLOCK(sc);
   7754 
   7755 	if (sc->sc_type == WM_T_82574)
   7756 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7757 	else if (sc->sc_type == WM_T_82575)
   7758 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7759 	else
   7760 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7761 
   7762 	return 1;
   7763 }
   7764 
   7765 /*
   7766  * Media related.
   7767  * GMII, SGMII, TBI (and SERDES)
   7768  */
   7769 
   7770 /* Common */
   7771 
   7772 /*
   7773  * wm_tbi_serdes_set_linkled:
   7774  *
   7775  *	Update the link LED on TBI and SERDES devices.
   7776  */
   7777 static void
   7778 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7779 {
   7780 
   7781 	if (sc->sc_tbi_linkup)
   7782 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7783 	else
   7784 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7785 
   7786 	/* 82540 or newer devices are active low */
   7787 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
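         	/*
         	 * On 82540 and newer the LED pin is active low, so the XOR
         	 * above inverts the link state: a set link bit drives the
         	 * pin low, lighting the LED.
         	 */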
   7788 
   7789 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7790 }
   7791 
   7792 /* GMII related */
   7793 
   7794 /*
   7795  * wm_gmii_reset:
   7796  *
   7797  *	Reset the PHY.
   7798  */
   7799 static void
   7800 wm_gmii_reset(struct wm_softc *sc)
   7801 {
   7802 	uint32_t reg;
   7803 	int rv;
   7804 
   7805 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7806 		device_xname(sc->sc_dev), __func__));
   7807 
   7808 	/* Get phy semaphore */
   7809 	switch (sc->sc_type) {
   7810 	case WM_T_82571:
   7811 	case WM_T_82572:
   7812 	case WM_T_82573:
   7813 	case WM_T_82574:
   7814 	case WM_T_82583:
   7815 		 /* XXX should get sw semaphore, too */
   7816 		rv = wm_get_swsm_semaphore(sc);
   7817 		break;
   7818 	case WM_T_82575:
   7819 	case WM_T_82576:
   7820 	case WM_T_82580:
   7821 	case WM_T_I350:
   7822 	case WM_T_I354:
   7823 	case WM_T_I210:
   7824 	case WM_T_I211:
   7825 	case WM_T_80003:
   7826 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7827 		break;
   7828 	case WM_T_ICH8:
   7829 	case WM_T_ICH9:
   7830 	case WM_T_ICH10:
   7831 	case WM_T_PCH:
   7832 	case WM_T_PCH2:
   7833 	case WM_T_PCH_LPT:
   7834 	case WM_T_PCH_SPT:
   7835 		rv = wm_get_swfwhw_semaphore(sc);
   7836 		break;
   7837 	default:
    7838 		/* nothing to do */
   7839 		rv = 0;
   7840 		break;
   7841 	}
   7842 	if (rv != 0) {
   7843 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7844 		    __func__);
   7845 		return;
   7846 	}
   7847 
   7848 	switch (sc->sc_type) {
   7849 	case WM_T_82542_2_0:
   7850 	case WM_T_82542_2_1:
   7851 		/* null */
   7852 		break;
   7853 	case WM_T_82543:
   7854 		/*
    7855 		 * With 82543, we need to force the MAC's speed and duplex
    7856 		 * to match the PHY's speed and duplex configuration.
   7857 		 * In addition, we need to perform a hardware reset on the PHY
   7858 		 * to take it out of reset.
   7859 		 */
   7860 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7861 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7862 
   7863 		/* The PHY reset pin is active-low. */
   7864 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7865 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7866 		    CTRL_EXT_SWDPIN(4));
   7867 		reg |= CTRL_EXT_SWDPIO(4);
   7868 
   7869 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7870 		CSR_WRITE_FLUSH(sc);
   7871 		delay(10*1000);
   7872 
   7873 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7874 		CSR_WRITE_FLUSH(sc);
   7875 		delay(150);
   7876 #if 0
   7877 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7878 #endif
   7879 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7880 		break;
   7881 	case WM_T_82544:	/* reset 10000us */
   7882 	case WM_T_82540:
   7883 	case WM_T_82545:
   7884 	case WM_T_82545_3:
   7885 	case WM_T_82546:
   7886 	case WM_T_82546_3:
   7887 	case WM_T_82541:
   7888 	case WM_T_82541_2:
   7889 	case WM_T_82547:
   7890 	case WM_T_82547_2:
   7891 	case WM_T_82571:	/* reset 100us */
   7892 	case WM_T_82572:
   7893 	case WM_T_82573:
   7894 	case WM_T_82574:
   7895 	case WM_T_82575:
   7896 	case WM_T_82576:
   7897 	case WM_T_82580:
   7898 	case WM_T_I350:
   7899 	case WM_T_I354:
   7900 	case WM_T_I210:
   7901 	case WM_T_I211:
   7902 	case WM_T_82583:
   7903 	case WM_T_80003:
   7904 		/* generic reset */
   7905 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7906 		CSR_WRITE_FLUSH(sc);
   7907 		delay(20000);
   7908 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7909 		CSR_WRITE_FLUSH(sc);
   7910 		delay(20000);
   7911 
   7912 		if ((sc->sc_type == WM_T_82541)
   7913 		    || (sc->sc_type == WM_T_82541_2)
   7914 		    || (sc->sc_type == WM_T_82547)
   7915 		    || (sc->sc_type == WM_T_82547_2)) {
    7916 			/* The IGP workarounds are done in igp_reset() */
   7917 			/* XXX add code to set LED after phy reset */
   7918 		}
   7919 		break;
   7920 	case WM_T_ICH8:
   7921 	case WM_T_ICH9:
   7922 	case WM_T_ICH10:
   7923 	case WM_T_PCH:
   7924 	case WM_T_PCH2:
   7925 	case WM_T_PCH_LPT:
   7926 	case WM_T_PCH_SPT:
   7927 		/* generic reset */
   7928 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7929 		CSR_WRITE_FLUSH(sc);
   7930 		delay(100);
   7931 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7932 		CSR_WRITE_FLUSH(sc);
   7933 		delay(150);
   7934 		break;
   7935 	default:
   7936 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7937 		    __func__);
   7938 		break;
   7939 	}
   7940 
   7941 	/* release PHY semaphore */
   7942 	switch (sc->sc_type) {
   7943 	case WM_T_82571:
   7944 	case WM_T_82572:
   7945 	case WM_T_82573:
   7946 	case WM_T_82574:
   7947 	case WM_T_82583:
   7948 		 /* XXX should put sw semaphore, too */
   7949 		wm_put_swsm_semaphore(sc);
   7950 		break;
   7951 	case WM_T_82575:
   7952 	case WM_T_82576:
   7953 	case WM_T_82580:
   7954 	case WM_T_I350:
   7955 	case WM_T_I354:
   7956 	case WM_T_I210:
   7957 	case WM_T_I211:
   7958 	case WM_T_80003:
   7959 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7960 		break;
   7961 	case WM_T_ICH8:
   7962 	case WM_T_ICH9:
   7963 	case WM_T_ICH10:
   7964 	case WM_T_PCH:
   7965 	case WM_T_PCH2:
   7966 	case WM_T_PCH_LPT:
   7967 	case WM_T_PCH_SPT:
   7968 		wm_put_swfwhw_semaphore(sc);
   7969 		break;
   7970 	default:
   7971 		/* nothing to do */
   7972 		rv = 0;
   7973 		break;
   7974 	}
   7975 
   7976 	/* get_cfg_done */
   7977 	wm_get_cfg_done(sc);
   7978 
   7979 	/* extra setup */
   7980 	switch (sc->sc_type) {
   7981 	case WM_T_82542_2_0:
   7982 	case WM_T_82542_2_1:
   7983 	case WM_T_82543:
   7984 	case WM_T_82544:
   7985 	case WM_T_82540:
   7986 	case WM_T_82545:
   7987 	case WM_T_82545_3:
   7988 	case WM_T_82546:
   7989 	case WM_T_82546_3:
   7990 	case WM_T_82541_2:
   7991 	case WM_T_82547_2:
   7992 	case WM_T_82571:
   7993 	case WM_T_82572:
   7994 	case WM_T_82573:
   7995 	case WM_T_82575:
   7996 	case WM_T_82576:
   7997 	case WM_T_82580:
   7998 	case WM_T_I350:
   7999 	case WM_T_I354:
   8000 	case WM_T_I210:
   8001 	case WM_T_I211:
   8002 	case WM_T_80003:
		/* nothing to do */
   8004 		break;
   8005 	case WM_T_82574:
   8006 	case WM_T_82583:
   8007 		wm_lplu_d0_disable(sc);
   8008 		break;
   8009 	case WM_T_82541:
   8010 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   8012 		break;
   8013 	case WM_T_ICH8:
   8014 	case WM_T_ICH9:
   8015 	case WM_T_ICH10:
   8016 	case WM_T_PCH:
   8017 	case WM_T_PCH2:
   8018 	case WM_T_PCH_LPT:
   8019 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8021 		delay(10*1000);
   8022 
   8023 		if (sc->sc_type == WM_T_PCH)
   8024 			wm_hv_phy_workaround_ich8lan(sc);
   8025 
   8026 		if (sc->sc_type == WM_T_PCH2)
   8027 			wm_lv_phy_workaround_ich8lan(sc);
   8028 
   8029 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8030 			/*
   8031 			 * dummy read to clear the phy wakeup bit after lcd
   8032 			 * reset
   8033 			 */
   8034 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8035 		}
   8036 
   8037 		/*
		 * XXX Configure the LCD with the extended configuration region
   8039 		 * in NVM
   8040 		 */
   8041 
   8042 		/* Disable D0 LPLU. */
   8043 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8044 			wm_lplu_d0_disable_pch(sc);
   8045 		else
   8046 			wm_lplu_d0_disable(sc);	/* ICH* */
   8047 		break;
   8048 	default:
   8049 		panic("%s: unknown type\n", __func__);
   8050 		break;
   8051 	}
   8052 }
   8053 
   8054 /*
   8055  * wm_get_phy_id_82575:
   8056  *
   8057  * Return PHY ID. Return -1 if it failed.
   8058  */
   8059 static int
   8060 wm_get_phy_id_82575(struct wm_softc *sc)
   8061 {
   8062 	uint32_t reg;
   8063 	int phyid = -1;
   8064 
   8065 	/* XXX */
   8066 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8067 		return -1;
   8068 
   8069 	if (wm_sgmii_uses_mdio(sc)) {
   8070 		switch (sc->sc_type) {
   8071 		case WM_T_82575:
   8072 		case WM_T_82576:
   8073 			reg = CSR_READ(sc, WMREG_MDIC);
   8074 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8075 			break;
   8076 		case WM_T_82580:
   8077 		case WM_T_I350:
   8078 		case WM_T_I354:
   8079 		case WM_T_I210:
   8080 		case WM_T_I211:
   8081 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8082 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8083 			break;
   8084 		default:
   8085 			return -1;
   8086 		}
   8087 	}
   8088 
   8089 	return phyid;
   8090 }
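
/*
 * Illustration (not compiled): assuming the e1000 MDICNFG layout with the
 * PHY address in bits 25:21, a raw MDICNFG value of 0x00200000 decodes as
 *
 *	(0x00200000 & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT == 1
 *
 * i.e. PHY address 1, which wm_gmii_mediainit() then hands to mii_attach()
 * as the only address to probe.
 */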
   8091 
   8092 
   8093 /*
   8094  * wm_gmii_mediainit:
   8095  *
   8096  *	Initialize media for use on 1000BASE-T devices.
   8097  */
   8098 static void
   8099 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8100 {
   8101 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8102 	struct mii_data *mii = &sc->sc_mii;
   8103 	uint32_t reg;
   8104 
   8105 	/* We have GMII. */
   8106 	sc->sc_flags |= WM_F_HAS_MII;
   8107 
   8108 	if (sc->sc_type == WM_T_80003)
   8109 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8110 	else
   8111 		sc->sc_tipg = TIPG_1000T_DFLT;
   8112 
   8113 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8114 	if ((sc->sc_type == WM_T_82580)
   8115 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8116 	    || (sc->sc_type == WM_T_I211)) {
   8117 		reg = CSR_READ(sc, WMREG_PHPM);
   8118 		reg &= ~PHPM_GO_LINK_D;
   8119 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8120 	}
   8121 
   8122 	/*
   8123 	 * Let the chip set speed/duplex on its own based on
   8124 	 * signals from the PHY.
   8125 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8126 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8127 	 */
   8128 	sc->sc_ctrl |= CTRL_SLU;
   8129 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8130 
   8131 	/* Initialize our media structures and probe the GMII. */
   8132 	mii->mii_ifp = ifp;
   8133 
   8134 	/*
   8135 	 * Determine the PHY access method.
   8136 	 *
	 *  For SGMII, use the SGMII-specific method.
	 *
	 *  For some devices, we can determine the PHY access method
	 * from sc_type.
	 *
	 *  For ICH and PCH variants, it's difficult to determine the PHY
	 * access method from sc_type alone, so use the PCI product ID for
	 * some devices.
	 *  For other ICH8 variants, try igp's method first; if the PHY
	 * can't be detected, fall back to bm's method.
   8147 	 */
   8148 	switch (prodid) {
   8149 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8150 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8151 		/* 82577 */
   8152 		sc->sc_phytype = WMPHY_82577;
   8153 		break;
   8154 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8155 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8156 		/* 82578 */
   8157 		sc->sc_phytype = WMPHY_82578;
   8158 		break;
   8159 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8160 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8161 		/* 82579 */
   8162 		sc->sc_phytype = WMPHY_82579;
   8163 		break;
   8164 	case PCI_PRODUCT_INTEL_82801I_BM:
   8165 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8166 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8167 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8168 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8169 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8170 		/* 82567 */
   8171 		sc->sc_phytype = WMPHY_BM;
   8172 		mii->mii_readreg = wm_gmii_bm_readreg;
   8173 		mii->mii_writereg = wm_gmii_bm_writereg;
   8174 		break;
   8175 	default:
   8176 		if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
   8178 			/* SGMII */
   8179 			mii->mii_readreg = wm_sgmii_readreg;
   8180 			mii->mii_writereg = wm_sgmii_writereg;
   8181 		} else if (sc->sc_type >= WM_T_80003) {
   8182 			/* 80003 */
   8183 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8184 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8185 		} else if (sc->sc_type >= WM_T_I210) {
   8186 			/* I210 and I211 */
   8187 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8188 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8189 		} else if (sc->sc_type >= WM_T_82580) {
   8190 			/* 82580, I350 and I354 */
   8191 			sc->sc_phytype = WMPHY_82580;
   8192 			mii->mii_readreg = wm_gmii_82580_readreg;
   8193 			mii->mii_writereg = wm_gmii_82580_writereg;
   8194 		} else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
   8196 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8197 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8198 		} else {
   8199 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8200 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8201 		}
   8202 		break;
   8203 	}
   8204 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8205 		/* All PCH* use _hv_ */
   8206 		mii->mii_readreg = wm_gmii_hv_readreg;
   8207 		mii->mii_writereg = wm_gmii_hv_writereg;
   8208 	}
   8209 	mii->mii_statchg = wm_gmii_statchg;
   8210 
   8211 	wm_gmii_reset(sc);
   8212 
   8213 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8214 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8215 	    wm_gmii_mediastatus);
   8216 
   8217 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8218 	    || (sc->sc_type == WM_T_82580)
   8219 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8220 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8221 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8222 			/* Attach only one port */
   8223 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8224 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8225 		} else {
   8226 			int i, id;
   8227 			uint32_t ctrl_ext;
   8228 
   8229 			id = wm_get_phy_id_82575(sc);
   8230 			if (id != -1) {
   8231 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8232 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8233 			}
   8234 			if ((id == -1)
   8235 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8236 				/* Power on sgmii phy if it is disabled */
   8237 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8238 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8239 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8240 				CSR_WRITE_FLUSH(sc);
   8241 				delay(300*1000); /* XXX too long */
   8242 
				/* Try PHY addresses 1 through 7 */
   8244 				for (i = 1; i < 8; i++)
   8245 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8246 					    0xffffffff, i, MII_OFFSET_ANY,
   8247 					    MIIF_DOPAUSE);
   8248 
   8249 				/* restore previous sfp cage power state */
   8250 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8251 			}
   8252 		}
   8253 	} else {
   8254 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8255 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8256 	}
   8257 
   8258 	/*
   8259 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8260 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8261 	 */
   8262 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8263 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8264 		wm_set_mdio_slow_mode_hv(sc);
   8265 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8266 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8267 	}
   8268 
   8269 	/*
   8270 	 * (For ICH8 variants)
   8271 	 * If PHY detection failed, use BM's r/w function and retry.
   8272 	 */
   8273 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8274 		/* if failed, retry with *_bm_* */
   8275 		mii->mii_readreg = wm_gmii_bm_readreg;
   8276 		mii->mii_writereg = wm_gmii_bm_writereg;
   8277 
   8278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8280 	}
   8281 
   8282 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   8284 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8285 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8286 		sc->sc_phytype = WMPHY_NONE;
   8287 	} else {
   8288 		/*
   8289 		 * PHY Found!
   8290 		 * Check PHY type.
   8291 		 */
   8292 		uint32_t model;
   8293 		struct mii_softc *child;
   8294 
   8295 		child = LIST_FIRST(&mii->mii_phys);
   8296 		model = child->mii_mpd_model;
   8297 		if (model == MII_MODEL_yyINTEL_I82566)
   8298 			sc->sc_phytype = WMPHY_IGP_3;
   8299 
   8300 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8301 	}
   8302 }
   8303 
   8304 /*
   8305  * wm_gmii_mediachange:	[ifmedia interface function]
   8306  *
   8307  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8308  */
   8309 static int
   8310 wm_gmii_mediachange(struct ifnet *ifp)
   8311 {
   8312 	struct wm_softc *sc = ifp->if_softc;
   8313 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8314 	int rc;
   8315 
   8316 	if ((ifp->if_flags & IFF_UP) == 0)
   8317 		return 0;
   8318 
   8319 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8320 	sc->sc_ctrl |= CTRL_SLU;
   8321 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8322 	    || (sc->sc_type > WM_T_82543)) {
   8323 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8324 	} else {
   8325 		sc->sc_ctrl &= ~CTRL_ASDE;
   8326 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8327 		if (ife->ifm_media & IFM_FDX)
   8328 			sc->sc_ctrl |= CTRL_FD;
   8329 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8330 		case IFM_10_T:
   8331 			sc->sc_ctrl |= CTRL_SPEED_10;
   8332 			break;
   8333 		case IFM_100_TX:
   8334 			sc->sc_ctrl |= CTRL_SPEED_100;
   8335 			break;
   8336 		case IFM_1000_T:
   8337 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8338 			break;
   8339 		default:
   8340 			panic("wm_gmii_mediachange: bad media 0x%x",
   8341 			    ife->ifm_media);
   8342 		}
   8343 	}
   8344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8345 	if (sc->sc_type <= WM_T_82543)
   8346 		wm_gmii_reset(sc);
   8347 
   8348 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8349 		return 0;
   8350 	return rc;
   8351 }
   8352 
   8353 /*
   8354  * wm_gmii_mediastatus:	[ifmedia interface function]
   8355  *
   8356  *	Get the current interface media status on a 1000BASE-T device.
   8357  */
   8358 static void
   8359 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8360 {
   8361 	struct wm_softc *sc = ifp->if_softc;
   8362 
   8363 	ether_mediastatus(ifp, ifmr);
   8364 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8365 	    | sc->sc_flowflags;
   8366 }
   8367 
   8368 #define	MDI_IO		CTRL_SWDPIN(2)
   8369 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8370 #define	MDI_CLK		CTRL_SWDPIN(3)
   8371 
   8372 static void
   8373 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8374 {
   8375 	uint32_t i, v;
   8376 
   8377 	v = CSR_READ(sc, WMREG_CTRL);
   8378 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8379 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8380 
   8381 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8382 		if (data & i)
   8383 			v |= MDI_IO;
   8384 		else
   8385 			v &= ~MDI_IO;
   8386 		CSR_WRITE(sc, WMREG_CTRL, v);
   8387 		CSR_WRITE_FLUSH(sc);
   8388 		delay(10);
   8389 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8390 		CSR_WRITE_FLUSH(sc);
   8391 		delay(10);
   8392 		CSR_WRITE(sc, WMREG_CTRL, v);
   8393 		CSR_WRITE_FLUSH(sc);
   8394 		delay(10);
   8395 	}
   8396 }
   8397 
   8398 static uint32_t
   8399 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8400 {
   8401 	uint32_t v, i, data = 0;
   8402 
   8403 	v = CSR_READ(sc, WMREG_CTRL);
   8404 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8405 	v |= CTRL_SWDPIO(3);
   8406 
   8407 	CSR_WRITE(sc, WMREG_CTRL, v);
   8408 	CSR_WRITE_FLUSH(sc);
   8409 	delay(10);
   8410 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8411 	CSR_WRITE_FLUSH(sc);
   8412 	delay(10);
   8413 	CSR_WRITE(sc, WMREG_CTRL, v);
   8414 	CSR_WRITE_FLUSH(sc);
   8415 	delay(10);
   8416 
   8417 	for (i = 0; i < 16; i++) {
   8418 		data <<= 1;
   8419 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8420 		CSR_WRITE_FLUSH(sc);
   8421 		delay(10);
   8422 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8423 			data |= 1;
   8424 		CSR_WRITE(sc, WMREG_CTRL, v);
   8425 		CSR_WRITE_FLUSH(sc);
   8426 		delay(10);
   8427 	}
   8428 
   8429 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8430 	CSR_WRITE_FLUSH(sc);
   8431 	delay(10);
   8432 	CSR_WRITE(sc, WMREG_CTRL, v);
   8433 	CSR_WRITE_FLUSH(sc);
   8434 	delay(10);
   8435 
   8436 	return data;
   8437 }
   8438 
   8439 #undef MDI_IO
   8440 #undef MDI_DIR
   8441 #undef MDI_CLK
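
/*
 * Sketch of the IEEE 802.3 clause 22 management frame that the bit-bang
 * helpers above shift out MSB-first, derived from the constants used in
 * the read/write functions below:
 *
 *	read:  32 x '1' preamble, then 14 bits
 *	       START(2) | READ(2) | phy(5) | reg(5),
 *	       then turnaround and 16 data bits via wm_i82543_mii_recvbits()
 *	write: 32 x '1' preamble, then 32 bits
 *	       START(2) | WRITE(2) | phy(5) | reg(5) | ACK(2) | val(16)
 */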
   8442 
   8443 /*
   8444  * wm_gmii_i82543_readreg:	[mii interface function]
   8445  *
   8446  *	Read a PHY register on the GMII (i82543 version).
   8447  */
   8448 static int
   8449 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8450 {
   8451 	struct wm_softc *sc = device_private(self);
   8452 	int rv;
   8453 
   8454 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8455 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8456 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8457 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8458 
   8459 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8460 	    device_xname(sc->sc_dev), phy, reg, rv));
   8461 
   8462 	return rv;
   8463 }
   8464 
   8465 /*
   8466  * wm_gmii_i82543_writereg:	[mii interface function]
   8467  *
   8468  *	Write a PHY register on the GMII (i82543 version).
   8469  */
   8470 static void
   8471 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8472 {
   8473 	struct wm_softc *sc = device_private(self);
   8474 
   8475 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8476 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8477 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8478 	    (MII_COMMAND_START << 30), 32);
   8479 }
   8480 
   8481 /*
   8482  * wm_gmii_i82544_readreg:	[mii interface function]
   8483  *
   8484  *	Read a PHY register on the GMII.
   8485  */
   8486 static int
   8487 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8488 {
   8489 	struct wm_softc *sc = device_private(self);
   8490 	uint32_t mdic = 0;
   8491 	int i, rv;
   8492 
   8493 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8494 	    MDIC_REGADD(reg));
   8495 
   8496 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8497 		mdic = CSR_READ(sc, WMREG_MDIC);
   8498 		if (mdic & MDIC_READY)
   8499 			break;
   8500 		delay(50);
   8501 	}
   8502 
   8503 	if ((mdic & MDIC_READY) == 0) {
   8504 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8505 		    device_xname(sc->sc_dev), phy, reg);
   8506 		rv = 0;
   8507 	} else if (mdic & MDIC_E) {
   8508 #if 0 /* This is normal if no PHY is present. */
   8509 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8510 		    device_xname(sc->sc_dev), phy, reg);
   8511 #endif
   8512 		rv = 0;
   8513 	} else {
   8514 		rv = MDIC_DATA(mdic);
   8515 		if (rv == 0xffff)
   8516 			rv = 0;
   8517 	}
   8518 
   8519 	return rv;
   8520 }
   8521 
   8522 /*
   8523  * wm_gmii_i82544_writereg:	[mii interface function]
   8524  *
   8525  *	Write a PHY register on the GMII.
   8526  */
   8527 static void
   8528 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8529 {
   8530 	struct wm_softc *sc = device_private(self);
   8531 	uint32_t mdic = 0;
   8532 	int i;
   8533 
   8534 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8535 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8536 
   8537 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8538 		mdic = CSR_READ(sc, WMREG_MDIC);
   8539 		if (mdic & MDIC_READY)
   8540 			break;
   8541 		delay(50);
   8542 	}
   8543 
   8544 	if ((mdic & MDIC_READY) == 0)
   8545 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8546 		    device_xname(sc->sc_dev), phy, reg);
   8547 	else if (mdic & MDIC_E)
   8548 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8549 		    device_xname(sc->sc_dev), phy, reg);
   8550 }
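
#if 0
/*
 * Minimal sketch (not used): the MDIC ready-bit polling duplicated in
 * wm_gmii_i82544_readreg() and wm_gmii_i82544_writereg() could be
 * factored into a helper like this; the caller would still check
 * MDIC_READY and MDIC_E itself.  Hypothetical name.
 */
static uint32_t
wm_mdic_wait_ready(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	return mdic;
}
#endif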
   8551 
   8552 /*
   8553  * wm_gmii_i80003_readreg:	[mii interface function]
   8554  *
   8555  *	Read a PHY register on the kumeran
   8556  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8558  */
   8559 static int
   8560 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8561 {
   8562 	struct wm_softc *sc = device_private(self);
   8563 	int sem;
   8564 	int rv;
   8565 
   8566 	if (phy != 1) /* only one PHY on kumeran bus */
   8567 		return 0;
   8568 
   8569 	sem = swfwphysem[sc->sc_funcid];
   8570 	if (wm_get_swfw_semaphore(sc, sem)) {
   8571 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8572 		    __func__);
   8573 		return 0;
   8574 	}
   8575 
   8576 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8577 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8578 		    reg >> GG82563_PAGE_SHIFT);
   8579 	} else {
   8580 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8581 		    reg >> GG82563_PAGE_SHIFT);
   8582 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8584 	delay(200);
   8585 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8586 	delay(200);
   8587 
   8588 	wm_put_swfw_semaphore(sc, sem);
   8589 	return rv;
   8590 }
   8591 
   8592 /*
   8593  * wm_gmii_i80003_writereg:	[mii interface function]
   8594  *
   8595  *	Write a PHY register on the kumeran.
   8596  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8598  */
   8599 static void
   8600 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8601 {
   8602 	struct wm_softc *sc = device_private(self);
   8603 	int sem;
   8604 
   8605 	if (phy != 1) /* only one PHY on kumeran bus */
   8606 		return;
   8607 
   8608 	sem = swfwphysem[sc->sc_funcid];
   8609 	if (wm_get_swfw_semaphore(sc, sem)) {
   8610 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8611 		    __func__);
   8612 		return;
   8613 	}
   8614 
   8615 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8616 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8617 		    reg >> GG82563_PAGE_SHIFT);
   8618 	} else {
   8619 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8620 		    reg >> GG82563_PAGE_SHIFT);
   8621 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8623 	delay(200);
   8624 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8625 	delay(200);
   8626 
   8627 	wm_put_swfw_semaphore(sc, sem);
   8628 }
   8629 
   8630 /*
   8631  * wm_gmii_bm_readreg:	[mii interface function]
   8632  *
 *	Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8636  */
   8637 static int
   8638 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8639 {
   8640 	struct wm_softc *sc = device_private(self);
   8641 	int sem;
   8642 	int rv;
   8643 
   8644 	sem = swfwphysem[sc->sc_funcid];
   8645 	if (wm_get_swfw_semaphore(sc, sem)) {
   8646 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8647 		    __func__);
   8648 		return 0;
   8649 	}
   8650 
   8651 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8652 		if (phy == 1)
   8653 			wm_gmii_i82544_writereg(self, phy,
   8654 			    MII_IGPHY_PAGE_SELECT, reg);
   8655 		else
   8656 			wm_gmii_i82544_writereg(self, phy,
   8657 			    GG82563_PHY_PAGE_SELECT,
   8658 			    reg >> GG82563_PAGE_SHIFT);
   8659 	}
   8660 
   8661 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8662 	wm_put_swfw_semaphore(sc, sem);
   8663 	return rv;
   8664 }
   8665 
   8666 /*
   8667  * wm_gmii_bm_writereg:	[mii interface function]
   8668  *
 *	Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8672  */
   8673 static void
   8674 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8675 {
   8676 	struct wm_softc *sc = device_private(self);
   8677 	int sem;
   8678 
   8679 	sem = swfwphysem[sc->sc_funcid];
   8680 	if (wm_get_swfw_semaphore(sc, sem)) {
   8681 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8682 		    __func__);
   8683 		return;
   8684 	}
   8685 
   8686 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8687 		if (phy == 1)
   8688 			wm_gmii_i82544_writereg(self, phy,
   8689 			    MII_IGPHY_PAGE_SELECT, reg);
   8690 		else
   8691 			wm_gmii_i82544_writereg(self, phy,
   8692 			    GG82563_PHY_PAGE_SELECT,
   8693 			    reg >> GG82563_PAGE_SHIFT);
   8694 	}
   8695 
   8696 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8697 	wm_put_swfw_semaphore(sc, sem);
   8698 }
   8699 
   8700 static void
   8701 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8702 {
   8703 	struct wm_softc *sc = device_private(self);
   8704 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8705 	uint16_t wuce;
   8706 
   8707 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8708 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
   8710 	}
   8711 
   8712 	/* Set page 769 */
   8713 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8714 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8715 
   8716 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8717 
   8718 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8719 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8720 	    wuce | BM_WUC_ENABLE_BIT);
   8721 
   8722 	/* Select page 800 */
   8723 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8724 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8725 
   8726 	/* Write page 800 */
   8727 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8728 
   8729 	if (rd)
   8730 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8731 	else
   8732 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8733 
   8734 	/* Set page 769 */
   8735 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8736 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8737 
   8738 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8739 }
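
#if 0
/*
 * Usage sketch (not compiled): reading the wakeup control register on
 * page 800 through the accessor above, as wm_gmii_hv_readreg() does for
 * any register whose page decodes to BM_WUC_PAGE.
 */
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(sc->sc_dev, BM_WUC, &wuc, 1);
#endif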
   8740 
   8741 /*
   8742  * wm_gmii_hv_readreg:	[mii interface function]
   8743  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8747  */
   8748 static int
   8749 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8750 {
   8751 	struct wm_softc *sc = device_private(self);
   8752 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8753 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8754 	uint16_t val;
   8755 	int rv;
   8756 
   8757 	if (wm_get_swfwhw_semaphore(sc)) {
   8758 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8759 		    __func__);
   8760 		return 0;
   8761 	}
   8762 
   8763 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8764 	if (sc->sc_phytype == WMPHY_82577) {
   8765 		/* XXX must write */
   8766 	}
   8767 
   8768 	/* Page 800 works differently than the rest so it has its own func */
   8769 	if (page == BM_WUC_PAGE) {
   8770 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8771 		return val;
   8772 	}
   8773 
   8774 	/*
	 * Pages lower than 768 work differently from the rest and would need
	 * their own function; that is not implemented yet.
   8777 	 */
   8778 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8779 		printf("gmii_hv_readreg!!!\n");
   8780 		return 0;
   8781 	}
   8782 
   8783 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8784 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8785 		    page << BME1000_PAGE_SHIFT);
   8786 	}
   8787 
   8788 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8789 	wm_put_swfwhw_semaphore(sc);
   8790 	return rv;
   8791 }
   8792 
   8793 /*
   8794  * wm_gmii_hv_writereg:	[mii interface function]
   8795  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8799  */
   8800 static void
   8801 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8802 {
   8803 	struct wm_softc *sc = device_private(self);
   8804 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8805 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8806 
   8807 	if (wm_get_swfwhw_semaphore(sc)) {
   8808 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8809 		    __func__);
   8810 		return;
   8811 	}
   8812 
   8813 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8814 
   8815 	/* Page 800 works differently than the rest so it has its own func */
   8816 	if (page == BM_WUC_PAGE) {
   8817 		uint16_t tmp;
   8818 
   8819 		tmp = val;
   8820 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8821 		return;
   8822 	}
   8823 
   8824 	/*
	 * Pages lower than 768 work differently from the rest and would need
	 * their own function; that is not implemented yet.
   8827 	 */
   8828 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8829 		printf("gmii_hv_writereg!!!\n");
   8830 		return;
   8831 	}
   8832 
   8833 	/*
   8834 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8835 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8836 	 */
   8837 
   8838 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8839 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8840 		    page << BME1000_PAGE_SHIFT);
   8841 	}
   8842 
   8843 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8844 	wm_put_swfwhw_semaphore(sc);
   8845 }
   8846 
   8847 /*
   8848  * wm_gmii_82580_readreg:	[mii interface function]
   8849  *
   8850  *	Read a PHY register on the 82580 and I350.
   8851  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8853  */
   8854 static int
   8855 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8856 {
   8857 	struct wm_softc *sc = device_private(self);
   8858 	int sem;
   8859 	int rv;
   8860 
   8861 	sem = swfwphysem[sc->sc_funcid];
   8862 	if (wm_get_swfw_semaphore(sc, sem)) {
   8863 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8864 		    __func__);
   8865 		return 0;
   8866 	}
   8867 
   8868 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8869 
   8870 	wm_put_swfw_semaphore(sc, sem);
   8871 	return rv;
   8872 }
   8873 
   8874 /*
   8875  * wm_gmii_82580_writereg:	[mii interface function]
   8876  *
   8877  *	Write a PHY register on the 82580 and I350.
   8878  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8880  */
   8881 static void
   8882 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8883 {
   8884 	struct wm_softc *sc = device_private(self);
   8885 	int sem;
   8886 
   8887 	sem = swfwphysem[sc->sc_funcid];
   8888 	if (wm_get_swfw_semaphore(sc, sem)) {
   8889 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8890 		    __func__);
   8891 		return;
   8892 	}
   8893 
   8894 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8895 
   8896 	wm_put_swfw_semaphore(sc, sem);
   8897 }
   8898 
   8899 /*
   8900  * wm_gmii_gs40g_readreg:	[mii interface function]
   8901  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8905  */
   8906 static int
   8907 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8908 {
   8909 	struct wm_softc *sc = device_private(self);
   8910 	int sem;
   8911 	int page, offset;
   8912 	int rv;
   8913 
   8914 	/* Acquire semaphore */
   8915 	sem = swfwphysem[sc->sc_funcid];
   8916 	if (wm_get_swfw_semaphore(sc, sem)) {
   8917 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8918 		    __func__);
   8919 		return 0;
   8920 	}
   8921 
   8922 	/* Page select */
   8923 	page = reg >> GS40G_PAGE_SHIFT;
   8924 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8925 
   8926 	/* Read reg */
   8927 	offset = reg & GS40G_OFFSET_MASK;
   8928 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8929 
   8930 	wm_put_swfw_semaphore(sc, sem);
   8931 	return rv;
   8932 }
   8933 
   8934 /*
   8935  * wm_gmii_gs40g_writereg:	[mii interface function]
   8936  *
   8937  *	Write a PHY register on the I210 and I211.
   8938  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8940  */
   8941 static void
   8942 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8943 {
   8944 	struct wm_softc *sc = device_private(self);
   8945 	int sem;
   8946 	int page, offset;
   8947 
   8948 	/* Acquire semaphore */
   8949 	sem = swfwphysem[sc->sc_funcid];
   8950 	if (wm_get_swfw_semaphore(sc, sem)) {
   8951 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8952 		    __func__);
   8953 		return;
   8954 	}
   8955 
   8956 	/* Page select */
   8957 	page = reg >> GS40G_PAGE_SHIFT;
   8958 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8959 
   8960 	/* Write reg */
   8961 	offset = reg & GS40G_OFFSET_MASK;
   8962 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8963 
   8964 	/* Release semaphore */
   8965 	wm_put_swfw_semaphore(sc, sem);
   8966 }
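
/*
 * Illustration: a GS40G register number carries its page in the upper
 * bits, so a hypothetical access to register 16 on page 2 would pass
 * reg == (2 << GS40G_PAGE_SHIFT) | 16; the helpers above split that back
 * into the page-select write and the offset access.
 */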
   8967 
   8968 /*
   8969  * wm_gmii_statchg:	[mii interface function]
   8970  *
   8971  *	Callback from MII layer when media changes.
   8972  */
   8973 static void
   8974 wm_gmii_statchg(struct ifnet *ifp)
   8975 {
   8976 	struct wm_softc *sc = ifp->if_softc;
   8977 	struct mii_data *mii = &sc->sc_mii;
   8978 
   8979 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8980 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8981 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8982 
   8983 	/*
   8984 	 * Get flow control negotiation result.
   8985 	 */
   8986 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8987 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8988 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8989 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8990 	}
   8991 
   8992 	if (sc->sc_flowflags & IFM_FLOW) {
   8993 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8994 			sc->sc_ctrl |= CTRL_TFCE;
   8995 			sc->sc_fcrtl |= FCRTL_XONE;
   8996 		}
   8997 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8998 			sc->sc_ctrl |= CTRL_RFCE;
   8999 	}
   9000 
   9001 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9002 		DPRINTF(WM_DEBUG_LINK,
   9003 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9004 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9005 	} else {
   9006 		DPRINTF(WM_DEBUG_LINK,
   9007 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9008 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9009 	}
   9010 
   9011 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9012 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9013 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9014 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9015 	if (sc->sc_type == WM_T_80003) {
   9016 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9017 		case IFM_1000_T:
   9018 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9019 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9020 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9021 			break;
   9022 		default:
   9023 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9024 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9025 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9026 			break;
   9027 		}
   9028 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9029 	}
   9030 }
   9031 
   9032 /*
   9033  * wm_kmrn_readreg:
   9034  *
   9035  *	Read a kumeran register
   9036  */
   9037 static int
   9038 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9039 {
   9040 	int rv;
   9041 
   9042 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9043 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9044 			aprint_error_dev(sc->sc_dev,
   9045 			    "%s: failed to get semaphore\n", __func__);
   9046 			return 0;
   9047 		}
   9048 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9049 		if (wm_get_swfwhw_semaphore(sc)) {
   9050 			aprint_error_dev(sc->sc_dev,
   9051 			    "%s: failed to get semaphore\n", __func__);
   9052 			return 0;
   9053 		}
   9054 	}
   9055 
   9056 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9057 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9058 	    KUMCTRLSTA_REN);
   9059 	CSR_WRITE_FLUSH(sc);
   9060 	delay(2);
   9061 
   9062 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9063 
   9064 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9065 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9066 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9067 		wm_put_swfwhw_semaphore(sc);
   9068 
   9069 	return rv;
   9070 }
   9071 
   9072 /*
   9073  * wm_kmrn_writereg:
   9074  *
   9075  *	Write a kumeran register
   9076  */
   9077 static void
   9078 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9079 {
   9080 
   9081 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9082 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9083 			aprint_error_dev(sc->sc_dev,
   9084 			    "%s: failed to get semaphore\n", __func__);
   9085 			return;
   9086 		}
   9087 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9088 		if (wm_get_swfwhw_semaphore(sc)) {
   9089 			aprint_error_dev(sc->sc_dev,
   9090 			    "%s: failed to get semaphore\n", __func__);
   9091 			return;
   9092 		}
   9093 	}
   9094 
   9095 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9096 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9097 	    (val & KUMCTRLSTA_MASK));
   9098 
   9099 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9100 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9101 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9102 		wm_put_swfwhw_semaphore(sc);
   9103 }
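
#if 0
/*
 * Usage sketch (not compiled): a read-modify-write of a kumeran register
 * through the helpers above, mirroring how wm_gmii_statchg() programs
 * KUMCTRLSTA_OFFSET_HD_CTRL.
 */
	int val;

	val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
#endif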
   9104 
   9105 /* SGMII related */
   9106 
   9107 /*
   9108  * wm_sgmii_uses_mdio
   9109  *
   9110  * Check whether the transaction is to the internal PHY or the external
   9111  * MDIO interface. Return true if it's MDIO.
   9112  */
   9113 static bool
   9114 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9115 {
   9116 	uint32_t reg;
   9117 	bool ismdio = false;
   9118 
   9119 	switch (sc->sc_type) {
   9120 	case WM_T_82575:
   9121 	case WM_T_82576:
   9122 		reg = CSR_READ(sc, WMREG_MDIC);
   9123 		ismdio = ((reg & MDIC_DEST) != 0);
   9124 		break;
   9125 	case WM_T_82580:
   9126 	case WM_T_I350:
   9127 	case WM_T_I354:
   9128 	case WM_T_I210:
   9129 	case WM_T_I211:
   9130 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9131 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9132 		break;
   9133 	default:
   9134 		break;
   9135 	}
   9136 
   9137 	return ismdio;
   9138 }
   9139 
   9140 /*
   9141  * wm_sgmii_readreg:	[mii interface function]
   9142  *
   9143  *	Read a PHY register on the SGMII
   9144  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9146  */
   9147 static int
   9148 wm_sgmii_readreg(device_t self, int phy, int reg)
   9149 {
   9150 	struct wm_softc *sc = device_private(self);
   9151 	uint32_t i2ccmd;
   9152 	int i, rv;
   9153 
   9154 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9155 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9156 		    __func__);
   9157 		return 0;
   9158 	}
   9159 
   9160 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9161 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9162 	    | I2CCMD_OPCODE_READ;
   9163 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9164 
   9165 	/* Poll the ready bit */
   9166 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9167 		delay(50);
   9168 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9169 		if (i2ccmd & I2CCMD_READY)
   9170 			break;
   9171 	}
   9172 	if ((i2ccmd & I2CCMD_READY) == 0)
   9173 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9174 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9175 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9176 
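	/* The I2C interface returns the 16-bit word byte-swapped */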
   9177 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9178 
   9179 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9180 	return rv;
   9181 }
   9182 
   9183 /*
   9184  * wm_sgmii_writereg:	[mii interface function]
   9185  *
   9186  *	Write a PHY register on the SGMII.
   9187  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9189  */
   9190 static void
   9191 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9192 {
   9193 	struct wm_softc *sc = device_private(self);
   9194 	uint32_t i2ccmd;
   9195 	int i;
   9196 	int val_swapped;
   9197 
   9198 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9199 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9200 		    __func__);
   9201 		return;
   9202 	}
   9203 	/* Swap the data bytes for the I2C interface */
   9204 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9205 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9206 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9207 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9208 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9209 
   9210 	/* Poll the ready bit */
   9211 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9212 		delay(50);
   9213 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9214 		if (i2ccmd & I2CCMD_READY)
   9215 			break;
   9216 	}
   9217 	if ((i2ccmd & I2CCMD_READY) == 0)
   9218 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9219 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9220 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9221 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9223 }
   9224 
   9225 /* TBI related */
   9226 
   9227 /*
   9228  * wm_tbi_mediainit:
   9229  *
   9230  *	Initialize media for use on 1000BASE-X devices.
   9231  */
   9232 static void
   9233 wm_tbi_mediainit(struct wm_softc *sc)
   9234 {
   9235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9236 	const char *sep = "";
   9237 
   9238 	if (sc->sc_type < WM_T_82543)
   9239 		sc->sc_tipg = TIPG_WM_DFLT;
   9240 	else
   9241 		sc->sc_tipg = TIPG_LG_DFLT;
   9242 
   9243 	sc->sc_tbi_serdes_anegticks = 5;
   9244 
   9245 	/* Initialize our media structures */
   9246 	sc->sc_mii.mii_ifp = ifp;
   9247 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9248 
   9249 	if ((sc->sc_type >= WM_T_82575)
   9250 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9251 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9252 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9253 	else
   9254 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9255 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9256 
   9257 	/*
   9258 	 * SWD Pins:
   9259 	 *
   9260 	 *	0 = Link LED (output)
   9261 	 *	1 = Loss Of Signal (input)
   9262 	 */
   9263 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9264 
   9265 	/* XXX Perhaps this is only for TBI */
   9266 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9267 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9268 
   9269 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9270 		sc->sc_ctrl &= ~CTRL_LRST;
   9271 
   9272 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9273 
   9274 #define	ADD(ss, mm, dd)							\
   9275 do {									\
   9276 	aprint_normal("%s%s", sep, ss);					\
   9277 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9278 	sep = ", ";							\
   9279 } while (/*CONSTCOND*/0)
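
/*
 * Illustration: ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD) below prints
 * "auto" on the media line and registers the corresponding
 * IFM_ETHER|IFM_AUTO entry via ifmedia_add().
 */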
   9280 
   9281 	aprint_normal_dev(sc->sc_dev, "");
   9282 
   9283 	/* Only 82545 is LX */
   9284 	if (sc->sc_type == WM_T_82545) {
   9285 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9286 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9287 	} else {
   9288 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9289 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9290 	}
   9291 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9292 	aprint_normal("\n");
   9293 
   9294 #undef ADD
   9295 
   9296 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9297 }
   9298 
   9299 /*
   9300  * wm_tbi_mediachange:	[ifmedia interface function]
   9301  *
   9302  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9303  */
   9304 static int
   9305 wm_tbi_mediachange(struct ifnet *ifp)
   9306 {
   9307 	struct wm_softc *sc = ifp->if_softc;
   9308 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9309 	uint32_t status;
   9310 	int i;
   9311 
   9312 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9313 		/* XXX need some work for >= 82571 and < 82575 */
   9314 		if (sc->sc_type < WM_T_82575)
   9315 			return 0;
   9316 	}
   9317 
   9318 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9319 	    || (sc->sc_type >= WM_T_82575))
   9320 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9321 
   9322 	sc->sc_ctrl &= ~CTRL_LRST;
   9323 	sc->sc_txcw = TXCW_ANE;
   9324 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9325 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9326 	else if (ife->ifm_media & IFM_FDX)
   9327 		sc->sc_txcw |= TXCW_FD;
   9328 	else
   9329 		sc->sc_txcw |= TXCW_HD;
   9330 
   9331 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9332 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9333 
   9334 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9335 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9336 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9337 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9338 	CSR_WRITE_FLUSH(sc);
   9339 	delay(1000);
   9340 
   9341 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9342 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9343 
   9344 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal, 0 if they don't.
   9347 	 */
   9348 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9349 		/* Have signal; wait for the link to come up. */
   9350 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9351 			delay(10000);
   9352 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9353 				break;
   9354 		}
   9355 
   9356 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9357 			    device_xname(sc->sc_dev),i));
   9358 
   9359 		status = CSR_READ(sc, WMREG_STATUS);
   9360 		DPRINTF(WM_DEBUG_LINK,
   9361 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9362 			device_xname(sc->sc_dev),status, STATUS_LU));
   9363 		if (status & STATUS_LU) {
   9364 			/* Link is up. */
   9365 			DPRINTF(WM_DEBUG_LINK,
   9366 			    ("%s: LINK: set media -> link up %s\n",
   9367 			    device_xname(sc->sc_dev),
   9368 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9369 
   9370 			/*
   9371 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9372 			 * so we should update sc->sc_ctrl
   9373 			 */
   9374 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9375 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9376 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9377 			if (status & STATUS_FD)
   9378 				sc->sc_tctl |=
   9379 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9380 			else
   9381 				sc->sc_tctl |=
   9382 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9383 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9384 				sc->sc_fcrtl |= FCRTL_XONE;
   9385 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9386 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9387 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9388 				      sc->sc_fcrtl);
   9389 			sc->sc_tbi_linkup = 1;
   9390 		} else {
   9391 			if (i == WM_LINKUP_TIMEOUT)
   9392 				wm_check_for_link(sc);
   9393 			/* Link is down. */
   9394 			DPRINTF(WM_DEBUG_LINK,
   9395 			    ("%s: LINK: set media -> link down\n",
   9396 			    device_xname(sc->sc_dev)));
   9397 			sc->sc_tbi_linkup = 0;
   9398 		}
   9399 	} else {
   9400 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9401 		    device_xname(sc->sc_dev)));
   9402 		sc->sc_tbi_linkup = 0;
   9403 	}
   9404 
   9405 	wm_tbi_serdes_set_linkled(sc);
   9406 
   9407 	return 0;
   9408 }
   9409 
   9410 /*
   9411  * wm_tbi_mediastatus:	[ifmedia interface function]
   9412  *
   9413  *	Get the current interface media status on a 1000BASE-X device.
   9414  */
   9415 static void
   9416 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9417 {
   9418 	struct wm_softc *sc = ifp->if_softc;
   9419 	uint32_t ctrl, status;
   9420 
   9421 	ifmr->ifm_status = IFM_AVALID;
   9422 	ifmr->ifm_active = IFM_ETHER;
   9423 
   9424 	status = CSR_READ(sc, WMREG_STATUS);
   9425 	if ((status & STATUS_LU) == 0) {
   9426 		ifmr->ifm_active |= IFM_NONE;
   9427 		return;
   9428 	}
   9429 
   9430 	ifmr->ifm_status |= IFM_ACTIVE;
   9431 	/* Only 82545 is LX */
   9432 	if (sc->sc_type == WM_T_82545)
   9433 		ifmr->ifm_active |= IFM_1000_LX;
   9434 	else
   9435 		ifmr->ifm_active |= IFM_1000_SX;
   9436 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9437 		ifmr->ifm_active |= IFM_FDX;
   9438 	else
   9439 		ifmr->ifm_active |= IFM_HDX;
   9440 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9441 	if (ctrl & CTRL_RFCE)
   9442 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9443 	if (ctrl & CTRL_TFCE)
   9444 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9445 }
   9446 
   9447 /* XXX TBI only */
   9448 static int
   9449 wm_check_for_link(struct wm_softc *sc)
   9450 {
   9451 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9452 	uint32_t rxcw;
   9453 	uint32_t ctrl;
   9454 	uint32_t status;
   9455 	uint32_t sig;
   9456 
   9457 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9458 		/* XXX need some work for >= 82571 */
   9459 		if (sc->sc_type >= WM_T_82571) {
   9460 			sc->sc_tbi_linkup = 1;
   9461 			return 0;
   9462 		}
   9463 	}
   9464 
   9465 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9466 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9467 	status = CSR_READ(sc, WMREG_STATUS);
   9468 
   9469 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9470 
   9471 	DPRINTF(WM_DEBUG_LINK,
   9472 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9473 		device_xname(sc->sc_dev), __func__,
   9474 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9475 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9476 
   9477 	/*
   9478 	 * SWDPIN   LU RXCW
   9479 	 *      0    0    0
   9480 	 *      0    0    1	(should not happen)
   9481 	 *      0    1    0	(should not happen)
   9482 	 *      0    1    1	(should not happen)
   9483 	 *      1    0    0	Disable autonego and force linkup
   9484 	 *      1    0    1	got /C/ but not linkup yet
   9485 	 *      1    1    0	(linkup)
   9486 	 *      1    1    1	If IFM_AUTO, back to autonego
   9487 	 *
   9488 	 */
   9489 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9490 	    && ((status & STATUS_LU) == 0)
   9491 	    && ((rxcw & RXCW_C) == 0)) {
   9492 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9493 			__func__));
   9494 		sc->sc_tbi_linkup = 0;
   9495 		/* Disable auto-negotiation in the TXCW register */
   9496 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9497 
   9498 		/*
   9499 		 * Force link-up and also force full-duplex.
   9500 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   9502 		 * so we should update sc->sc_ctrl
   9503 		 */
   9504 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9505 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9506 	} else if (((status & STATUS_LU) != 0)
   9507 	    && ((rxcw & RXCW_C) != 0)
   9508 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9509 		sc->sc_tbi_linkup = 1;
   9510 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9511 			__func__));
   9512 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9513 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9514 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9515 	    && ((rxcw & RXCW_C) != 0)) {
   9516 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9517 	} else {
   9518 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9519 			status));
   9520 	}
   9521 
   9522 	return 0;
   9523 }
   9524 
   9525 /*
   9526  * wm_tbi_tick:
   9527  *
   9528  *	Check the link on TBI devices.
   9529  *	This function acts as mii_tick().
   9530  */
   9531 static void
   9532 wm_tbi_tick(struct wm_softc *sc)
   9533 {
   9534 	struct mii_data *mii = &sc->sc_mii;
   9535 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9536 	uint32_t status;
   9537 
   9538 	KASSERT(WM_CORE_LOCKED(sc));
   9539 
   9540 	status = CSR_READ(sc, WMREG_STATUS);
   9541 
   9542 	/* XXX is this needed? */
   9543 	(void)CSR_READ(sc, WMREG_RXCW);
   9544 	(void)CSR_READ(sc, WMREG_CTRL);
   9545 
   9546 	/* set link status */
   9547 	if ((status & STATUS_LU) == 0) {
   9548 		DPRINTF(WM_DEBUG_LINK,
   9549 		    ("%s: LINK: checklink -> down\n",
   9550 			device_xname(sc->sc_dev)));
   9551 		sc->sc_tbi_linkup = 0;
   9552 	} else if (sc->sc_tbi_linkup == 0) {
   9553 		DPRINTF(WM_DEBUG_LINK,
   9554 		    ("%s: LINK: checklink -> up %s\n",
   9555 			device_xname(sc->sc_dev),
   9556 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9557 		sc->sc_tbi_linkup = 1;
   9558 		sc->sc_tbi_serdes_ticks = 0;
   9559 	}
   9560 
   9561 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9562 		goto setled;
   9563 
   9564 	if ((status & STATUS_LU) == 0) {
   9565 		sc->sc_tbi_linkup = 0;
   9566 		/* If the timer expired, retry autonegotiation */
   9567 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9568 		    && (++sc->sc_tbi_serdes_ticks
   9569 			>= sc->sc_tbi_serdes_anegticks)) {
   9570 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9571 			sc->sc_tbi_serdes_ticks = 0;
   9572 			/*
   9573 			 * Reset the link, and let autonegotiation do
   9574 			 * its thing
   9575 			 */
   9576 			sc->sc_ctrl |= CTRL_LRST;
   9577 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9578 			CSR_WRITE_FLUSH(sc);
   9579 			delay(1000);
   9580 			sc->sc_ctrl &= ~CTRL_LRST;
   9581 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9582 			CSR_WRITE_FLUSH(sc);
   9583 			delay(1000);
   9584 			CSR_WRITE(sc, WMREG_TXCW,
   9585 			    sc->sc_txcw & ~TXCW_ANE);
   9586 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9587 		}
   9588 	}
   9589 
   9590 setled:
   9591 	wm_tbi_serdes_set_linkled(sc);
   9592 }
   9593 
   9594 /* SERDES related */
   9595 static void
   9596 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9597 {
   9598 	uint32_t reg;
   9599 
   9600 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9601 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9602 		return;
   9603 
   9604 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9605 	reg |= PCS_CFG_PCS_EN;
   9606 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9607 
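	/*
	 * Clearing SWDPIN(3) in CTRL_EXT powers up the SFP cage; it is the
	 * same bit wm_gmii_mediainit() toggles to power on a disabled
	 * SGMII PHY.
	 */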
   9608 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9609 	reg &= ~CTRL_EXT_SWDPIN(3);
   9610 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9611 	CSR_WRITE_FLUSH(sc);
   9612 }
   9613 
   9614 static int
   9615 wm_serdes_mediachange(struct ifnet *ifp)
   9616 {
   9617 	struct wm_softc *sc = ifp->if_softc;
   9618 	bool pcs_autoneg = true; /* XXX */
   9619 	uint32_t ctrl_ext, pcs_lctl, reg;
   9620 
   9621 	/* XXX Currently, this function is not called on 8257[12] */
   9622 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9623 	    || (sc->sc_type >= WM_T_82575))
   9624 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9625 
   9626 	wm_serdes_power_up_link_82575(sc);
   9627 
   9628 	sc->sc_ctrl |= CTRL_SLU;
   9629 
   9630 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9631 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9632 
   9633 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9634 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9635 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9636 	case CTRL_EXT_LINK_MODE_SGMII:
   9637 		pcs_autoneg = true;
   9638 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9639 		break;
   9640 	case CTRL_EXT_LINK_MODE_1000KX:
   9641 		pcs_autoneg = false;
   9642 		/* FALLTHROUGH */
   9643 	default:
   9644 		if ((sc->sc_type == WM_T_82575)
   9645 		    || (sc->sc_type == WM_T_82576)) {
   9646 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9647 				pcs_autoneg = false;
   9648 		}
   9649 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9650 		    | CTRL_FRCFDX;
   9651 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9652 	}
   9653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9654 
   9655 	if (pcs_autoneg) {
   9656 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9657 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9658 
   9659 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9660 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9661 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9662 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9663 	} else
   9664 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9665 
   9666 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9667 
   9668 
   9669 	return 0;
   9670 }
   9671 
   9672 static void
   9673 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9674 {
   9675 	struct wm_softc *sc = ifp->if_softc;
   9676 	struct mii_data *mii = &sc->sc_mii;
   9677 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9678 	uint32_t pcs_adv, pcs_lpab, reg;
   9679 
   9680 	ifmr->ifm_status = IFM_AVALID;
   9681 	ifmr->ifm_active = IFM_ETHER;
   9682 
   9683 	/* Check PCS */
   9684 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9685 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9686 		ifmr->ifm_active |= IFM_NONE;
   9687 		sc->sc_tbi_linkup = 0;
   9688 		goto setled;
   9689 	}
   9690 
   9691 	sc->sc_tbi_linkup = 1;
   9692 	ifmr->ifm_status |= IFM_ACTIVE;
   9693 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9694 	if ((reg & PCS_LSTS_FDX) != 0)
   9695 		ifmr->ifm_active |= IFM_FDX;
   9696 	else
   9697 		ifmr->ifm_active |= IFM_HDX;
   9698 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9699 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9700 		/* Check flow */
   9701 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9702 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9703 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9704 			goto setled;
   9705 		}
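		/*
		 * Resolve the pause mode from the advertised and link
		 * partner ability words below, following the usual
		 * IEEE 802.3 Annex 28B resolution:
		 *
		 *	local SYM ASYM	partner SYM ASYM	result
		 *	      1    x		1    x		TX+RX pause
		 *	      0    1		1    1		TX pause only
		 *	      1    1		0    1		RX pause only
		 *	      otherwise				no pause
		 */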
   9706 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9707 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9708 		DPRINTF(WM_DEBUG_LINK,
   9709 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9710 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9711 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9712 			mii->mii_media_active |= IFM_FLOW
   9713 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9714 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9715 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9716 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9717 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9718 			mii->mii_media_active |= IFM_FLOW
   9719 			    | IFM_ETH_TXPAUSE;
   9720 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9721 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9722 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9723 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9724 			mii->mii_media_active |= IFM_FLOW
   9725 			    | IFM_ETH_RXPAUSE;
   9726 		} else {
   9727 		}
   9728 	}
   9729 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9730 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9731 setled:
   9732 	wm_tbi_serdes_set_linkled(sc);
   9733 }
   9734 
   9735 /*
   9736  * wm_serdes_tick:
   9737  *
   9738  *	Check the link on serdes devices.
   9739  */
   9740 static void
   9741 wm_serdes_tick(struct wm_softc *sc)
   9742 {
   9743 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9744 	struct mii_data *mii = &sc->sc_mii;
   9745 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9746 	uint32_t reg;
   9747 
   9748 	KASSERT(WM_CORE_LOCKED(sc));
   9749 
   9750 	mii->mii_media_status = IFM_AVALID;
   9751 	mii->mii_media_active = IFM_ETHER;
   9752 
   9753 	/* Check PCS */
   9754 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9755 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9756 		mii->mii_media_status |= IFM_ACTIVE;
   9757 		sc->sc_tbi_linkup = 1;
   9758 		sc->sc_tbi_serdes_ticks = 0;
   9759 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9760 		if ((reg & PCS_LSTS_FDX) != 0)
   9761 			mii->mii_media_active |= IFM_FDX;
   9762 		else
   9763 			mii->mii_media_active |= IFM_HDX;
   9764 	} else {
   9765 		mii->mii_media_status |= IFM_NONE;
   9766 		sc->sc_tbi_linkup = 0;
    9767 		/* If the timer expired, retry autonegotiation */
   9768 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9769 		    && (++sc->sc_tbi_serdes_ticks
   9770 			>= sc->sc_tbi_serdes_anegticks)) {
   9771 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9772 			sc->sc_tbi_serdes_ticks = 0;
   9773 			/* XXX */
   9774 			wm_serdes_mediachange(ifp);
   9775 		}
   9776 	}
   9777 
   9778 	wm_tbi_serdes_set_linkled(sc);
   9779 }
   9780 
   9781 /* SFP related */
   9782 
   9783 static int
   9784 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9785 {
   9786 	uint32_t i2ccmd;
   9787 	int i;
   9788 
   9789 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9790 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9791 
   9792 	/* Poll the ready bit */
   9793 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9794 		delay(50);
   9795 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9796 		if (i2ccmd & I2CCMD_READY)
   9797 			break;
   9798 	}
   9799 	if ((i2ccmd & I2CCMD_READY) == 0)
   9800 		return -1;
   9801 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9802 		return -1;
   9803 
   9804 	*data = i2ccmd & 0x00ff;
   9805 
   9806 	return 0;
   9807 }
   9808 
   9809 static uint32_t
   9810 wm_sfp_get_media_type(struct wm_softc *sc)
   9811 {
   9812 	uint32_t ctrl_ext;
   9813 	uint8_t val = 0;
   9814 	int timeout = 3;
   9815 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9816 	int rv = -1;
   9817 
   9818 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9819 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9820 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9821 	CSR_WRITE_FLUSH(sc);
   9822 
   9823 	/* Read SFP module data */
   9824 	while (timeout) {
   9825 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9826 		if (rv == 0)
   9827 			break;
   9828 		delay(100*1000); /* XXX too big */
   9829 		timeout--;
   9830 	}
   9831 	if (rv != 0)
   9832 		goto out;
   9833 	switch (val) {
   9834 	case SFF_SFP_ID_SFF:
   9835 		aprint_normal_dev(sc->sc_dev,
   9836 		    "Module/Connector soldered to board\n");
   9837 		break;
   9838 	case SFF_SFP_ID_SFP:
   9839 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9840 		break;
   9841 	case SFF_SFP_ID_UNKNOWN:
   9842 		goto out;
   9843 	default:
   9844 		break;
   9845 	}
   9846 
   9847 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9848 	if (rv != 0) {
   9849 		goto out;
   9850 	}
   9851 
   9852 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9853 		mediatype = WM_MEDIATYPE_SERDES;
    9854 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9855 		sc->sc_flags |= WM_F_SGMII;
   9856 		mediatype = WM_MEDIATYPE_COPPER;
    9857 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9858 		sc->sc_flags |= WM_F_SGMII;
   9859 		mediatype = WM_MEDIATYPE_SERDES;
   9860 	}
   9861 
   9862 out:
   9863 	/* Restore I2C interface setting */
   9864 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9865 
   9866 	return mediatype;
   9867 }
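
/*
 * For reference: the two bytes read above come from the SFP module's
 * serial ID EEPROM.  Assuming the standard SFF-8472 layout,
 * SFF_SFP_ID_OFF is byte 0 (the physical device identifier) and
 * SFF_SFP_ETH_FLAGS_OFF is byte 6 (the Ethernet compliance codes),
 * whose bits select 1000BASE-SX/LX/T and 100BASE-FX as tested above.
 */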
   9868 /*
   9869  * NVM related.
   9870  * Microwire, SPI (w/wo EERD) and Flash.
   9871  */
   9872 
   9873 /* Both spi and uwire */
   9874 
   9875 /*
   9876  * wm_eeprom_sendbits:
   9877  *
   9878  *	Send a series of bits to the EEPROM.
   9879  */
   9880 static void
   9881 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9882 {
   9883 	uint32_t reg;
   9884 	int x;
   9885 
   9886 	reg = CSR_READ(sc, WMREG_EECD);
   9887 
   9888 	for (x = nbits; x > 0; x--) {
   9889 		if (bits & (1U << (x - 1)))
   9890 			reg |= EECD_DI;
   9891 		else
   9892 			reg &= ~EECD_DI;
   9893 		CSR_WRITE(sc, WMREG_EECD, reg);
   9894 		CSR_WRITE_FLUSH(sc);
   9895 		delay(2);
   9896 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9897 		CSR_WRITE_FLUSH(sc);
   9898 		delay(2);
   9899 		CSR_WRITE(sc, WMREG_EECD, reg);
   9900 		CSR_WRITE_FLUSH(sc);
   9901 		delay(2);
   9902 	}
   9903 }
   9904 
   9905 /*
   9906  * wm_eeprom_recvbits:
   9907  *
   9908  *	Receive a series of bits from the EEPROM.
   9909  */
   9910 static void
   9911 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9912 {
   9913 	uint32_t reg, val;
   9914 	int x;
   9915 
   9916 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9917 
   9918 	val = 0;
   9919 	for (x = nbits; x > 0; x--) {
   9920 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9921 		CSR_WRITE_FLUSH(sc);
   9922 		delay(2);
   9923 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9924 			val |= (1U << (x - 1));
   9925 		CSR_WRITE(sc, WMREG_EECD, reg);
   9926 		CSR_WRITE_FLUSH(sc);
   9927 		delay(2);
   9928 	}
   9929 	*valp = val;
   9930 }
   9931 
   9932 /* Microwire */
   9933 
   9934 /*
   9935  * wm_nvm_read_uwire:
   9936  *
   9937  *	Read a word from the EEPROM using the MicroWire protocol.
   9938  */
   9939 static int
   9940 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9941 {
   9942 	uint32_t reg, val;
   9943 	int i;
   9944 
   9945 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   9946 		device_xname(sc->sc_dev), __func__));
   9947 
   9948 	for (i = 0; i < wordcnt; i++) {
   9949 		/* Clear SK and DI. */
   9950 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9951 		CSR_WRITE(sc, WMREG_EECD, reg);
   9952 
   9953 		/*
   9954 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9955 		 * and Xen.
   9956 		 *
   9957 		 * We use this workaround only for 82540 because qemu's
    9958 		 * e1000 acts as an 82540.
   9959 		 */
   9960 		if (sc->sc_type == WM_T_82540) {
   9961 			reg |= EECD_SK;
   9962 			CSR_WRITE(sc, WMREG_EECD, reg);
   9963 			reg &= ~EECD_SK;
   9964 			CSR_WRITE(sc, WMREG_EECD, reg);
   9965 			CSR_WRITE_FLUSH(sc);
   9966 			delay(2);
   9967 		}
   9968 		/* XXX: end of workaround */
   9969 
   9970 		/* Set CHIP SELECT. */
   9971 		reg |= EECD_CS;
   9972 		CSR_WRITE(sc, WMREG_EECD, reg);
   9973 		CSR_WRITE_FLUSH(sc);
   9974 		delay(2);
   9975 
   9976 		/* Shift in the READ command. */
   9977 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9978 
   9979 		/* Shift in address. */
   9980 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9981 
   9982 		/* Shift out the data. */
   9983 		wm_eeprom_recvbits(sc, &val, 16);
   9984 		data[i] = val & 0xffff;
   9985 
   9986 		/* Clear CHIP SELECT. */
   9987 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9988 		CSR_WRITE(sc, WMREG_EECD, reg);
   9989 		CSR_WRITE_FLUSH(sc);
   9990 		delay(2);
   9991 	}
   9992 
   9993 	return 0;
   9994 }
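
/*
 * For reference, the Microwire READ transaction clocked out above is,
 * assuming UWIRE_OPC_READ is the standard 3-bit read opcode (110b):
 *
 *	CS high, shift out:	1 1 0  A(n-1) ... A0
 *	then shift in:		D15 ... D0
 *
 * where n is sc_nvm_addrbits, with one EECD_SK clock pulse per bit as
 * generated by wm_eeprom_sendbits() and wm_eeprom_recvbits().
 */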
   9995 
   9996 /* SPI */
   9997 
   9998 /*
   9999  * Set SPI and FLASH related information from the EECD register.
   10000  * For 82541 and 82547, the word size is taken from EEPROM.
   10001  */
   10002 static int
   10003 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10004 {
   10005 	int size;
   10006 	uint32_t reg;
   10007 	uint16_t data;
   10008 
   10009 	reg = CSR_READ(sc, WMREG_EECD);
   10010 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10011 
   10012 	/* Read the size of NVM from EECD by default */
   10013 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10014 	switch (sc->sc_type) {
   10015 	case WM_T_82541:
   10016 	case WM_T_82541_2:
   10017 	case WM_T_82547:
   10018 	case WM_T_82547_2:
   10019 		/* Set dummy value to access EEPROM */
   10020 		sc->sc_nvm_wordsize = 64;
   10021 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10022 		reg = data;
   10023 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10024 		if (size == 0)
   10025 			size = 6; /* 64 word size */
   10026 		else
   10027 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10028 		break;
   10029 	case WM_T_80003:
   10030 	case WM_T_82571:
   10031 	case WM_T_82572:
   10032 	case WM_T_82573: /* SPI case */
   10033 	case WM_T_82574: /* SPI case */
   10034 	case WM_T_82583: /* SPI case */
   10035 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10036 		if (size > 14)
   10037 			size = 14;
   10038 		break;
   10039 	case WM_T_82575:
   10040 	case WM_T_82576:
   10041 	case WM_T_82580:
   10042 	case WM_T_I350:
   10043 	case WM_T_I354:
   10044 	case WM_T_I210:
   10045 	case WM_T_I211:
   10046 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10047 		if (size > 15)
   10048 			size = 15;
   10049 		break;
   10050 	default:
   10051 		aprint_error_dev(sc->sc_dev,
   10052 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10053 		return -1;
   10055 	}
   10056 
   10057 	sc->sc_nvm_wordsize = 1 << size;
   10058 
   10059 	return 0;
   10060 }
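
/*
 * Worked example (a sketch, assuming NVM_WORD_SIZE_BASE_SHIFT is 6):
 * an EECD size field of 2 on an 82571 yields size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words (512 bytes).
 */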
   10061 
   10062 /*
   10063  * wm_nvm_ready_spi:
   10064  *
   10065  *	Wait for a SPI EEPROM to be ready for commands.
   10066  */
   10067 static int
   10068 wm_nvm_ready_spi(struct wm_softc *sc)
   10069 {
   10070 	uint32_t val;
   10071 	int usec;
   10072 
   10073 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10074 		device_xname(sc->sc_dev), __func__));
   10075 
   10076 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10077 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10078 		wm_eeprom_recvbits(sc, &val, 8);
   10079 		if ((val & SPI_SR_RDY) == 0)
   10080 			break;
   10081 	}
   10082 	if (usec >= SPI_MAX_RETRIES) {
    10083 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10084 		return 1;
   10085 	}
   10086 	return 0;
   10087 }
   10088 
   10089 /*
   10090  * wm_nvm_read_spi:
   10091  *
    10092  *	Read a word from the EEPROM using the SPI protocol.
   10093  */
   10094 static int
   10095 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10096 {
   10097 	uint32_t reg, val;
   10098 	int i;
   10099 	uint8_t opc;
   10100 
   10101 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10102 		device_xname(sc->sc_dev), __func__));
   10103 
   10104 	/* Clear SK and CS. */
   10105 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10106 	CSR_WRITE(sc, WMREG_EECD, reg);
   10107 	CSR_WRITE_FLUSH(sc);
   10108 	delay(2);
   10109 
   10110 	if (wm_nvm_ready_spi(sc))
   10111 		return 1;
   10112 
   10113 	/* Toggle CS to flush commands. */
   10114 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10115 	CSR_WRITE_FLUSH(sc);
   10116 	delay(2);
   10117 	CSR_WRITE(sc, WMREG_EECD, reg);
   10118 	CSR_WRITE_FLUSH(sc);
   10119 	delay(2);
   10120 
   10121 	opc = SPI_OPC_READ;
   10122 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10123 		opc |= SPI_OPC_A8;
   10124 
   10125 	wm_eeprom_sendbits(sc, opc, 8);
   10126 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10127 
   10128 	for (i = 0; i < wordcnt; i++) {
   10129 		wm_eeprom_recvbits(sc, &val, 16);
   10130 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10131 	}
   10132 
   10133 	/* Raise CS and clear SK. */
   10134 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10135 	CSR_WRITE(sc, WMREG_EECD, reg);
   10136 	CSR_WRITE_FLUSH(sc);
   10137 	delay(2);
   10138 
   10139 	return 0;
   10140 }
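
/*
 * For reference, the SPI READ transaction above is, assuming
 * SPI_OPC_READ is the standard 8-bit EEPROM READ opcode (0x03):
 *
 *	CS low, shift out:	opcode (8 bits), byte address (word << 1)
 *	then shift in:		16 bits per word, high byte first
 *
 * The bytes arrive high byte first, hence the byte swap into data[i].
 * On parts with 8-bit addressing, words at 128 and above need an A8
 * bit folded into the opcode, as done with SPI_OPC_A8 above.
 */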
   10141 
    10142 /* Reading with EERD */
   10143 
   10144 static int
   10145 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10146 {
   10147 	uint32_t attempts = 100000;
   10148 	uint32_t i, reg = 0;
   10149 	int32_t done = -1;
   10150 
   10151 	for (i = 0; i < attempts; i++) {
   10152 		reg = CSR_READ(sc, rw);
   10153 
   10154 		if (reg & EERD_DONE) {
   10155 			done = 0;
   10156 			break;
   10157 		}
   10158 		delay(5);
   10159 	}
   10160 
   10161 	return done;
   10162 }
   10163 
   10164 static int
   10165 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10166     uint16_t *data)
   10167 {
   10168 	int i, eerd = 0;
   10169 	int error = 0;
   10170 
   10171 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10172 		device_xname(sc->sc_dev), __func__));
   10173 
   10174 	for (i = 0; i < wordcnt; i++) {
   10175 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10176 
   10177 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10178 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10179 		if (error != 0)
   10180 			break;
   10181 
   10182 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10183 	}
   10184 
   10185 	return error;
   10186 }
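
/*
 * For reference, each EERD transaction issued above has the form:
 *
 *	write (word_address << EERD_ADDR_SHIFT) | EERD_START to EERD
 *	poll EERD until EERD_DONE is set
 *	the 16-bit word is then in EERD >> EERD_DATA_SHIFT
 */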
   10187 
   10188 /* Flash */
   10189 
   10190 static int
   10191 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10192 {
   10193 	uint32_t eecd;
   10194 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10195 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10196 	uint8_t sig_byte = 0;
   10197 
   10198 	switch (sc->sc_type) {
   10199 	case WM_T_PCH_SPT:
   10200 		/*
   10201 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10202 		 * sector valid bits from the NVM.
   10203 		 */
   10204 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10205 		if ((*bank == 0) || (*bank == 1)) {
    10206 			aprint_error_dev(sc->sc_dev,
    10207 			    "%s: no valid NVM bank present\n", __func__);
   10209 			return -1;
   10210 		} else {
   10211 			*bank = *bank - 2;
   10212 			return 0;
   10213 		}
   10214 	case WM_T_ICH8:
   10215 	case WM_T_ICH9:
   10216 		eecd = CSR_READ(sc, WMREG_EECD);
   10217 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10218 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10219 			return 0;
   10220 		}
   10221 		/* FALLTHROUGH */
   10222 	default:
   10223 		/* Default to 0 */
   10224 		*bank = 0;
   10225 
   10226 		/* Check bank 0 */
   10227 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10228 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10229 			*bank = 0;
   10230 			return 0;
   10231 		}
   10232 
   10233 		/* Check bank 1 */
   10234 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10235 		    &sig_byte);
   10236 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10237 			*bank = 1;
   10238 			return 0;
   10239 		}
   10240 	}
   10241 
   10242 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10243 		device_xname(sc->sc_dev)));
   10244 	return -1;
   10245 }
   10246 
   10247 /******************************************************************************
   10248  * This function does initial flash setup so that a new read/write/erase cycle
   10249  * can be started.
   10250  *
   10251  * sc - The pointer to the hw structure
   10252  ****************************************************************************/
   10253 static int32_t
   10254 wm_ich8_cycle_init(struct wm_softc *sc)
   10255 {
   10256 	uint16_t hsfsts;
   10257 	int32_t error = 1;
   10258 	int32_t i     = 0;
   10259 
   10260 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10261 
    10262 	/* Check the Flash Descriptor Valid bit in Hw status */
   10263 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10264 		return error;
   10265 	}
   10266 
    10267 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   10269 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10270 
   10271 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10272 
    10273 	/*
    10274 	 * To start a new cycle we would ideally have either a hardware
    10275 	 * "SPI cycle in progress" bit to check, or an FDONE bit that
    10276 	 * reads as 1 after a hardware reset, so that it could indicate
    10277 	 * whether a cycle is in progress or has completed.  We should
    10278 	 * also have a software semaphore guarding FDONE or the
    10279 	 * in-progress bit, so that two threads cannot start a cycle at
    10280 	 * the same time.
    10281 	 */
   10283 
   10284 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10285 		/*
   10286 		 * There is no cycle running at present, so we can start a
   10287 		 * cycle
   10288 		 */
   10289 
   10290 		/* Begin by setting Flash Cycle Done. */
   10291 		hsfsts |= HSFSTS_DONE;
   10292 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10293 		error = 0;
   10294 	} else {
   10295 		/*
    10296 		 * Otherwise poll for some time so the current cycle has a
   10297 		 * chance to end before giving up.
   10298 		 */
   10299 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10300 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10301 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10302 				error = 0;
   10303 				break;
   10304 			}
   10305 			delay(1);
   10306 		}
   10307 		if (error == 0) {
   10308 			/*
    10309 			 * The previous cycle ended before the timeout;
    10310 			 * now set the Flash Cycle Done bit.
   10311 			 */
   10312 			hsfsts |= HSFSTS_DONE;
   10313 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10314 		}
   10315 	}
   10316 	return error;
   10317 }
   10318 
   10319 /******************************************************************************
   10320  * This function starts a flash cycle and waits for its completion
   10321  *
   10322  * sc - The pointer to the hw structure
   10323  ****************************************************************************/
   10324 static int32_t
   10325 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10326 {
   10327 	uint16_t hsflctl;
   10328 	uint16_t hsfsts;
   10329 	int32_t error = 1;
   10330 	uint32_t i = 0;
   10331 
   10332 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10333 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10334 	hsflctl |= HSFCTL_GO;
   10335 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10336 
   10337 	/* Wait till FDONE bit is set to 1 */
   10338 	do {
   10339 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10340 		if (hsfsts & HSFSTS_DONE)
   10341 			break;
   10342 		delay(1);
   10343 		i++;
   10344 	} while (i < timeout);
    10345 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   10346 		error = 0;
   10347 
   10348 	return error;
   10349 }
   10350 
   10351 /******************************************************************************
   10352  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10353  *
   10354  * sc - The pointer to the hw structure
   10355  * index - The index of the byte or word to read.
   10356  * size - Size of data to read, 1=byte 2=word, 4=dword
   10357  * data - Pointer to the word to store the value read.
   10358  *****************************************************************************/
   10359 static int32_t
   10360 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10361     uint32_t size, uint32_t *data)
   10362 {
   10363 	uint16_t hsfsts;
   10364 	uint16_t hsflctl;
   10365 	uint32_t flash_linear_address;
   10366 	uint32_t flash_data = 0;
   10367 	int32_t error = 1;
   10368 	int32_t count = 0;
   10369 
    10370 	if (size < 1 || size > 4 || data == NULL ||
   10371 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10372 		return error;
   10373 
   10374 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10375 	    sc->sc_ich8_flash_base;
   10376 
   10377 	do {
   10378 		delay(1);
   10379 		/* Steps */
   10380 		error = wm_ich8_cycle_init(sc);
   10381 		if (error)
   10382 			break;
   10383 
   10384 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10385 		/* Byte count field is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */
   10386 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10387 		    & HSFCTL_BCOUNT_MASK;
   10388 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10389 		if (sc->sc_type == WM_T_PCH_SPT) {
   10390 			/*
    10391 			 * In SPT, this register is in LAN memory space, not
    10392 			 * flash.  Therefore, only 32-bit access is supported.
   10393 			 */
   10394 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10395 			    (uint32_t)hsflctl);
   10396 		} else
   10397 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10398 
   10399 		/*
   10400 		 * Write the last 24 bits of index into Flash Linear address
   10401 		 * field in Flash Address
   10402 		 */
   10403 		/* TODO: TBD maybe check the index against the size of flash */
   10404 
   10405 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10406 
   10407 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10408 
    10409 		/*
    10410 		 * If FCERR is set, clear it and retry the whole sequence
    10411 		 * a few more times; otherwise read the result from Flash
    10412 		 * Data0, least significant byte first.
    10413 		 */
   10415 		if (error == 0) {
   10416 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10417 			if (size == 1)
   10418 				*data = (uint8_t)(flash_data & 0x000000FF);
   10419 			else if (size == 2)
   10420 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10421 			else if (size == 4)
   10422 				*data = (uint32_t)flash_data;
   10423 			break;
   10424 		} else {
   10425 			/*
   10426 			 * If we've gotten here, then things are probably
   10427 			 * completely hosed, but if the error condition is
   10428 			 * detected, it won't hurt to give it another try...
   10429 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10430 			 */
   10431 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10432 			if (hsfsts & HSFSTS_ERR) {
   10433 				/* Repeat for some time before giving up. */
   10434 				continue;
   10435 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10436 				break;
   10437 		}
   10438 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10439 
   10440 	return error;
   10441 }
   10442 
   10443 /******************************************************************************
   10444  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10445  *
   10446  * sc - pointer to wm_hw structure
   10447  * index - The index of the byte to read.
   10448  * data - Pointer to a byte to store the value read.
   10449  *****************************************************************************/
   10450 static int32_t
   10451 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10452 {
   10453 	int32_t status;
   10454 	uint32_t word = 0;
   10455 
   10456 	status = wm_read_ich8_data(sc, index, 1, &word);
   10457 	if (status == 0)
   10458 		*data = (uint8_t)word;
   10459 	else
   10460 		*data = 0;
   10461 
   10462 	return status;
   10463 }
   10464 
   10465 /******************************************************************************
   10466  * Reads a word from the NVM using the ICH8 flash access registers.
   10467  *
   10468  * sc - pointer to wm_hw structure
   10469  * index - The starting byte index of the word to read.
   10470  * data - Pointer to a word to store the value read.
   10471  *****************************************************************************/
   10472 static int32_t
   10473 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10474 {
   10475 	int32_t status;
   10476 	uint32_t word = 0;
   10477 
   10478 	status = wm_read_ich8_data(sc, index, 2, &word);
   10479 	if (status == 0)
   10480 		*data = (uint16_t)word;
   10481 	else
   10482 		*data = 0;
   10483 
   10484 	return status;
   10485 }
   10486 
   10487 /******************************************************************************
   10488  * Reads a dword from the NVM using the ICH8 flash access registers.
   10489  *
   10490  * sc - pointer to wm_hw structure
   10491  * index - The starting byte index of the word to read.
   10492  * data - Pointer to a word to store the value read.
   10493  *****************************************************************************/
   10494 static int32_t
   10495 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10496 {
   10497 	int32_t status;
   10498 
   10499 	status = wm_read_ich8_data(sc, index, 4, data);
   10500 	return status;
   10501 }
   10502 
   10503 /******************************************************************************
   10504  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10505  * register.
   10506  *
   10507  * sc - Struct containing variables accessed by shared code
   10508  * offset - offset of word in the EEPROM to read
   10509  * data - word read from the EEPROM
   10510  * words - number of words to read
   10511  *****************************************************************************/
   10512 static int
   10513 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10514 {
   10515 	int32_t  error = 0;
   10516 	uint32_t flash_bank = 0;
   10517 	uint32_t act_offset = 0;
   10518 	uint32_t bank_offset = 0;
   10519 	uint16_t word = 0;
   10520 	uint16_t i = 0;
   10521 
   10522 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10523 		device_xname(sc->sc_dev), __func__));
   10524 
   10525 	/*
   10526 	 * We need to know which is the valid flash bank.  In the event
   10527 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10528 	 * managing flash_bank.  So it cannot be trusted and needs
   10529 	 * to be updated with each read.
   10530 	 */
   10531 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10532 	if (error) {
   10533 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10534 			device_xname(sc->sc_dev)));
   10535 		flash_bank = 0;
   10536 	}
   10537 
   10538 	/*
   10539 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10540 	 * size
   10541 	 */
   10542 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10543 
   10544 	error = wm_get_swfwhw_semaphore(sc);
   10545 	if (error) {
   10546 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10547 		    __func__);
   10548 		return error;
   10549 	}
   10550 
   10551 	for (i = 0; i < words; i++) {
   10552 		/* The NVM part needs a byte offset, hence * 2 */
   10553 		act_offset = bank_offset + ((offset + i) * 2);
   10554 		error = wm_read_ich8_word(sc, act_offset, &word);
   10555 		if (error) {
   10556 			aprint_error_dev(sc->sc_dev,
   10557 			    "%s: failed to read NVM\n", __func__);
   10558 			break;
   10559 		}
   10560 		data[i] = word;
   10561 	}
   10562 
   10563 	wm_put_swfwhw_semaphore(sc);
   10564 	return error;
   10565 }
   10566 
   10567 /******************************************************************************
   10568  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10569  * register.
   10570  *
   10571  * sc - Struct containing variables accessed by shared code
   10572  * offset - offset of word in the EEPROM to read
   10573  * data - word read from the EEPROM
   10574  * words - number of words to read
   10575  *****************************************************************************/
   10576 static int
   10577 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10578 {
   10579 	int32_t  error = 0;
   10580 	uint32_t flash_bank = 0;
   10581 	uint32_t act_offset = 0;
   10582 	uint32_t bank_offset = 0;
   10583 	uint32_t dword = 0;
   10584 	uint16_t i = 0;
   10585 
   10586 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10587 		device_xname(sc->sc_dev), __func__));
   10588 
   10589 	/*
   10590 	 * We need to know which is the valid flash bank.  In the event
   10591 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10592 	 * managing flash_bank.  So it cannot be trusted and needs
   10593 	 * to be updated with each read.
   10594 	 */
   10595 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10596 	if (error) {
   10597 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10598 			device_xname(sc->sc_dev)));
   10599 		flash_bank = 0;
   10600 	}
   10601 
   10602 	/*
   10603 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10604 	 * size
   10605 	 */
   10606 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10607 
   10608 	error = wm_get_swfwhw_semaphore(sc);
   10609 	if (error) {
   10610 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10611 		    __func__);
   10612 		return error;
   10613 	}
   10614 
   10615 	for (i = 0; i < words; i++) {
   10616 		/* The NVM part needs a byte offset, hence * 2 */
   10617 		act_offset = bank_offset + ((offset + i) * 2);
   10618 		/* but we must read dword aligned, so mask ... */
   10619 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10620 		if (error) {
   10621 			aprint_error_dev(sc->sc_dev,
   10622 			    "%s: failed to read NVM\n", __func__);
   10623 			break;
   10624 		}
   10625 		/* ... and pick out low or high word */
   10626 		if ((act_offset & 0x2) == 0)
   10627 			data[i] = (uint16_t)(dword & 0xFFFF);
   10628 		else
   10629 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10630 	}
   10631 
   10632 	wm_put_swfwhw_semaphore(sc);
   10633 	return error;
   10634 }
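
/*
 * Worked example for the dword extraction above: reading NVM word 5
 * from bank 0 gives a byte offset of 10; the dword is read at offset
 * 8 (10 & ~0x3), and since (10 & 0x2) != 0 the word is taken from the
 * high half of that dword.
 */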
   10635 
   10636 /* iNVM */
   10637 
   10638 static int
   10639 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10640 {
   10641 	int32_t  rv = 0;
   10642 	uint32_t invm_dword;
   10643 	uint16_t i;
   10644 	uint8_t record_type, word_address;
   10645 
   10646 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10647 		device_xname(sc->sc_dev), __func__));
   10648 
   10649 	for (i = 0; i < INVM_SIZE; i++) {
   10650 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10651 		/* Get record type */
   10652 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10653 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10654 			break;
   10655 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10656 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10657 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10658 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10659 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10660 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10661 			if (word_address == address) {
   10662 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10663 				rv = 0;
   10664 				break;
   10665 			}
   10666 		}
   10667 	}
   10668 
   10669 	return rv;
   10670 }
   10671 
   10672 static int
   10673 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10674 {
   10675 	int rv = 0;
   10676 	int i;
   10677 
   10678 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10679 		device_xname(sc->sc_dev), __func__));
   10680 
   10681 	for (i = 0; i < words; i++) {
   10682 		switch (offset + i) {
   10683 		case NVM_OFF_MACADDR:
   10684 		case NVM_OFF_MACADDR1:
   10685 		case NVM_OFF_MACADDR2:
   10686 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10687 			if (rv != 0) {
   10688 				data[i] = 0xffff;
   10689 				rv = -1;
   10690 			}
   10691 			break;
   10692 		case NVM_OFF_CFG2:
   10693 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10694 			if (rv != 0) {
   10695 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10696 				rv = 0;
   10697 			}
   10698 			break;
   10699 		case NVM_OFF_CFG4:
   10700 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10701 			if (rv != 0) {
   10702 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10703 				rv = 0;
   10704 			}
   10705 			break;
   10706 		case NVM_OFF_LED_1_CFG:
   10707 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10708 			if (rv != 0) {
   10709 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10710 				rv = 0;
   10711 			}
   10712 			break;
   10713 		case NVM_OFF_LED_0_2_CFG:
   10714 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10715 			if (rv != 0) {
   10716 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10717 				rv = 0;
   10718 			}
   10719 			break;
   10720 		case NVM_OFF_ID_LED_SETTINGS:
   10721 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10722 			if (rv != 0) {
   10723 				*data = ID_LED_RESERVED_FFFF;
   10724 				rv = 0;
   10725 			}
   10726 			break;
   10727 		default:
   10728 			DPRINTF(WM_DEBUG_NVM,
   10729 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10730 			*data = NVM_RESERVED_WORD;
   10731 			break;
   10732 		}
   10733 	}
   10734 
   10735 	return rv;
   10736 }
   10737 
   10738 /* Lock, detecting NVM type, validate checksum, version and read */
   10739 
   10740 /*
   10741  * wm_nvm_acquire:
   10742  *
   10743  *	Perform the EEPROM handshake required on some chips.
   10744  */
   10745 static int
   10746 wm_nvm_acquire(struct wm_softc *sc)
   10747 {
   10748 	uint32_t reg;
   10749 	int x;
   10750 	int ret = 0;
   10751 
   10752 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10753 		device_xname(sc->sc_dev), __func__));
   10754 
   10755 	if (sc->sc_type >= WM_T_ICH8) {
   10756 		ret = wm_get_nvm_ich8lan(sc);
   10757 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10758 		ret = wm_get_swfwhw_semaphore(sc);
   10759 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10760 		/* This will also do wm_get_swsm_semaphore() if needed */
   10761 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10762 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10763 		ret = wm_get_swsm_semaphore(sc);
   10764 	}
   10765 
   10766 	if (ret) {
   10767 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10768 			__func__);
   10769 		return 1;
   10770 	}
   10771 
   10772 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10773 		reg = CSR_READ(sc, WMREG_EECD);
   10774 
   10775 		/* Request EEPROM access. */
   10776 		reg |= EECD_EE_REQ;
   10777 		CSR_WRITE(sc, WMREG_EECD, reg);
   10778 
   10779 		/* ..and wait for it to be granted. */
   10780 		for (x = 0; x < 1000; x++) {
   10781 			reg = CSR_READ(sc, WMREG_EECD);
   10782 			if (reg & EECD_EE_GNT)
   10783 				break;
   10784 			delay(5);
   10785 		}
   10786 		if ((reg & EECD_EE_GNT) == 0) {
   10787 			aprint_error_dev(sc->sc_dev,
   10788 			    "could not acquire EEPROM GNT\n");
   10789 			reg &= ~EECD_EE_REQ;
   10790 			CSR_WRITE(sc, WMREG_EECD, reg);
   10791 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10792 				wm_put_swfwhw_semaphore(sc);
   10793 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10794 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10795 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10796 				wm_put_swsm_semaphore(sc);
   10797 			return 1;
   10798 		}
   10799 	}
   10800 
   10801 	return 0;
   10802 }
   10803 
   10804 /*
   10805  * wm_nvm_release:
   10806  *
   10807  *	Release the EEPROM mutex.
   10808  */
   10809 static void
   10810 wm_nvm_release(struct wm_softc *sc)
   10811 {
   10812 	uint32_t reg;
   10813 
   10814 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10815 		device_xname(sc->sc_dev), __func__));
   10816 
   10817 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10818 		reg = CSR_READ(sc, WMREG_EECD);
   10819 		reg &= ~EECD_EE_REQ;
   10820 		CSR_WRITE(sc, WMREG_EECD, reg);
   10821 	}
   10822 
   10823 	if (sc->sc_type >= WM_T_ICH8) {
   10824 		wm_put_nvm_ich8lan(sc);
   10825 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10826 		wm_put_swfwhw_semaphore(sc);
   10827 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10828 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10829 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10830 		wm_put_swsm_semaphore(sc);
   10831 }
   10832 
   10833 static int
   10834 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10835 {
   10836 	uint32_t eecd = 0;
   10837 
   10838 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10839 	    || sc->sc_type == WM_T_82583) {
   10840 		eecd = CSR_READ(sc, WMREG_EECD);
   10841 
   10842 		/* Isolate bits 15 & 16 */
   10843 		eecd = ((eecd >> 15) & 0x03);
   10844 
   10845 		/* If both bits are set, device is Flash type */
   10846 		if (eecd == 0x03)
   10847 			return 0;
   10848 	}
   10849 	return 1;
   10850 }
   10851 
   10852 static int
   10853 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10854 {
   10855 	uint32_t eec;
   10856 
   10857 	eec = CSR_READ(sc, WMREG_EEC);
   10858 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10859 		return 1;
   10860 
   10861 	return 0;
   10862 }
   10863 
   10864 /*
   10865  * wm_nvm_validate_checksum
   10866  *
   10867  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10868  */
   10869 static int
   10870 wm_nvm_validate_checksum(struct wm_softc *sc)
   10871 {
   10872 	uint16_t checksum;
   10873 	uint16_t eeprom_data;
   10874 #ifdef WM_DEBUG
   10875 	uint16_t csum_wordaddr, valid_checksum;
   10876 #endif
   10877 	int i;
   10878 
   10879 	checksum = 0;
   10880 
   10881 	/* Don't check for I211 */
   10882 	if (sc->sc_type == WM_T_I211)
   10883 		return 0;
   10884 
   10885 #ifdef WM_DEBUG
   10886 	if (sc->sc_type == WM_T_PCH_LPT) {
   10887 		csum_wordaddr = NVM_OFF_COMPAT;
   10888 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10889 	} else {
   10890 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10891 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10892 	}
   10893 
   10894 	/* Dump EEPROM image for debug */
   10895 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10896 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10897 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10898 		/* XXX PCH_SPT? */
   10899 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10900 		if ((eeprom_data & valid_checksum) == 0) {
   10901 			DPRINTF(WM_DEBUG_NVM,
   10902 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10903 				device_xname(sc->sc_dev), eeprom_data,
   10904 				    valid_checksum));
   10905 		}
   10906 	}
   10907 
   10908 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10909 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10910 		for (i = 0; i < NVM_SIZE; i++) {
   10911 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10912 				printf("XXXX ");
   10913 			else
   10914 				printf("%04hx ", eeprom_data);
   10915 			if (i % 8 == 7)
   10916 				printf("\n");
   10917 		}
   10918 	}
   10919 
   10920 #endif /* WM_DEBUG */
   10921 
   10922 	for (i = 0; i < NVM_SIZE; i++) {
   10923 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10924 			return 1;
   10925 		checksum += eeprom_data;
   10926 	}
   10927 
   10928 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10929 #ifdef WM_DEBUG
   10930 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10931 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10932 #endif
   10933 	}
   10934 
   10935 	return 0;
   10936 }
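
/*
 * A minimal sketch (not used by the driver) of how the checksum word
 * itself would be computed when updating the NVM, assuming the
 * checksum word is the last of the summed words as on e1000 parts:
 * choose it so that the sum of all NVM_SIZE words is NVM_CHECKSUM.
 */
#if 0
static uint16_t
wm_nvm_compute_checksum(struct wm_softc *sc)
{
	uint16_t sum = 0, word;
	int i;

	for (i = 0; i < NVM_SIZE - 1; i++) {
		wm_nvm_read(sc, i, 1, &word);
		sum += word;
	}
	return (uint16_t)(NVM_CHECKSUM - sum);	/* value for the last word */
}
#endif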
   10937 
   10938 static void
   10939 wm_nvm_version_invm(struct wm_softc *sc)
   10940 {
   10941 	uint32_t dword;
   10942 
   10943 	/*
    10944 	 * Linux's code to decode the version is very strange, so we
    10945 	 * don't follow that algorithm and just use word 61 as the
    10946 	 * document describes.  Perhaps it's not perfect, though...
   10947 	 *
   10948 	 * Example:
   10949 	 *
   10950 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10951 	 */
   10952 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10953 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10954 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10955 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10956 }
   10957 
   10958 static void
   10959 wm_nvm_version(struct wm_softc *sc)
   10960 {
   10961 	uint16_t major, minor, build, patch;
   10962 	uint16_t uid0, uid1;
   10963 	uint16_t nvm_data;
   10964 	uint16_t off;
   10965 	bool check_version = false;
   10966 	bool check_optionrom = false;
   10967 	bool have_build = false;
   10968 
   10969 	/*
   10970 	 * Version format:
   10971 	 *
   10972 	 * XYYZ
   10973 	 * X0YZ
   10974 	 * X0YY
   10975 	 *
   10976 	 * Example:
   10977 	 *
   10978 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10979 	 *	82571	0x50a6	5.10.6?
   10980 	 *	82572	0x506a	5.6.10?
   10981 	 *	82572EI	0x5069	5.6.9?
   10982 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10983 	 *		0x2013	2.1.3?
    10984 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10985 	 */
   10986 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10987 	switch (sc->sc_type) {
   10988 	case WM_T_82571:
   10989 	case WM_T_82572:
   10990 	case WM_T_82574:
   10991 	case WM_T_82583:
   10992 		check_version = true;
   10993 		check_optionrom = true;
   10994 		have_build = true;
   10995 		break;
   10996 	case WM_T_82575:
   10997 	case WM_T_82576:
   10998 	case WM_T_82580:
   10999 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11000 			check_version = true;
   11001 		break;
   11002 	case WM_T_I211:
   11003 		wm_nvm_version_invm(sc);
   11004 		goto printver;
   11005 	case WM_T_I210:
   11006 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11007 			wm_nvm_version_invm(sc);
   11008 			goto printver;
   11009 		}
   11010 		/* FALLTHROUGH */
   11011 	case WM_T_I350:
   11012 	case WM_T_I354:
   11013 		check_version = true;
   11014 		check_optionrom = true;
   11015 		break;
   11016 	default:
   11017 		return;
   11018 	}
   11019 	if (check_version) {
   11020 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11021 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11022 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11023 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11024 			build = nvm_data & NVM_BUILD_MASK;
   11025 			have_build = true;
   11026 		} else
   11027 			minor = nvm_data & 0x00ff;
   11028 
    11029 		/* Convert BCD-coded minor (e.g. 0x10) to decimal (10) */
   11030 		minor = (minor / 16) * 10 + (minor % 16);
   11031 		sc->sc_nvm_ver_major = major;
   11032 		sc->sc_nvm_ver_minor = minor;
   11033 
   11034 printver:
   11035 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11036 		    sc->sc_nvm_ver_minor);
   11037 		if (have_build) {
   11038 			sc->sc_nvm_ver_build = build;
   11039 			aprint_verbose(".%d", build);
   11040 		}
   11041 	}
   11042 	if (check_optionrom) {
   11043 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11044 		/* Option ROM Version */
   11045 		if ((off != 0x0000) && (off != 0xffff)) {
   11046 			off += NVM_COMBO_VER_OFF;
   11047 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11048 			wm_nvm_read(sc, off, 1, &uid0);
   11049 			if ((uid0 != 0) && (uid0 != 0xffff)
   11050 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11051 				/* 16bits */
   11052 				major = uid0 >> 8;
   11053 				build = (uid0 << 8) | (uid1 >> 8);
   11054 				patch = uid1 & 0x00ff;
   11055 				aprint_verbose(", option ROM Version %d.%d.%d",
   11056 				    major, build, patch);
   11057 			}
   11058 		}
   11059 	}
   11060 
   11061 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11062 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11063 }
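
/*
 * Worked example of the decoding above, matching the table in the
 * comment: nvm_data 0x50a2 on an 82571 gives major = 5, minor = 0x0a
 * (10 after the BCD-to-decimal conversion) and build = 2, printed as
 * version 5.10.2.
 */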
   11064 
   11065 /*
   11066  * wm_nvm_read:
   11067  *
   11068  *	Read data from the serial EEPROM.
   11069  */
   11070 static int
   11071 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11072 {
   11073 	int rv;
   11074 
   11075 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11076 		device_xname(sc->sc_dev), __func__));
   11077 
   11078 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11079 		return 1;
   11080 
   11081 	if (wm_nvm_acquire(sc))
   11082 		return 1;
   11083 
   11084 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11085 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11086 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11087 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11088 	else if (sc->sc_type == WM_T_PCH_SPT)
   11089 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11090 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11091 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11092 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11093 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11094 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11095 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11096 	else
   11097 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11098 
   11099 	wm_nvm_release(sc);
   11100 	return rv;
   11101 }
   11102 
   11103 /*
   11104  * Hardware semaphores.
    11105  * Very complex...
   11106  */
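
/*
 * Typical usage pattern, a sketch mirroring wm_nvm_acquire() and
 * wm_nvm_release() above: take the lock appropriate for the chip,
 * touch the shared resource, then drop the lock again.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
		return 1;	/* timed out */
	/* ... access the EEPROM or another firmware-shared resource ... */
	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
#endif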
   11107 
   11108 static int
   11109 wm_get_swsm_semaphore(struct wm_softc *sc)
   11110 {
   11111 	int32_t timeout;
   11112 	uint32_t swsm;
   11113 
   11114 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11115 		device_xname(sc->sc_dev), __func__));
   11116 
   11117 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11118 		/* Get the SW semaphore. */
   11119 		timeout = sc->sc_nvm_wordsize + 1;
   11120 		while (timeout) {
   11121 			swsm = CSR_READ(sc, WMREG_SWSM);
   11122 
   11123 			if ((swsm & SWSM_SMBI) == 0)
   11124 				break;
   11125 
   11126 			delay(50);
   11127 			timeout--;
   11128 		}
   11129 
   11130 		if (timeout == 0) {
   11131 			aprint_error_dev(sc->sc_dev,
   11132 			    "could not acquire SWSM SMBI\n");
   11133 			return 1;
   11134 		}
   11135 	}
   11136 
   11137 	/* Get the FW semaphore. */
   11138 	timeout = sc->sc_nvm_wordsize + 1;
   11139 	while (timeout) {
   11140 		swsm = CSR_READ(sc, WMREG_SWSM);
   11141 		swsm |= SWSM_SWESMBI;
   11142 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11143 		/* If we managed to set the bit we got the semaphore. */
   11144 		swsm = CSR_READ(sc, WMREG_SWSM);
   11145 		if (swsm & SWSM_SWESMBI)
   11146 			break;
   11147 
   11148 		delay(50);
   11149 		timeout--;
   11150 	}
   11151 
   11152 	if (timeout == 0) {
   11153 		aprint_error_dev(sc->sc_dev,
   11154 		    "could not acquire SWSM SWESMBI\n");
   11155 		/* Release semaphores */
   11156 		wm_put_swsm_semaphore(sc);
   11157 		return 1;
   11158 	}
   11159 	return 0;
   11160 }
   11161 
   11162 /*
   11163  * Put hardware semaphore.
   11164  * Same as e1000_put_hw_semaphore_generic()
   11165  */
   11166 static void
   11167 wm_put_swsm_semaphore(struct wm_softc *sc)
   11168 {
   11169 	uint32_t swsm;
   11170 
   11171 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11172 		device_xname(sc->sc_dev), __func__));
   11173 
   11174 	swsm = CSR_READ(sc, WMREG_SWSM);
   11175 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11176 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11177 }
   11178 
   11179 /*
   11180  * Get SW/FW semaphore.
   11181  * Same as e1000_acquire_swfw_sync_82575().
   11182  */
   11183 static int
   11184 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11185 {
   11186 	uint32_t swfw_sync;
   11187 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11188 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11189 	int timeout = 200;
   11190 
   11191 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11192 		device_xname(sc->sc_dev), __func__));
   11193 
   11194 	for (timeout = 0; timeout < 200; timeout++) {
   11195 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11196 			if (wm_get_swsm_semaphore(sc)) {
   11197 				aprint_error_dev(sc->sc_dev,
   11198 				    "%s: failed to get semaphore\n",
   11199 				    __func__);
   11200 				return 1;
   11201 			}
   11202 		}
   11203 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11204 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11205 			swfw_sync |= swmask;
   11206 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11207 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11208 				wm_put_swsm_semaphore(sc);
   11209 			return 0;
   11210 		}
   11211 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11212 			wm_put_swsm_semaphore(sc);
   11213 		delay(5000);
   11214 	}
   11215 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11216 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11217 	return 1;
   11218 }
   11219 
   11220 static void
   11221 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11222 {
   11223 	uint32_t swfw_sync;
   11224 
   11225 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11226 		device_xname(sc->sc_dev), __func__));
   11227 
   11228 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11229 		while (wm_get_swsm_semaphore(sc) != 0)
   11230 			continue;
   11231 	}
   11232 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11233 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11234 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11235 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11236 		wm_put_swsm_semaphore(sc);
   11237 }
   11238 
   11239 static int
   11240 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11241 {
   11242 	uint32_t ext_ctrl;
   11243 	int timeout = 200;
   11244 
   11245 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11246 		device_xname(sc->sc_dev), __func__));
   11247 
   11248 	for (timeout = 0; timeout < 200; timeout++) {
   11249 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11250 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11251 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11252 
   11253 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11254 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11255 			return 0;
   11256 		delay(5000);
   11257 	}
   11258 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11259 	    device_xname(sc->sc_dev), ext_ctrl);
   11260 	return 1;
   11261 }
   11262 
   11263 static void
   11264 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11265 {
   11266 	uint32_t ext_ctrl;
   11267 
   11268 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11269 		device_xname(sc->sc_dev), __func__));
   11270 
   11271 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11272 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11273 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11274 }
   11275 
   11276 static int
   11277 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11278 {
   11279 
   11280 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11281 		device_xname(sc->sc_dev), __func__));
   11282 	mutex_enter(sc->sc_ich_nvmmtx);
   11283 
   11284 	return 0;
   11285 }
   11286 
   11287 static void
   11288 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11289 {
   11290 
   11291 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11292 		device_xname(sc->sc_dev), __func__));
   11293 	mutex_exit(sc->sc_ich_nvmmtx);
   11294 }
   11295 
   11296 static int
   11297 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11298 {
   11299 	int i = 0;
   11300 	uint32_t reg;
   11301 
   11302 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11303 		device_xname(sc->sc_dev), __func__));
   11304 
   11305 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11306 	do {
   11307 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11308 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11309 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11310 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11311 			break;
   11312 		delay(2*1000);
   11313 		i++;
   11314 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11315 
   11316 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11317 		wm_put_hw_semaphore_82573(sc);
   11318 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11319 		    device_xname(sc->sc_dev));
   11320 		return -1;
   11321 	}
   11322 
   11323 	return 0;
   11324 }
   11325 
   11326 static void
   11327 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11328 {
   11329 	uint32_t reg;
   11330 
   11331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11332 		device_xname(sc->sc_dev), __func__));
   11333 
   11334 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11335 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11336 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11337 }
   11338 
   11339 /*
   11340  * Management mode and power management related subroutines.
   11341  * BMC, AMT, suspend/resume and EEE.
   11342  */
   11343 
   11344 #ifdef WM_WOL
   11345 static int
   11346 wm_check_mng_mode(struct wm_softc *sc)
   11347 {
   11348 	int rv;
   11349 
   11350 	switch (sc->sc_type) {
   11351 	case WM_T_ICH8:
   11352 	case WM_T_ICH9:
   11353 	case WM_T_ICH10:
   11354 	case WM_T_PCH:
   11355 	case WM_T_PCH2:
   11356 	case WM_T_PCH_LPT:
   11357 	case WM_T_PCH_SPT:
   11358 		rv = wm_check_mng_mode_ich8lan(sc);
   11359 		break;
   11360 	case WM_T_82574:
   11361 	case WM_T_82583:
   11362 		rv = wm_check_mng_mode_82574(sc);
   11363 		break;
   11364 	case WM_T_82571:
   11365 	case WM_T_82572:
   11366 	case WM_T_82573:
   11367 	case WM_T_80003:
   11368 		rv = wm_check_mng_mode_generic(sc);
   11369 		break;
   11370 	default:
    11371 		/* nothing to do */
   11372 		rv = 0;
   11373 		break;
   11374 	}
   11375 
   11376 	return rv;
   11377 }
   11378 
   11379 static int
   11380 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11381 {
   11382 	uint32_t fwsm;
   11383 
   11384 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11385 
   11386 	if (((fwsm & FWSM_FW_VALID) != 0)
   11387 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11388 		return 1;
   11389 
   11390 	return 0;
   11391 }
   11392 
   11393 static int
   11394 wm_check_mng_mode_82574(struct wm_softc *sc)
   11395 {
   11396 	uint16_t data;
   11397 
   11398 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11399 
   11400 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11401 		return 1;
   11402 
   11403 	return 0;
   11404 }
   11405 
   11406 static int
   11407 wm_check_mng_mode_generic(struct wm_softc *sc)
   11408 {
   11409 	uint32_t fwsm;
   11410 
   11411 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11412 
   11413 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11414 		return 1;
   11415 
   11416 	return 0;
   11417 }
   11418 #endif /* WM_WOL */
   11419 
   11420 static int
   11421 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11422 {
   11423 	uint32_t manc, fwsm, factps;
   11424 
   11425 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11426 		return 0;
   11427 
   11428 	manc = CSR_READ(sc, WMREG_MANC);
   11429 
   11430 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11431 		device_xname(sc->sc_dev), manc));
   11432 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11433 		return 0;
   11434 
   11435 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11436 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11437 		factps = CSR_READ(sc, WMREG_FACTPS);
   11438 		if (((factps & FACTPS_MNGCG) == 0)
   11439 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11440 			return 1;
    11441 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11442 		uint16_t data;
   11443 
   11444 		factps = CSR_READ(sc, WMREG_FACTPS);
   11445 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11446 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11447 			device_xname(sc->sc_dev), factps, data));
   11448 		if (((factps & FACTPS_MNGCG) == 0)
   11449 		    && ((data & NVM_CFG2_MNGM_MASK)
   11450 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11451 			return 1;
   11452 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11453 	    && ((manc & MANC_ASF_EN) == 0))
   11454 		return 1;
   11455 
   11456 	return 0;
   11457 }
   11458 
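          /*
           * wm_phy_resetisblocked:
           *
           *	Return true if the firmware currently blocks PHY resets.  On
           *	the ICH/PCH variants FWSM_RSPCIPHY must be set for resets to
           *	be allowed; the bit is polled up to ten times, 10ms apart.  On
           *	the 8257x/80003 parts MANC_BLK_PHY_RST_ON_IDE is checked
           *	instead.
           */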
   11459 static bool
   11460 wm_phy_resetisblocked(struct wm_softc *sc)
   11461 {
   11462 	bool blocked = false;
   11463 	uint32_t reg;
   11464 	int i = 0;
   11465 
   11466 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11467 		device_xname(sc->sc_dev), __func__));
   11468 
   11469 	switch (sc->sc_type) {
   11470 	case WM_T_ICH8:
   11471 	case WM_T_ICH9:
   11472 	case WM_T_ICH10:
   11473 	case WM_T_PCH:
   11474 	case WM_T_PCH2:
   11475 	case WM_T_PCH_LPT:
   11476 	case WM_T_PCH_SPT:
   11477 		do {
   11478 			reg = CSR_READ(sc, WMREG_FWSM);
   11479 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11480 				blocked = true;
   11481 				delay(10*1000);
   11482 				continue;
   11483 			}
   11484 			blocked = false;
   11485 		} while (blocked && (i++ < 10));
   11486 		return blocked;
   11488 	case WM_T_82571:
   11489 	case WM_T_82572:
   11490 	case WM_T_82573:
   11491 	case WM_T_82574:
   11492 	case WM_T_82583:
   11493 	case WM_T_80003:
   11494 		reg = CSR_READ(sc, WMREG_MANC);
   11495 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11496 			return true;
   11497 		else
   11498 			return false;
   11500 	default:
    11501 		/* No problem: PHY reset is never blocked on other chips */
   11502 		break;
   11503 	}
   11504 
   11505 	return false;
   11506 }
   11507 
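          /*
           * wm_get_hw_control:
           *
           *	Let the firmware know that the driver has loaded by setting
           *	the DRV_LOAD handshake bit: SWSM_DRV_LOAD on the 82573,
           *	CTRL_EXT_DRV_LOAD everywhere else.  While the bit is set the
           *	management firmware is expected to keep its hands off the
           *	interface.  A minimal sketch of the intended pairing
           *	(illustrative only, not a copy of the actual call sites):
           *
           *		wm_get_hw_control(sc);		(attach/init path)
           *		 ... driver owns the hardware ...
           *		wm_release_hw_control(sc);	(detach/suspend path)
           */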
   11508 static void
   11509 wm_get_hw_control(struct wm_softc *sc)
   11510 {
   11511 	uint32_t reg;
   11512 
   11513 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11514 		device_xname(sc->sc_dev), __func__));
   11515 
   11516 	switch (sc->sc_type) {
   11517 	case WM_T_82573:
   11518 		reg = CSR_READ(sc, WMREG_SWSM);
   11519 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11520 		break;
   11521 	case WM_T_82571:
   11522 	case WM_T_82572:
   11523 	case WM_T_82574:
   11524 	case WM_T_82583:
   11525 	case WM_T_80003:
   11526 	case WM_T_ICH8:
   11527 	case WM_T_ICH9:
   11528 	case WM_T_ICH10:
   11529 	case WM_T_PCH:
   11530 	case WM_T_PCH2:
   11531 	case WM_T_PCH_LPT:
   11532 	case WM_T_PCH_SPT:
   11533 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11534 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11535 		break;
   11536 	default:
   11537 		break;
   11538 	}
   11539 }
   11540 
   11541 static void
   11542 wm_release_hw_control(struct wm_softc *sc)
   11543 {
   11544 	uint32_t reg;
   11545 
   11546 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11547 		device_xname(sc->sc_dev), __func__));
   11548 
   11549 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11550 		return;
   11551 
   11552 	if (sc->sc_type == WM_T_82573) {
   11553 		reg = CSR_READ(sc, WMREG_SWSM);
   11554 		reg &= ~SWSM_DRV_LOAD;
    11555 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11556 	} else {
   11557 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11558 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11559 	}
   11560 }
   11561 
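          /*
           * wm_gate_hw_phy_config_ich8lan:
           *
           *	Gate (or ungate) automatic PHY configuration by hardware via
           *	the EXTCNFCTR_GATE_PHY_CFG bit.  Only meaningful on PCH2
           *	(82579) and newer; on older chips this is a no-op.
           */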
   11562 static void
   11563 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11564 {
   11565 	uint32_t reg;
   11566 
   11567 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11568 		device_xname(sc->sc_dev), __func__));
   11569 
   11570 	if (sc->sc_type < WM_T_PCH2)
   11571 		return;
   11572 
   11573 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11574 
   11575 	if (gate)
   11576 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11577 	else
   11578 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11579 
   11580 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11581 }
   11582 
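          /*
           * wm_smbustopci:
           *
           *	Switch the PHY from SMBus to PCIe-like MDIO access when no
           *	valid firmware is present and PHY resets are not blocked.  On
           *	PCH_LPT and newer, SMBus mode is first forced
           *	(CTRL_EXT_FORCE_SMBUS), then the LANPHYPC pin is toggled to
           *	power-cycle the PHY, and the force bit is cleared again.
           */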
   11583 static void
   11584 wm_smbustopci(struct wm_softc *sc)
   11585 {
   11586 	uint32_t fwsm, reg;
   11587 
   11588 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11589 		device_xname(sc->sc_dev), __func__));
   11590 
   11591 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11592 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11593 
   11594 	/* Acquire semaphore */
   11595 	wm_get_swfwhw_semaphore(sc);
   11596 
   11597 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11598 	if (((fwsm & FWSM_FW_VALID) == 0)
    11599 	    && (wm_phy_resetisblocked(sc) == false)) {
   11600 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11601 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11602 			reg |= CTRL_EXT_FORCE_SMBUS;
   11603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11604 			CSR_WRITE_FLUSH(sc);
   11605 			delay(50*1000);
   11606 		}
   11607 
   11608 		/* Toggle LANPHYPC */
   11609 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11610 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11611 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11612 		CSR_WRITE_FLUSH(sc);
   11613 		delay(1000);
   11614 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11615 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11616 		CSR_WRITE_FLUSH(sc);
   11617 		delay(50*1000);
   11618 
   11619 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11620 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11621 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11622 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11623 		}
   11624 	}
   11625 
   11626 	/* Release semaphore */
   11627 	wm_put_swfwhw_semaphore(sc);
   11628 
   11629 	/*
   11630 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11631 	 */
   11632 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11633 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11634 }
   11635 
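          /*
           * wm_init_manageability:
           *
           *	Make management traffic visible to the host: disable hardware
           *	interception of ARP and, on 82571 and newer, route the packets
           *	selected by MANC2H_PORT_623/624 to the host.
           */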
   11636 static void
   11637 wm_init_manageability(struct wm_softc *sc)
   11638 {
   11639 
   11640 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11641 		device_xname(sc->sc_dev), __func__));
   11642 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11643 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11644 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11645 
   11646 		/* Disable hardware interception of ARP */
   11647 		manc &= ~MANC_ARP_EN;
   11648 
   11649 		/* Enable receiving management packets to the host */
   11650 		if (sc->sc_type >= WM_T_82571) {
   11651 			manc |= MANC_EN_MNG2HOST;
    11652 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11653 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11654 		}
   11655 
   11656 		CSR_WRITE(sc, WMREG_MANC, manc);
   11657 	}
   11658 }
   11659 
   11660 static void
   11661 wm_release_manageability(struct wm_softc *sc)
   11662 {
   11663 
   11664 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11665 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11666 
   11667 		manc |= MANC_ARP_EN;
   11668 		if (sc->sc_type >= WM_T_82571)
   11669 			manc &= ~MANC_EN_MNG2HOST;
   11670 
   11671 		CSR_WRITE(sc, WMREG_MANC, manc);
   11672 	}
   11673 }
   11674 
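          /*
           * wm_get_wakeup:
           *
           *	Collect the wakeup/management capabilities of the chip into
           *	sc_flags: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and,
           *	via wm_enable_mng_pass_thru(), HAS_MANAGE.
           */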
   11675 static void
   11676 wm_get_wakeup(struct wm_softc *sc)
   11677 {
   11678 
   11679 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11680 	switch (sc->sc_type) {
   11681 	case WM_T_82573:
   11682 	case WM_T_82583:
   11683 		sc->sc_flags |= WM_F_HAS_AMT;
   11684 		/* FALLTHROUGH */
   11685 	case WM_T_80003:
   11686 	case WM_T_82541:
   11687 	case WM_T_82547:
   11688 	case WM_T_82571:
   11689 	case WM_T_82572:
   11690 	case WM_T_82574:
   11691 	case WM_T_82575:
   11692 	case WM_T_82576:
   11693 	case WM_T_82580:
   11694 	case WM_T_I350:
   11695 	case WM_T_I354:
   11696 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11697 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11698 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11699 		break;
   11700 	case WM_T_ICH8:
   11701 	case WM_T_ICH9:
   11702 	case WM_T_ICH10:
   11703 	case WM_T_PCH:
   11704 	case WM_T_PCH2:
   11705 	case WM_T_PCH_LPT:
   11706 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11707 		sc->sc_flags |= WM_F_HAS_AMT;
   11708 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11709 		break;
   11710 	default:
   11711 		break;
   11712 	}
   11713 
   11714 	/* 1: HAS_MANAGE */
   11715 	if (wm_enable_mng_pass_thru(sc) != 0)
   11716 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11717 
   11718 #ifdef WM_DEBUG
   11719 	printf("\n");
   11720 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11721 		printf("HAS_AMT,");
   11722 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11723 		printf("ARC_SUBSYS_VALID,");
   11724 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11725 		printf("ASF_FIRMWARE_PRES,");
   11726 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11727 		printf("HAS_MANAGE,");
   11728 	printf("\n");
   11729 #endif
   11730 	/*
    11731 	 * Note that the WOL flag is set after the EEPROM-related reset
    11732 	 * code has run.
   11733 	 */
   11734 }
   11735 
   11736 #ifdef WM_WOL
   11737 /* WOL in the newer chipset interfaces (pchlan) */
   11738 static void
   11739 wm_enable_phy_wakeup(struct wm_softc *sc)
   11740 {
   11741 #if 0
   11742 	uint16_t preg;
   11743 
   11744 	/* Copy MAC RARs to PHY RARs */
   11745 
   11746 	/* Copy MAC MTA to PHY MTA */
   11747 
   11748 	/* Configure PHY Rx Control register */
   11749 
   11750 	/* Enable PHY wakeup in MAC register */
   11751 
   11752 	/* Configure and enable PHY wakeup in PHY registers */
   11753 
   11754 	/* Activate PHY wakeup */
   11755 
   11756 	/* XXX */
   11757 #endif
   11758 }
   11759 
   11760 /* Power down workaround on D3 */
   11761 static void
   11762 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11763 {
   11764 	uint32_t reg;
   11765 	int i;
   11766 
   11767 	for (i = 0; i < 2; i++) {
   11768 		/* Disable link */
   11769 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11770 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11771 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11772 
   11773 		/*
   11774 		 * Call gig speed drop workaround on Gig disable before
   11775 		 * accessing any PHY registers
   11776 		 */
   11777 		if (sc->sc_type == WM_T_ICH8)
   11778 			wm_gig_downshift_workaround_ich8lan(sc);
   11779 
   11780 		/* Write VR power-down enable */
   11781 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11782 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11783 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11784 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11785 
   11786 		/* Read it back and test */
   11787 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11788 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11789 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11790 			break;
   11791 
   11792 		/* Issue PHY reset and repeat at most one more time */
   11793 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11794 	}
   11795 }
   11796 
   11797 static void
   11798 wm_enable_wakeup(struct wm_softc *sc)
   11799 {
   11800 	uint32_t reg, pmreg;
   11801 	pcireg_t pmode;
   11802 
   11803 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11804 		&pmreg, NULL) == 0)
   11805 		return;
   11806 
   11807 	/* Advertise the wakeup capability */
   11808 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11809 	    | CTRL_SWDPIN(3));
   11810 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11811 
   11812 	/* ICH workaround */
   11813 	switch (sc->sc_type) {
   11814 	case WM_T_ICH8:
   11815 	case WM_T_ICH9:
   11816 	case WM_T_ICH10:
   11817 	case WM_T_PCH:
   11818 	case WM_T_PCH2:
   11819 	case WM_T_PCH_LPT:
   11820 	case WM_T_PCH_SPT:
   11821 		/* Disable gig during WOL */
   11822 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11823 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11824 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11825 		if (sc->sc_type == WM_T_PCH)
   11826 			wm_gmii_reset(sc);
   11827 
   11828 		/* Power down workaround */
   11829 		if (sc->sc_phytype == WMPHY_82577) {
   11830 			struct mii_softc *child;
   11831 
   11832 			/* Assume that the PHY is copper */
   11833 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11834 			if (child->mii_mpd_rev <= 2)
   11835 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11836 				    (768 << 5) | 25, 0x0444); /* magic num */
   11837 		}
   11838 		break;
   11839 	default:
   11840 		break;
   11841 	}
   11842 
   11843 	/* Keep the laser running on fiber adapters */
   11844 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11845 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11846 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11847 		reg |= CTRL_EXT_SWDPIN(3);
   11848 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11849 	}
   11850 
   11851 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11852 #if 0	/* for the multicast packet */
   11853 	reg |= WUFC_MC;
   11854 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11855 #endif
   11856 
   11857 	if (sc->sc_type == WM_T_PCH) {
   11858 		wm_enable_phy_wakeup(sc);
   11859 	} else {
   11860 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11861 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11862 	}
   11863 
   11864 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11865 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11866 		|| (sc->sc_type == WM_T_PCH2))
   11867 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11868 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11869 
   11870 	/* Request PME */
   11871 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11872 #if 0
   11873 	/* Disable WOL */
   11874 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11875 #else
   11876 	/* For WOL */
   11877 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11878 #endif
   11879 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11880 }
   11881 #endif /* WM_WOL */
   11882 
   11883 /* LPLU */
   11884 
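          /*
           * wm_lplu_d0_disable:
           *
           *	Disable D0 Low Power Link Up (LPLU) by clearing the D0A LPLU
           *	and gigabit-disable bits in PHY_CTRL.  The PCH variant below
           *	does the same through the HV_OEM_BITS PHY register and
           *	restarts autonegotiation.
           */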
   11885 static void
   11886 wm_lplu_d0_disable(struct wm_softc *sc)
   11887 {
   11888 	uint32_t reg;
   11889 
   11890 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11891 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11892 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11893 }
   11894 
   11895 static void
   11896 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11897 {
   11898 	uint32_t reg;
   11899 
   11900 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11901 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11902 	reg |= HV_OEM_BITS_ANEGNOW;
   11903 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11904 }
   11905 
   11906 /* EEE */
   11907 
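          /*
           * wm_set_eee_i350:
           *
           *	Enable or disable Energy Efficient Ethernet on the I350 class
           *	depending on WM_F_EEE: advertise EEE for 100M/1G in IPCNFG and
           *	switch the TX/RX LPI machinery on or off in EEER.
           */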
   11908 static void
   11909 wm_set_eee_i350(struct wm_softc *sc)
   11910 {
   11911 	uint32_t ipcnfg, eeer;
   11912 
   11913 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11914 	eeer = CSR_READ(sc, WMREG_EEER);
   11915 
   11916 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11917 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11918 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11919 		    | EEER_LPI_FC);
   11920 	} else {
   11921 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11922 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11923 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11924 		    | EEER_LPI_FC);
   11925 	}
   11926 
   11927 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11928 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11929 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11930 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11931 }
   11932 
   11933 /*
   11934  * Workarounds (mainly PHY related).
    11935  * Basically, PHY workarounds live in the PHY drivers.
   11936  */
   11937 
   11938 /* Work-around for 82566 Kumeran PCS lock loss */
   11939 static void
   11940 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11941 {
   11942 #if 0
   11943 	int miistatus, active, i;
   11944 	int reg;
   11945 
   11946 	miistatus = sc->sc_mii.mii_media_status;
   11947 
   11948 	/* If the link is not up, do nothing */
   11949 	if ((miistatus & IFM_ACTIVE) == 0)
   11950 		return;
   11951 
   11952 	active = sc->sc_mii.mii_media_active;
   11953 
   11954 	/* Nothing to do if the link is other than 1Gbps */
   11955 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11956 		return;
   11957 
   11958 	for (i = 0; i < 10; i++) {
   11959 		/* read twice */
   11960 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11961 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11962 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11963 			goto out;	/* GOOD! */
   11964 
   11965 		/* Reset the PHY */
   11966 		wm_gmii_reset(sc);
   11967 		delay(5*1000);
   11968 	}
   11969 
   11970 	/* Disable GigE link negotiation */
   11971 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11972 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11973 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11974 
   11975 	/*
   11976 	 * Call gig speed drop workaround on Gig disable before accessing
   11977 	 * any PHY registers.
   11978 	 */
   11979 	wm_gig_downshift_workaround_ich8lan(sc);
   11980 
   11981 out:
   11982 	return;
   11983 #endif
   11984 }
   11985 
   11986 /* WOL from S5 stops working */
   11987 static void
   11988 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11989 {
   11990 	uint16_t kmrn_reg;
   11991 
   11992 	/* Only for igp3 */
   11993 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11994 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11995 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11996 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11997 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11998 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11999 	}
   12000 }
   12001 
   12002 /*
    12003  * Workaround for PCH PHYs
   12004  * XXX should be moved to new PHY driver?
   12005  */
   12006 static void
   12007 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12008 {
   12009 
   12010 	KASSERT(sc->sc_type == WM_T_PCH);
   12011 
   12012 	if (sc->sc_phytype == WMPHY_82577)
   12013 		wm_set_mdio_slow_mode_hv(sc);
   12014 
   12015 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12016 
    12017 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   12018 
   12019 	/* 82578 */
   12020 	if (sc->sc_phytype == WMPHY_82578) {
   12021 		/* PCH rev. < 3 */
   12022 		if (sc->sc_rev < 3) {
   12023 			/* XXX 6 bit shift? Why? Is it page2? */
   12024 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   12025 			    0x66c0);
   12026 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   12027 			    0xffff);
   12028 		}
   12029 
   12030 		/* XXX phy rev. < 2 */
   12031 	}
   12032 
   12033 	/* Select page 0 */
   12034 
   12035 	/* XXX acquire semaphore */
   12036 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12037 	/* XXX release semaphore */
   12038 
   12039 	/*
   12040 	 * Configure the K1 Si workaround during phy reset assuming there is
   12041 	 * link so that it disables K1 if link is in 1Gbps.
   12042 	 */
   12043 	wm_k1_gig_workaround_hv(sc, 1);
   12044 }
   12045 
   12046 static void
   12047 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12048 {
   12049 
   12050 	KASSERT(sc->sc_type == WM_T_PCH2);
   12051 
   12052 	wm_set_mdio_slow_mode_hv(sc);
   12053 }
   12054 
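          /*
           * wm_k1_gig_workaround_hv:
           *
           *	Configure the K1 power state around a PHY reset.  K1 must stay
           *	off while a (potential) 1Gbps link is up, so link != 0 forces
           *	k1_enable to 0 and writes the link-up stall fix to
           *	IGP3_KMRN_DIAG; otherwise the link-down value is written.
           */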
   12055 static void
   12056 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12057 {
   12058 	int k1_enable = sc->sc_nvm_k1_enabled;
   12059 
   12060 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12061 		device_xname(sc->sc_dev), __func__));
   12062 
   12063 	/* XXX acquire semaphore */
   12064 
   12065 	if (link) {
   12066 		k1_enable = 0;
   12067 
   12068 		/* Link stall fix for link up */
   12069 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12070 	} else {
   12071 		/* Link stall fix for link down */
   12072 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12073 	}
   12074 
   12075 	wm_configure_k1_ich8lan(sc, k1_enable);
   12076 
   12077 	/* XXX release semaphore */
   12078 }
   12079 
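          /*
           * wm_set_mdio_slow_mode_hv:
           *
           *	Put the HV (82577) PHY's MDIO interface into slow mode by
           *	setting HV_KMRN_MDIO_SLOW in HV_KMRN_MODE_CTRL.
           */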
   12080 static void
   12081 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12082 {
   12083 	uint32_t reg;
   12084 
   12085 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12086 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12087 	    reg | HV_KMRN_MDIO_SLOW);
   12088 }
   12089 
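          /*
           * wm_configure_k1_ich8lan:
           *
           *	Write the K1 enable bit into the Kumeran K1 config register,
           *	then briefly force the MAC speed (CTRL_FRCSPD together with
           *	CTRL_EXT_SPD_BYPS) so the new setting takes effect, and
           *	restore CTRL/CTRL_EXT afterwards.
           */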
   12090 static void
   12091 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12092 {
   12093 	uint32_t ctrl, ctrl_ext, tmp;
   12094 	uint16_t kmrn_reg;
   12095 
   12096 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12097 
   12098 	if (k1_enable)
   12099 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12100 	else
   12101 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12102 
   12103 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12104 
   12105 	delay(20);
   12106 
   12107 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12108 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12109 
   12110 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12111 	tmp |= CTRL_FRCSPD;
   12112 
   12113 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12114 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12115 	CSR_WRITE_FLUSH(sc);
   12116 	delay(20);
   12117 
   12118 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12119 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12120 	CSR_WRITE_FLUSH(sc);
   12121 	delay(20);
   12122 }
   12123 
   12124 /* special case - for 82575 - need to do manual init ... */
   12125 static void
   12126 wm_reset_init_script_82575(struct wm_softc *sc)
   12127 {
   12128 	/*
    12129 	 * Remark: this is untested code - we have no board without EEPROM;
    12130 	 * same setup as mentioned in the FreeBSD driver for the i82575
   12131 	 */
   12132 
   12133 	/* SerDes configuration via SERDESCTRL */
   12134 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12135 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12136 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12137 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12138 
   12139 	/* CCM configuration via CCMCTL register */
   12140 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12141 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12142 
   12143 	/* PCIe lanes configuration */
   12144 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12145 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12146 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12147 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12148 
   12149 	/* PCIe PLL Configuration */
   12150 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12151 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12152 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12153 }
   12154 
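          /*
           * wm_reset_mdicnfg_82580:
           *
           *	For SGMII configurations, restore the MDICNFG destination and
           *	shared-MDIO bits from the CFG3 NVM word of this LAN function
           *	after a reset.
           */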
   12155 static void
   12156 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12157 {
   12158 	uint32_t reg;
   12159 	uint16_t nvmword;
   12160 	int rv;
   12161 
   12162 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12163 		return;
   12164 
   12165 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12166 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12167 	if (rv != 0) {
   12168 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12169 		    __func__);
   12170 		return;
   12171 	}
   12172 
   12173 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12174 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12175 		reg |= MDICNFG_DEST;
   12176 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12177 		reg |= MDICNFG_COM_MDIO;
   12178 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12179 }
   12180 
   12181 /*
   12182  * I210 Errata 25 and I211 Errata 10
   12183  * Slow System Clock.
   12184  */
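          /*
           * If the PHY PLL reads back as unconfigured, reset the internal
           * PHY, rewrite the iNVM autoload word with INVM_PLL_WO_VAL set via
           * EEARBC_I210, and bounce the function through D3hot/D0 so the
           * word is reloaded; retry up to WM_MAX_PLL_TRIES times.
           */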
   12185 static void
   12186 wm_pll_workaround_i210(struct wm_softc *sc)
   12187 {
   12188 	uint32_t mdicnfg, wuc;
   12189 	uint32_t reg;
   12190 	pcireg_t pcireg;
   12191 	uint32_t pmreg;
   12192 	uint16_t nvmword, tmp_nvmword;
   12193 	int phyval;
   12194 	bool wa_done = false;
   12195 	int i;
   12196 
   12197 	/* Save WUC and MDICNFG registers */
   12198 	wuc = CSR_READ(sc, WMREG_WUC);
   12199 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12200 
   12201 	reg = mdicnfg & ~MDICNFG_DEST;
   12202 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12203 
   12204 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12205 		nvmword = INVM_DEFAULT_AL;
   12206 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12207 
   12208 	/* Get Power Management cap offset */
   12209 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12210 		&pmreg, NULL) == 0)
   12211 		return;
   12212 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12213 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12214 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12215 
   12216 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12217 			break; /* OK */
   12218 		}
   12219 
   12220 		wa_done = true;
   12221 		/* Directly reset the internal PHY */
   12222 		reg = CSR_READ(sc, WMREG_CTRL);
   12223 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12224 
   12225 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12226 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12227 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12228 
   12229 		CSR_WRITE(sc, WMREG_WUC, 0);
   12230 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12231 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12232 
   12233 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12234 		    pmreg + PCI_PMCSR);
   12235 		pcireg |= PCI_PMCSR_STATE_D3;
   12236 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12237 		    pmreg + PCI_PMCSR, pcireg);
   12238 		delay(1000);
   12239 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12240 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12241 		    pmreg + PCI_PMCSR, pcireg);
   12242 
   12243 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12244 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12245 
   12246 		/* Restore WUC register */
   12247 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12248 	}
   12249 
   12250 	/* Restore MDICNFG setting */
   12251 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12252 	if (wa_done)
   12253 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12254 }
   12255