Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.442
      1 /*	$NetBSD: if_wm.c,v 1.442 2016/11/10 06:57:15 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.442 2016/11/10 06:57:15 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * This device driver's max interrupt numbers.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * x)
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do{								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	}while(0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
    292         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segment */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs. This queue intermediate them without block.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    322 	 * to manage Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq,txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segment */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index of MSI-X tables */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 };
    400 
    401 /*
    402  * Software state per device.
    403  */
    404 struct wm_softc {
    405 	device_t sc_dev;		/* generic device information */
    406 	bus_space_tag_t sc_st;		/* bus space tag */
    407 	bus_space_handle_t sc_sh;	/* bus space handle */
    408 	bus_size_t sc_ss;		/* bus space size */
    409 	bus_space_tag_t sc_iot;		/* I/O space tag */
    410 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    411 	bus_size_t sc_ios;		/* I/O space size */
    412 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    413 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    414 	bus_size_t sc_flashs;		/* flash registers space size */
    415 	off_t sc_flashreg_offset;	/*
    416 					 * offset to flash registers from
    417 					 * start of BAR
    418 					 */
    419 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    420 
    421 	struct ethercom sc_ethercom;	/* ethernet common data */
    422 	struct mii_data sc_mii;		/* MII/media information */
    423 
    424 	pci_chipset_tag_t sc_pc;
    425 	pcitag_t sc_pcitag;
    426 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    427 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    428 
    429 	uint16_t sc_pcidevid;		/* PCI device ID */
    430 	wm_chip_type sc_type;		/* MAC type */
    431 	int sc_rev;			/* MAC revision */
    432 	wm_phy_type sc_phytype;		/* PHY type */
    433 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    434 #define	WM_MEDIATYPE_UNKNOWN		0x00
    435 #define	WM_MEDIATYPE_FIBER		0x01
    436 #define	WM_MEDIATYPE_COPPER		0x02
    437 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    438 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    439 	int sc_flags;			/* flags; see below */
    440 	int sc_if_flags;		/* last if_flags */
    441 	int sc_flowflags;		/* 802.3x flow control flags */
    442 	int sc_align_tweak;
    443 
    444 	void *sc_ihs[WM_MAX_NINTR];	/*
    445 					 * interrupt cookie.
    446 					 * legacy and msi use sc_ihs[0].
    447 					 */
    448 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    449 	int sc_nintrs;			/* number of interrupts */
    450 
    451 	int sc_link_intr_idx;		/* index of MSI-X tables */
    452 
    453 	callout_t sc_tick_ch;		/* tick callout */
    454 	bool sc_core_stopping;
    455 
    456 	int sc_nvm_ver_major;
    457 	int sc_nvm_ver_minor;
    458 	int sc_nvm_ver_build;
    459 	int sc_nvm_addrbits;		/* NVM address bits */
    460 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    461 	int sc_ich8_flash_base;
    462 	int sc_ich8_flash_bank_size;
    463 	int sc_nvm_k1_enabled;
    464 
    465 	int sc_nqueues;
    466 	struct wm_queue *sc_queue;
    467 
    468 	int sc_affinity_offset;
    469 
    470 #ifdef WM_EVENT_COUNTERS
    471 	/* Event counters. */
    472 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    473 
    474         /* WM_T_82542_2_1 only */
    475 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    476 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    477 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    478 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    479 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    480 #endif /* WM_EVENT_COUNTERS */
    481 
    482 	/* This variable are used only on the 82547. */
    483 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    484 
    485 	uint32_t sc_ctrl;		/* prototype CTRL register */
    486 #if 0
    487 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    488 #endif
    489 	uint32_t sc_icr;		/* prototype interrupt bits */
    490 	uint32_t sc_itr;		/* prototype intr throttling reg */
    491 	uint32_t sc_tctl;		/* prototype TCTL register */
    492 	uint32_t sc_rctl;		/* prototype RCTL register */
    493 	uint32_t sc_txcw;		/* prototype TXCW register */
    494 	uint32_t sc_tipg;		/* prototype TIPG register */
    495 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    496 	uint32_t sc_pba;		/* prototype PBA register */
    497 
    498 	int sc_tbi_linkup;		/* TBI link status */
    499 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    500 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    501 
    502 	int sc_mchash_type;		/* multicast filter offset */
    503 
    504 	krndsource_t rnd_source;	/* random source */
    505 
    506 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    507 
    508 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    509 	kmutex_t *sc_ich_phymtx;	/*
    510 					 * 82574/82583/ICH/PCH specific PHY
    511 					 * mutex. For 82574/82583, the mutex
    512 					 * is used for both PHY and NVM.
    513 					 */
    514 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    515 
    516 	struct wm_phyop phy;
    517 };
    518 
    519 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    520 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    521 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    522 
    523 #ifdef WM_MPSAFE
    524 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    525 #else
    526 #define CALLOUT_FLAGS	0
    527 #endif
    528 
    529 #define	WM_RXCHAIN_RESET(rxq)						\
    530 do {									\
    531 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    532 	*(rxq)->rxq_tailp = NULL;					\
    533 	(rxq)->rxq_len = 0;						\
    534 } while (/*CONSTCOND*/0)
    535 
    536 #define	WM_RXCHAIN_LINK(rxq, m)						\
    537 do {									\
    538 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    539 	(rxq)->rxq_tailp = &(m)->m_next;				\
    540 } while (/*CONSTCOND*/0)
    541 
    542 #ifdef WM_EVENT_COUNTERS
    543 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    544 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    545 
    546 #define WM_Q_EVCNT_INCR(qname, evname)			\
    547 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    548 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    549 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    550 #else /* !WM_EVENT_COUNTERS */
    551 #define	WM_EVCNT_INCR(ev)	/* nothing */
    552 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    553 
    554 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    555 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    556 #endif /* !WM_EVENT_COUNTERS */
    557 
    558 #define	CSR_READ(sc, reg)						\
    559 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    560 #define	CSR_WRITE(sc, reg, val)						\
    561 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    562 #define	CSR_WRITE_FLUSH(sc)						\
    563 	(void) CSR_READ((sc), WMREG_STATUS)
    564 
    565 #define ICH8_FLASH_READ32(sc, reg)					\
    566 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    567 	    (reg) + sc->sc_flashreg_offset)
    568 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    569 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    570 	    (reg) + sc->sc_flashreg_offset, (data))
    571 
    572 #define ICH8_FLASH_READ16(sc, reg)					\
    573 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    574 	    (reg) + sc->sc_flashreg_offset)
    575 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    576 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    577 	    (reg) + sc->sc_flashreg_offset, (data))
    578 
    579 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    580 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    581 
    582 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    583 #define	WM_CDTXADDR_HI(txq, x)						\
    584 	(sizeof(bus_addr_t) == 8 ?					\
    585 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    586 
    587 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    588 #define	WM_CDRXADDR_HI(rxq, x)						\
    589 	(sizeof(bus_addr_t) == 8 ?					\
    590 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    591 
    592 /*
    593  * Register read/write functions.
    594  * Other than CSR_{READ|WRITE}().
    595  */
    596 #if 0
    597 static inline uint32_t wm_io_read(struct wm_softc *, int);
    598 #endif
    599 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    600 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    601 	uint32_t, uint32_t);
    602 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    603 
    604 /*
    605  * Descriptor sync/init functions.
    606  */
    607 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    608 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    609 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    610 
    611 /*
    612  * Device driver interface functions and commonly used functions.
    613  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    614  */
    615 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    616 static int	wm_match(device_t, cfdata_t, void *);
    617 static void	wm_attach(device_t, device_t, void *);
    618 static int	wm_detach(device_t, int);
    619 static bool	wm_suspend(device_t, const pmf_qual_t *);
    620 static bool	wm_resume(device_t, const pmf_qual_t *);
    621 static void	wm_watchdog(struct ifnet *);
    622 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    623 static void	wm_tick(void *);
    624 static int	wm_ifflags_cb(struct ethercom *);
    625 static int	wm_ioctl(struct ifnet *, u_long, void *);
    626 /* MAC address related */
    627 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    628 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    629 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    630 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    631 static void	wm_set_filter(struct wm_softc *);
    632 /* Reset and init related */
    633 static void	wm_set_vlan(struct wm_softc *);
    634 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    635 static void	wm_get_auto_rd_done(struct wm_softc *);
    636 static void	wm_lan_init_done(struct wm_softc *);
    637 static void	wm_get_cfg_done(struct wm_softc *);
    638 static void	wm_initialize_hardware_bits(struct wm_softc *);
    639 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    640 static void	wm_reset(struct wm_softc *);
    641 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    642 static void	wm_rxdrain(struct wm_rxqueue *);
    643 static void	wm_rss_getkey(uint8_t *);
    644 static void	wm_init_rss(struct wm_softc *);
    645 static void	wm_adjust_qnum(struct wm_softc *, int);
    646 static int	wm_setup_legacy(struct wm_softc *);
    647 static int	wm_setup_msix(struct wm_softc *);
    648 static int	wm_init(struct ifnet *);
    649 static int	wm_init_locked(struct ifnet *);
    650 static void	wm_turnon(struct wm_softc *);
    651 static void	wm_turnoff(struct wm_softc *);
    652 static void	wm_stop(struct ifnet *, int);
    653 static void	wm_stop_locked(struct ifnet *, int);
    654 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    655 static void	wm_82547_txfifo_stall(void *);
    656 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    657 /* DMA related */
    658 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    659 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    660 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    661 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    662     struct wm_txqueue *);
    663 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    664 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    665 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    666     struct wm_rxqueue *);
    667 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    668 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    669 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    670 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    671 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    672 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    673 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    674     struct wm_txqueue *);
    675 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    676     struct wm_rxqueue *);
    677 static int	wm_alloc_txrx_queues(struct wm_softc *);
    678 static void	wm_free_txrx_queues(struct wm_softc *);
    679 static int	wm_init_txrx_queues(struct wm_softc *);
    680 /* Start */
    681 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    682     uint32_t *, uint8_t *);
    683 static void	wm_start(struct ifnet *);
    684 static void	wm_start_locked(struct ifnet *);
    685 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    686     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    687 static void	wm_nq_start(struct ifnet *);
    688 static void	wm_nq_start_locked(struct ifnet *);
    689 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    690 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    691 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    692 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    693 /* Interrupt */
    694 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    695 static void	wm_rxeof(struct wm_rxqueue *);
    696 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    697 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    698 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    699 static void	wm_linkintr(struct wm_softc *, uint32_t);
    700 static int	wm_intr_legacy(void *);
    701 static int	wm_txrxintr_msix(void *);
    702 static int	wm_linkintr_msix(void *);
    703 
    704 /*
    705  * Media related.
    706  * GMII, SGMII, TBI, SERDES and SFP.
    707  */
    708 /* Common */
    709 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    710 /* GMII related */
    711 static void	wm_gmii_reset(struct wm_softc *);
    712 static int	wm_get_phy_id_82575(struct wm_softc *);
    713 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    714 static int	wm_gmii_mediachange(struct ifnet *);
    715 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    716 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    717 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    718 static int	wm_gmii_i82543_readreg(device_t, int, int);
    719 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    720 static int	wm_gmii_mdic_readreg(device_t, int, int);
    721 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    722 static int	wm_gmii_i82544_readreg(device_t, int, int);
    723 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    724 static int	wm_gmii_i80003_readreg(device_t, int, int);
    725 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    726 static int	wm_gmii_bm_readreg(device_t, int, int);
    727 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    728 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    729 static int	wm_gmii_hv_readreg(device_t, int, int);
    730 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    731 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    732 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    733 static int	wm_gmii_82580_readreg(device_t, int, int);
    734 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    735 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    736 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    737 static void	wm_gmii_statchg(struct ifnet *);
    738 static int	wm_kmrn_readreg(struct wm_softc *, int);
    739 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    740 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    741 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    742 /* SGMII */
    743 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    744 static int	wm_sgmii_readreg(device_t, int, int);
    745 static void	wm_sgmii_writereg(device_t, int, int, int);
    746 /* TBI related */
    747 static void	wm_tbi_mediainit(struct wm_softc *);
    748 static int	wm_tbi_mediachange(struct ifnet *);
    749 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    750 static int	wm_check_for_link(struct wm_softc *);
    751 static void	wm_tbi_tick(struct wm_softc *);
    752 /* SERDES related */
    753 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    754 static int	wm_serdes_mediachange(struct ifnet *);
    755 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    756 static void	wm_serdes_tick(struct wm_softc *);
    757 /* SFP related */
    758 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    759 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    760 
    761 /*
    762  * NVM related.
    763  * Microwire, SPI (w/wo EERD) and Flash.
    764  */
    765 /* Misc functions */
    766 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    767 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    768 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    769 /* Microwire */
    770 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    771 /* SPI */
    772 static int	wm_nvm_ready_spi(struct wm_softc *);
    773 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    774 /* Using with EERD */
    775 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    776 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    777 /* Flash */
    778 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    779     unsigned int *);
    780 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    781 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    782 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    783 	uint32_t *);
    784 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    785 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    786 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    787 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    788 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    789 /* iNVM */
    790 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    791 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    792 /* Lock, detecting NVM type, validate checksum and read */
    793 static int	wm_nvm_acquire(struct wm_softc *);
    794 static void	wm_nvm_release(struct wm_softc *);
    795 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    796 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    797 static int	wm_nvm_validate_checksum(struct wm_softc *);
    798 static void	wm_nvm_version_invm(struct wm_softc *);
    799 static void	wm_nvm_version(struct wm_softc *);
    800 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    801 
    802 /*
    803  * Hardware semaphores.
    804  * Very complexed...
    805  */
    806 static int	wm_get_null(struct wm_softc *);
    807 static void	wm_put_null(struct wm_softc *);
    808 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    809 static void	wm_put_swsm_semaphore(struct wm_softc *);
    810 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    811 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    812 static int	wm_get_phy_82575(struct wm_softc *);
    813 static void	wm_put_phy_82575(struct wm_softc *);
    814 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    815 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    816 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    817 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    818 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    819 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    820 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    821 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    822 
    823 /*
    824  * Management mode and power management related subroutines.
    825  * BMC, AMT, suspend/resume and EEE.
    826  */
    827 #if 0
    828 static int	wm_check_mng_mode(struct wm_softc *);
    829 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    830 static int	wm_check_mng_mode_82574(struct wm_softc *);
    831 static int	wm_check_mng_mode_generic(struct wm_softc *);
    832 #endif
    833 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    834 static bool	wm_phy_resetisblocked(struct wm_softc *);
    835 static void	wm_get_hw_control(struct wm_softc *);
    836 static void	wm_release_hw_control(struct wm_softc *);
    837 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    838 static void	wm_smbustopci(struct wm_softc *);
    839 static void	wm_init_manageability(struct wm_softc *);
    840 static void	wm_release_manageability(struct wm_softc *);
    841 static void	wm_get_wakeup(struct wm_softc *);
    842 static void	wm_enable_phy_wakeup(struct wm_softc *);
    843 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    844 static void	wm_enable_wakeup(struct wm_softc *);
    845 /* LPLU (Low Power Link Up) */
    846 static void	wm_lplu_d0_disable(struct wm_softc *);
    847 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    848 /* EEE */
    849 static void	wm_set_eee_i350(struct wm_softc *);
    850 
    851 /*
    852  * Workarounds (mainly PHY related).
    853  * Basically, PHY's workarounds are in the PHY drivers.
    854  */
    855 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    856 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    857 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    858 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    859 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    860 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    861 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    862 static void	wm_reset_init_script_82575(struct wm_softc *);
    863 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    864 static void	wm_pll_workaround_i210(struct wm_softc *);
    865 
    866 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    867     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    868 
    869 /*
    870  * Devices supported by this driver.
    871  */
    872 static const struct wm_product {
    873 	pci_vendor_id_t		wmp_vendor;
    874 	pci_product_id_t	wmp_product;
    875 	const char		*wmp_name;
    876 	wm_chip_type		wmp_type;
    877 	uint32_t		wmp_flags;
    878 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    879 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    880 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    881 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    882 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    883 } wm_products[] = {
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    885 	  "Intel i82542 1000BASE-X Ethernet",
    886 	  WM_T_82542_2_1,	WMP_F_FIBER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    889 	  "Intel i82543GC 1000BASE-X Ethernet",
    890 	  WM_T_82543,		WMP_F_FIBER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    893 	  "Intel i82543GC 1000BASE-T Ethernet",
    894 	  WM_T_82543,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    897 	  "Intel i82544EI 1000BASE-T Ethernet",
    898 	  WM_T_82544,		WMP_F_COPPER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    901 	  "Intel i82544EI 1000BASE-X Ethernet",
    902 	  WM_T_82544,		WMP_F_FIBER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    905 	  "Intel i82544GC 1000BASE-T Ethernet",
    906 	  WM_T_82544,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    909 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    910 	  WM_T_82544,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    913 	  "Intel i82540EM 1000BASE-T Ethernet",
    914 	  WM_T_82540,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    917 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    918 	  WM_T_82540,		WMP_F_COPPER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    921 	  "Intel i82540EP 1000BASE-T Ethernet",
    922 	  WM_T_82540,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    925 	  "Intel i82540EP 1000BASE-T Ethernet",
    926 	  WM_T_82540,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    929 	  "Intel i82540EP 1000BASE-T Ethernet",
    930 	  WM_T_82540,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    933 	  "Intel i82545EM 1000BASE-T Ethernet",
    934 	  WM_T_82545,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    937 	  "Intel i82545GM 1000BASE-T Ethernet",
    938 	  WM_T_82545_3,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    941 	  "Intel i82545GM 1000BASE-X Ethernet",
    942 	  WM_T_82545_3,		WMP_F_FIBER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    945 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    946 	  WM_T_82545_3,		WMP_F_SERDES },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    949 	  "Intel i82546EB 1000BASE-T Ethernet",
    950 	  WM_T_82546,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    953 	  "Intel i82546EB 1000BASE-T Ethernet",
    954 	  WM_T_82546,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    957 	  "Intel i82545EM 1000BASE-X Ethernet",
    958 	  WM_T_82545,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    961 	  "Intel i82546EB 1000BASE-X Ethernet",
    962 	  WM_T_82546,		WMP_F_FIBER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    965 	  "Intel i82546GB 1000BASE-T Ethernet",
    966 	  WM_T_82546_3,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    969 	  "Intel i82546GB 1000BASE-X Ethernet",
    970 	  WM_T_82546_3,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    973 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    974 	  WM_T_82546_3,		WMP_F_SERDES },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    977 	  "i82546GB quad-port Gigabit Ethernet",
    978 	  WM_T_82546_3,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    981 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    982 	  WM_T_82546_3,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    985 	  "Intel PRO/1000MT (82546GB)",
    986 	  WM_T_82546_3,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    989 	  "Intel i82541EI 1000BASE-T Ethernet",
    990 	  WM_T_82541,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    993 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    994 	  WM_T_82541,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    997 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    998 	  WM_T_82541,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1001 	  "Intel i82541ER 1000BASE-T Ethernet",
   1002 	  WM_T_82541_2,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1005 	  "Intel i82541GI 1000BASE-T Ethernet",
   1006 	  WM_T_82541_2,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1009 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1010 	  WM_T_82541_2,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1013 	  "Intel i82541PI 1000BASE-T Ethernet",
   1014 	  WM_T_82541_2,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1017 	  "Intel i82547EI 1000BASE-T Ethernet",
   1018 	  WM_T_82547,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1021 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1022 	  WM_T_82547,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1025 	  "Intel i82547GI 1000BASE-T Ethernet",
   1026 	  WM_T_82547_2,		WMP_F_COPPER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1029 	  "Intel PRO/1000 PT (82571EB)",
   1030 	  WM_T_82571,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1033 	  "Intel PRO/1000 PF (82571EB)",
   1034 	  WM_T_82571,		WMP_F_FIBER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1037 	  "Intel PRO/1000 PB (82571EB)",
   1038 	  WM_T_82571,		WMP_F_SERDES },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1041 	  "Intel PRO/1000 QT (82571EB)",
   1042 	  WM_T_82571,		WMP_F_COPPER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1045 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1046 	  WM_T_82571,		WMP_F_COPPER, },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1049 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1050 	  WM_T_82571,		WMP_F_COPPER, },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1053 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1054 	  WM_T_82571,		WMP_F_SERDES, },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1057 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1058 	  WM_T_82571,		WMP_F_SERDES, },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1061 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1062 	  WM_T_82571,		WMP_F_FIBER, },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1065 	  "Intel i82572EI 1000baseT Ethernet",
   1066 	  WM_T_82572,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1069 	  "Intel i82572EI 1000baseX Ethernet",
   1070 	  WM_T_82572,		WMP_F_FIBER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1073 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1074 	  WM_T_82572,		WMP_F_SERDES },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1077 	  "Intel i82572EI 1000baseT Ethernet",
   1078 	  WM_T_82572,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1081 	  "Intel i82573E",
   1082 	  WM_T_82573,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1085 	  "Intel i82573E IAMT",
   1086 	  WM_T_82573,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1089 	  "Intel i82573L Gigabit Ethernet",
   1090 	  WM_T_82573,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1093 	  "Intel i82574L",
   1094 	  WM_T_82574,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1097 	  "Intel i82574L",
   1098 	  WM_T_82574,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1101 	  "Intel i82583V",
   1102 	  WM_T_82583,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1105 	  "i80003 dual 1000baseT Ethernet",
   1106 	  WM_T_80003,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1109 	  "i80003 dual 1000baseX Ethernet",
   1110 	  WM_T_80003,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1113 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1114 	  WM_T_80003,		WMP_F_SERDES },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1117 	  "Intel i80003 1000baseT Ethernet",
   1118 	  WM_T_80003,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1121 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1122 	  WM_T_80003,		WMP_F_SERDES },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1125 	  "Intel i82801H (M_AMT) LAN Controller",
   1126 	  WM_T_ICH8,		WMP_F_COPPER },
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1128 	  "Intel i82801H (AMT) LAN Controller",
   1129 	  WM_T_ICH8,		WMP_F_COPPER },
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1131 	  "Intel i82801H LAN Controller",
   1132 	  WM_T_ICH8,		WMP_F_COPPER },
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1134 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1135 	  WM_T_ICH8,		WMP_F_COPPER },
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1137 	  "Intel i82801H (M) LAN Controller",
   1138 	  WM_T_ICH8,		WMP_F_COPPER },
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1140 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1141 	  WM_T_ICH8,		WMP_F_COPPER },
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1143 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1144 	  WM_T_ICH8,		WMP_F_COPPER },
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1146 	  "82567V-3 LAN Controller",
   1147 	  WM_T_ICH8,		WMP_F_COPPER },
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1149 	  "82801I (AMT) LAN Controller",
   1150 	  WM_T_ICH9,		WMP_F_COPPER },
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1152 	  "82801I 10/100 LAN Controller",
   1153 	  WM_T_ICH9,		WMP_F_COPPER },
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1155 	  "82801I (G) 10/100 LAN Controller",
   1156 	  WM_T_ICH9,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1158 	  "82801I (GT) 10/100 LAN Controller",
   1159 	  WM_T_ICH9,		WMP_F_COPPER },
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1161 	  "82801I (C) LAN Controller",
   1162 	  WM_T_ICH9,		WMP_F_COPPER },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1164 	  "82801I mobile LAN Controller",
   1165 	  WM_T_ICH9,		WMP_F_COPPER },
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1167 	  "82801I mobile (V) LAN Controller",
   1168 	  WM_T_ICH9,		WMP_F_COPPER },
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1170 	  "82801I mobile (AMT) LAN Controller",
   1171 	  WM_T_ICH9,		WMP_F_COPPER },
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1173 	  "82567LM-4 LAN Controller",
   1174 	  WM_T_ICH9,		WMP_F_COPPER },
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1176 	  "82567LM-2 LAN Controller",
   1177 	  WM_T_ICH10,		WMP_F_COPPER },
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1179 	  "82567LF-2 LAN Controller",
   1180 	  WM_T_ICH10,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1182 	  "82567LM-3 LAN Controller",
   1183 	  WM_T_ICH10,		WMP_F_COPPER },
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1185 	  "82567LF-3 LAN Controller",
   1186 	  WM_T_ICH10,		WMP_F_COPPER },
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1188 	  "82567V-2 LAN Controller",
   1189 	  WM_T_ICH10,		WMP_F_COPPER },
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1191 	  "82567V-3? LAN Controller",
   1192 	  WM_T_ICH10,		WMP_F_COPPER },
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1194 	  "HANKSVILLE LAN Controller",
   1195 	  WM_T_ICH10,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1197 	  "PCH LAN (82577LM) Controller",
   1198 	  WM_T_PCH,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1200 	  "PCH LAN (82577LC) Controller",
   1201 	  WM_T_PCH,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1203 	  "PCH LAN (82578DM) Controller",
   1204 	  WM_T_PCH,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1206 	  "PCH LAN (82578DC) Controller",
   1207 	  WM_T_PCH,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1209 	  "PCH2 LAN (82579LM) Controller",
   1210 	  WM_T_PCH2,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1212 	  "PCH2 LAN (82579V) Controller",
   1213 	  WM_T_PCH2,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1215 	  "82575EB dual-1000baseT Ethernet",
   1216 	  WM_T_82575,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1218 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1219 	  WM_T_82575,		WMP_F_SERDES },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1221 	  "82575GB quad-1000baseT Ethernet",
   1222 	  WM_T_82575,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1224 	  "82575GB quad-1000baseT Ethernet (PM)",
   1225 	  WM_T_82575,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1227 	  "82576 1000BaseT Ethernet",
   1228 	  WM_T_82576,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1230 	  "82576 1000BaseX Ethernet",
   1231 	  WM_T_82576,		WMP_F_FIBER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1234 	  "82576 gigabit Ethernet (SERDES)",
   1235 	  WM_T_82576,		WMP_F_SERDES },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1238 	  "82576 quad-1000BaseT Ethernet",
   1239 	  WM_T_82576,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1242 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1243 	  WM_T_82576,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1246 	  "82576 gigabit Ethernet",
   1247 	  WM_T_82576,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1250 	  "82576 gigabit Ethernet (SERDES)",
   1251 	  WM_T_82576,		WMP_F_SERDES },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1253 	  "82576 quad-gigabit Ethernet (SERDES)",
   1254 	  WM_T_82576,		WMP_F_SERDES },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1257 	  "82580 1000BaseT Ethernet",
   1258 	  WM_T_82580,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1260 	  "82580 1000BaseX Ethernet",
   1261 	  WM_T_82580,		WMP_F_FIBER },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1264 	  "82580 1000BaseT Ethernet (SERDES)",
   1265 	  WM_T_82580,		WMP_F_SERDES },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1268 	  "82580 gigabit Ethernet (SGMII)",
   1269 	  WM_T_82580,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1271 	  "82580 dual-1000BaseT Ethernet",
   1272 	  WM_T_82580,		WMP_F_COPPER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1275 	  "82580 quad-1000BaseX Ethernet",
   1276 	  WM_T_82580,		WMP_F_FIBER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1279 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1280 	  WM_T_82580,		WMP_F_COPPER },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1283 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1284 	  WM_T_82580,		WMP_F_SERDES },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1287 	  "DH89XXCC 1000BASE-KX Ethernet",
   1288 	  WM_T_82580,		WMP_F_SERDES },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1291 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1292 	  WM_T_82580,		WMP_F_SERDES },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1295 	  "I350 Gigabit Network Connection",
   1296 	  WM_T_I350,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1299 	  "I350 Gigabit Fiber Network Connection",
   1300 	  WM_T_I350,		WMP_F_FIBER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1303 	  "I350 Gigabit Backplane Connection",
   1304 	  WM_T_I350,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1307 	  "I350 Quad Port Gigabit Ethernet",
   1308 	  WM_T_I350,		WMP_F_SERDES },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1311 	  "I350 Gigabit Connection",
   1312 	  WM_T_I350,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1315 	  "I354 Gigabit Ethernet (KX)",
   1316 	  WM_T_I354,		WMP_F_SERDES },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1319 	  "I354 Gigabit Ethernet (SGMII)",
   1320 	  WM_T_I354,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1323 	  "I354 Gigabit Ethernet (2.5G)",
   1324 	  WM_T_I354,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1327 	  "I210-T1 Ethernet Server Adapter",
   1328 	  WM_T_I210,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1331 	  "I210 Ethernet (Copper OEM)",
   1332 	  WM_T_I210,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1335 	  "I210 Ethernet (Copper IT)",
   1336 	  WM_T_I210,		WMP_F_COPPER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1339 	  "I210 Ethernet (FLASH less)",
   1340 	  WM_T_I210,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1343 	  "I210 Gigabit Ethernet (Fiber)",
   1344 	  WM_T_I210,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1347 	  "I210 Gigabit Ethernet (SERDES)",
   1348 	  WM_T_I210,		WMP_F_SERDES },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1351 	  "I210 Gigabit Ethernet (FLASH less)",
   1352 	  WM_T_I210,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1355 	  "I210 Gigabit Ethernet (SGMII)",
   1356 	  WM_T_I210,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1359 	  "I211 Ethernet (COPPER)",
   1360 	  WM_T_I211,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1362 	  "I217 V Ethernet Connection",
   1363 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1365 	  "I217 LM Ethernet Connection",
   1366 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1368 	  "I218 V Ethernet Connection",
   1369 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1371 	  "I218 V Ethernet Connection",
   1372 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1374 	  "I218 V Ethernet Connection",
   1375 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1377 	  "I218 LM Ethernet Connection",
   1378 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1380 	  "I218 LM Ethernet Connection",
   1381 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1383 	  "I218 LM Ethernet Connection",
   1384 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1385 #if 0
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1387 	  "I219 V Ethernet Connection",
   1388 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1390 	  "I219 V Ethernet Connection",
   1391 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1393 	  "I219 V Ethernet Connection",
   1394 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1396 	  "I219 V Ethernet Connection",
   1397 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1399 	  "I219 LM Ethernet Connection",
   1400 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1402 	  "I219 LM Ethernet Connection",
   1403 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1405 	  "I219 LM Ethernet Connection",
   1406 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1408 	  "I219 LM Ethernet Connection",
   1409 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1411 	  "I219 LM Ethernet Connection",
   1412 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1413 #endif
   1414 	{ 0,			0,
   1415 	  NULL,
   1416 	  0,			0 },
   1417 };
   1418 
   1419 /*
   1420  * Register read/write functions.
   1421  * Other than CSR_{READ|WRITE}().
   1422  */
   1423 
   1424 #if 0 /* Not currently used */
   1425 static inline uint32_t
   1426 wm_io_read(struct wm_softc *sc, int reg)
   1427 {
   1428 
   1429 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1430 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1431 }
   1432 #endif
   1433 
   1434 static inline void
   1435 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1436 {
   1437 
   1438 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1439 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1440 }
   1441 
   1442 static inline void
   1443 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1444     uint32_t data)
   1445 {
   1446 	uint32_t regval;
   1447 	int i;
   1448 
   1449 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1450 
   1451 	CSR_WRITE(sc, reg, regval);
   1452 
   1453 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1454 		delay(5);
   1455 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1456 			break;
   1457 	}
   1458 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1459 		aprint_error("%s: WARNING:"
   1460 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1461 		    device_xname(sc->sc_dev), reg);
   1462 	}
   1463 }
   1464 
   1465 static inline void
   1466 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1467 {
   1468 	wa->wa_low = htole32(v & 0xffffffffU);
   1469 	if (sizeof(bus_addr_t) == 8)
   1470 		wa->wa_high = htole32((uint64_t) v >> 32);
   1471 	else
   1472 		wa->wa_high = 0;
   1473 }
   1474 
   1475 /*
   1476  * Descriptor sync/init functions.
   1477  */
   1478 static inline void
   1479 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1480 {
   1481 	struct wm_softc *sc = txq->txq_sc;
   1482 
   1483 	/* If it will wrap around, sync to the end of the ring. */
   1484 	if ((start + num) > WM_NTXDESC(txq)) {
   1485 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1486 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1487 		    (WM_NTXDESC(txq) - start), ops);
   1488 		num -= (WM_NTXDESC(txq) - start);
   1489 		start = 0;
   1490 	}
   1491 
   1492 	/* Now sync whatever is left. */
   1493 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1494 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1495 }
   1496 
   1497 static inline void
   1498 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1499 {
   1500 	struct wm_softc *sc = rxq->rxq_sc;
   1501 
   1502 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1503 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1504 }
   1505 
   1506 static inline void
   1507 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1508 {
   1509 	struct wm_softc *sc = rxq->rxq_sc;
   1510 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1511 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1512 	struct mbuf *m = rxs->rxs_mbuf;
   1513 
   1514 	/*
   1515 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1516 	 * so that the payload after the Ethernet header is aligned
   1517 	 * to a 4-byte boundary.
   1518 
   1519 	 * XXX BRAINDAMAGE ALERT!
   1520 	 * The stupid chip uses the same size for every buffer, which
   1521 	 * is set in the Receive Control register.  We are using the 2K
   1522 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1523 	 * reason, we can't "scoot" packets longer than the standard
   1524 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1525 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1526 	 * the upper layer copy the headers.
   1527 	 */
   1528 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1529 
   1530 	wm_set_dma_addr(&rxd->wrx_addr,
   1531 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1532 	rxd->wrx_len = 0;
   1533 	rxd->wrx_cksum = 0;
   1534 	rxd->wrx_status = 0;
   1535 	rxd->wrx_errors = 0;
   1536 	rxd->wrx_special = 0;
   1537 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1538 
   1539 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1540 }
   1541 
   1542 /*
   1543  * Device driver interface functions and commonly used functions.
   1544  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1545  */
   1546 
   1547 /* Lookup supported device table */
   1548 static const struct wm_product *
   1549 wm_lookup(const struct pci_attach_args *pa)
   1550 {
   1551 	const struct wm_product *wmp;
   1552 
   1553 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1554 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1555 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1556 			return wmp;
   1557 	}
   1558 	return NULL;
   1559 }
   1560 
   1561 /* The match function (ca_match) */
   1562 static int
   1563 wm_match(device_t parent, cfdata_t cf, void *aux)
   1564 {
   1565 	struct pci_attach_args *pa = aux;
   1566 
   1567 	if (wm_lookup(pa) != NULL)
   1568 		return 1;
   1569 
   1570 	return 0;
   1571 }
   1572 
   1573 /* The attach function (ca_attach) */
   1574 static void
   1575 wm_attach(device_t parent, device_t self, void *aux)
   1576 {
   1577 	struct wm_softc *sc = device_private(self);
   1578 	struct pci_attach_args *pa = aux;
   1579 	prop_dictionary_t dict;
   1580 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1581 	pci_chipset_tag_t pc = pa->pa_pc;
   1582 	int counts[PCI_INTR_TYPE_SIZE];
   1583 	pci_intr_type_t max_type;
   1584 	const char *eetype, *xname;
   1585 	bus_space_tag_t memt;
   1586 	bus_space_handle_t memh;
   1587 	bus_size_t memsize;
   1588 	int memh_valid;
   1589 	int i, error;
   1590 	const struct wm_product *wmp;
   1591 	prop_data_t ea;
   1592 	prop_number_t pn;
   1593 	uint8_t enaddr[ETHER_ADDR_LEN];
   1594 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1595 	pcireg_t preg, memtype;
   1596 	uint16_t eeprom_data, apme_mask;
   1597 	bool force_clear_smbi;
   1598 	uint32_t link_mode;
   1599 	uint32_t reg;
   1600 
   1601 	sc->sc_dev = self;
   1602 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1603 	sc->sc_core_stopping = false;
   1604 
   1605 	wmp = wm_lookup(pa);
   1606 #ifdef DIAGNOSTIC
   1607 	if (wmp == NULL) {
   1608 		printf("\n");
   1609 		panic("wm_attach: impossible");
   1610 	}
   1611 #endif
   1612 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1613 
   1614 	sc->sc_pc = pa->pa_pc;
   1615 	sc->sc_pcitag = pa->pa_tag;
   1616 
   1617 	if (pci_dma64_available(pa))
   1618 		sc->sc_dmat = pa->pa_dmat64;
   1619 	else
   1620 		sc->sc_dmat = pa->pa_dmat;
   1621 
   1622 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1623 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1624 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1625 
   1626 	sc->sc_type = wmp->wmp_type;
   1627 
   1628 	/* Set default function pointers */
   1629 	sc->phy.acquire = wm_get_null;
   1630 	sc->phy.release = wm_put_null;
   1631 
   1632 	if (sc->sc_type < WM_T_82543) {
   1633 		if (sc->sc_rev < 2) {
   1634 			aprint_error_dev(sc->sc_dev,
   1635 			    "i82542 must be at least rev. 2\n");
   1636 			return;
   1637 		}
   1638 		if (sc->sc_rev < 3)
   1639 			sc->sc_type = WM_T_82542_2_0;
   1640 	}
   1641 
   1642 	/*
   1643 	 * Disable MSI for Errata:
   1644 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1645 	 *
   1646 	 *  82544: Errata 25
   1647 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1648 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1649 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1650 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1651 	 *
   1652 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1653 	 *
   1654 	 *  82571 & 82572: Errata 63
   1655 	 */
   1656 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1657 	    || (sc->sc_type == WM_T_82572))
   1658 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1659 
   1660 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1661 	    || (sc->sc_type == WM_T_82580)
   1662 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1663 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1664 		sc->sc_flags |= WM_F_NEWQUEUE;
   1665 
   1666 	/* Set device properties (mactype) */
   1667 	dict = device_properties(sc->sc_dev);
   1668 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1669 
   1670 	/*
   1671 	 * Map the device.  All devices support memory-mapped acccess,
   1672 	 * and it is really required for normal operation.
   1673 	 */
   1674 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1675 	switch (memtype) {
   1676 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1677 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1678 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1679 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1680 		break;
   1681 	default:
   1682 		memh_valid = 0;
   1683 		break;
   1684 	}
   1685 
   1686 	if (memh_valid) {
   1687 		sc->sc_st = memt;
   1688 		sc->sc_sh = memh;
   1689 		sc->sc_ss = memsize;
   1690 	} else {
   1691 		aprint_error_dev(sc->sc_dev,
   1692 		    "unable to map device registers\n");
   1693 		return;
   1694 	}
   1695 
   1696 	/*
   1697 	 * In addition, i82544 and later support I/O mapped indirect
   1698 	 * register access.  It is not desirable (nor supported in
   1699 	 * this driver) to use it for normal operation, though it is
   1700 	 * required to work around bugs in some chip versions.
   1701 	 */
   1702 	if (sc->sc_type >= WM_T_82544) {
   1703 		/* First we have to find the I/O BAR. */
   1704 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1705 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1706 			if (memtype == PCI_MAPREG_TYPE_IO)
   1707 				break;
   1708 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1709 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1710 				i += 4;	/* skip high bits, too */
   1711 		}
   1712 		if (i < PCI_MAPREG_END) {
   1713 			/*
   1714 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1715 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1716 			 * It's no problem because newer chips has no this
   1717 			 * bug.
   1718 			 *
   1719 			 * The i8254x doesn't apparently respond when the
   1720 			 * I/O BAR is 0, which looks somewhat like it's not
   1721 			 * been configured.
   1722 			 */
   1723 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1724 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1725 				aprint_error_dev(sc->sc_dev,
   1726 				    "WARNING: I/O BAR at zero.\n");
   1727 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1728 					0, &sc->sc_iot, &sc->sc_ioh,
   1729 					NULL, &sc->sc_ios) == 0) {
   1730 				sc->sc_flags |= WM_F_IOH_VALID;
   1731 			} else {
   1732 				aprint_error_dev(sc->sc_dev,
   1733 				    "WARNING: unable to map I/O space\n");
   1734 			}
   1735 		}
   1736 
   1737 	}
   1738 
   1739 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1740 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1741 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1742 	if (sc->sc_type < WM_T_82542_2_1)
   1743 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1744 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1745 
   1746 	/* power up chip */
   1747 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1748 	    NULL)) && error != EOPNOTSUPP) {
   1749 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1750 		return;
   1751 	}
   1752 
   1753 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1754 
   1755 	/* Allocation settings */
   1756 	max_type = PCI_INTR_TYPE_MSIX;
   1757 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1758 	counts[PCI_INTR_TYPE_MSI] = 1;
   1759 	counts[PCI_INTR_TYPE_INTX] = 1;
   1760 
   1761 alloc_retry:
   1762 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1763 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1764 		return;
   1765 	}
   1766 
   1767 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1768 		error = wm_setup_msix(sc);
   1769 		if (error) {
   1770 			pci_intr_release(pc, sc->sc_intrs,
   1771 			    counts[PCI_INTR_TYPE_MSIX]);
   1772 
   1773 			/* Setup for MSI: Disable MSI-X */
   1774 			max_type = PCI_INTR_TYPE_MSI;
   1775 			counts[PCI_INTR_TYPE_MSI] = 1;
   1776 			counts[PCI_INTR_TYPE_INTX] = 1;
   1777 			goto alloc_retry;
   1778 		}
   1779 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1780 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1781 		error = wm_setup_legacy(sc);
   1782 		if (error) {
   1783 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1784 			    counts[PCI_INTR_TYPE_MSI]);
   1785 
   1786 			/* The next try is for INTx: Disable MSI */
   1787 			max_type = PCI_INTR_TYPE_INTX;
   1788 			counts[PCI_INTR_TYPE_INTX] = 1;
   1789 			goto alloc_retry;
   1790 		}
   1791 	} else {
   1792 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1793 		error = wm_setup_legacy(sc);
   1794 		if (error) {
   1795 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1796 			    counts[PCI_INTR_TYPE_INTX]);
   1797 			return;
   1798 		}
   1799 	}
   1800 
   1801 	/*
   1802 	 * Check the function ID (unit number of the chip).
   1803 	 */
   1804 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1805 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1806 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1807 	    || (sc->sc_type == WM_T_82580)
   1808 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1809 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1810 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1811 	else
   1812 		sc->sc_funcid = 0;
   1813 
   1814 	/*
   1815 	 * Determine a few things about the bus we're connected to.
   1816 	 */
   1817 	if (sc->sc_type < WM_T_82543) {
   1818 		/* We don't really know the bus characteristics here. */
   1819 		sc->sc_bus_speed = 33;
   1820 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1821 		/*
   1822 		 * CSA (Communication Streaming Architecture) is about as fast
   1823 		 * a 32-bit 66MHz PCI Bus.
   1824 		 */
   1825 		sc->sc_flags |= WM_F_CSA;
   1826 		sc->sc_bus_speed = 66;
   1827 		aprint_verbose_dev(sc->sc_dev,
   1828 		    "Communication Streaming Architecture\n");
   1829 		if (sc->sc_type == WM_T_82547) {
   1830 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1831 			callout_setfunc(&sc->sc_txfifo_ch,
   1832 					wm_82547_txfifo_stall, sc);
   1833 			aprint_verbose_dev(sc->sc_dev,
   1834 			    "using 82547 Tx FIFO stall work-around\n");
   1835 		}
   1836 	} else if (sc->sc_type >= WM_T_82571) {
   1837 		sc->sc_flags |= WM_F_PCIE;
   1838 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1839 		    && (sc->sc_type != WM_T_ICH10)
   1840 		    && (sc->sc_type != WM_T_PCH)
   1841 		    && (sc->sc_type != WM_T_PCH2)
   1842 		    && (sc->sc_type != WM_T_PCH_LPT)
   1843 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1844 			/* ICH* and PCH* have no PCIe capability registers */
   1845 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1846 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1847 				NULL) == 0)
   1848 				aprint_error_dev(sc->sc_dev,
   1849 				    "unable to find PCIe capability\n");
   1850 		}
   1851 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1852 	} else {
   1853 		reg = CSR_READ(sc, WMREG_STATUS);
   1854 		if (reg & STATUS_BUS64)
   1855 			sc->sc_flags |= WM_F_BUS64;
   1856 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1857 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1858 
   1859 			sc->sc_flags |= WM_F_PCIX;
   1860 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1861 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1862 				aprint_error_dev(sc->sc_dev,
   1863 				    "unable to find PCIX capability\n");
   1864 			else if (sc->sc_type != WM_T_82545_3 &&
   1865 				 sc->sc_type != WM_T_82546_3) {
   1866 				/*
   1867 				 * Work around a problem caused by the BIOS
   1868 				 * setting the max memory read byte count
   1869 				 * incorrectly.
   1870 				 */
   1871 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1872 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1873 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1874 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1875 
   1876 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1877 				    PCIX_CMD_BYTECNT_SHIFT;
   1878 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1879 				    PCIX_STATUS_MAXB_SHIFT;
   1880 				if (bytecnt > maxb) {
   1881 					aprint_verbose_dev(sc->sc_dev,
   1882 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1883 					    512 << bytecnt, 512 << maxb);
   1884 					pcix_cmd = (pcix_cmd &
   1885 					    ~PCIX_CMD_BYTECNT_MASK) |
   1886 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1887 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1888 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1889 					    pcix_cmd);
   1890 				}
   1891 			}
   1892 		}
   1893 		/*
   1894 		 * The quad port adapter is special; it has a PCIX-PCIX
   1895 		 * bridge on the board, and can run the secondary bus at
   1896 		 * a higher speed.
   1897 		 */
   1898 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1899 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1900 								      : 66;
   1901 		} else if (sc->sc_flags & WM_F_PCIX) {
   1902 			switch (reg & STATUS_PCIXSPD_MASK) {
   1903 			case STATUS_PCIXSPD_50_66:
   1904 				sc->sc_bus_speed = 66;
   1905 				break;
   1906 			case STATUS_PCIXSPD_66_100:
   1907 				sc->sc_bus_speed = 100;
   1908 				break;
   1909 			case STATUS_PCIXSPD_100_133:
   1910 				sc->sc_bus_speed = 133;
   1911 				break;
   1912 			default:
   1913 				aprint_error_dev(sc->sc_dev,
   1914 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1915 				    reg & STATUS_PCIXSPD_MASK);
   1916 				sc->sc_bus_speed = 66;
   1917 				break;
   1918 			}
   1919 		} else
   1920 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1921 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1922 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1923 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1924 	}
   1925 
   1926 	/* clear interesting stat counters */
   1927 	CSR_READ(sc, WMREG_COLC);
   1928 	CSR_READ(sc, WMREG_RXERRC);
   1929 
   1930 	/* get PHY control from SMBus to PCIe */
   1931 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1932 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1933 		wm_smbustopci(sc);
   1934 
   1935 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1936 	    || (sc->sc_type >= WM_T_ICH8))
   1937 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1938 	if (sc->sc_type >= WM_T_ICH8)
   1939 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1940 
   1941 	/* Set PHY, NVM mutex related stuff */
   1942 	switch (sc->sc_type) {
   1943 	case WM_T_82542_2_0:
   1944 	case WM_T_82542_2_1:
   1945 	case WM_T_82543:
   1946 	case WM_T_82544:
   1947 		/* Microwire */
   1948 		sc->sc_nvm_wordsize = 64;
   1949 		sc->sc_nvm_addrbits = 6;
   1950 		break;
   1951 	case WM_T_82540:
   1952 	case WM_T_82545:
   1953 	case WM_T_82545_3:
   1954 	case WM_T_82546:
   1955 	case WM_T_82546_3:
   1956 		/* Microwire */
   1957 		reg = CSR_READ(sc, WMREG_EECD);
   1958 		if (reg & EECD_EE_SIZE) {
   1959 			sc->sc_nvm_wordsize = 256;
   1960 			sc->sc_nvm_addrbits = 8;
   1961 		} else {
   1962 			sc->sc_nvm_wordsize = 64;
   1963 			sc->sc_nvm_addrbits = 6;
   1964 		}
   1965 		sc->sc_flags |= WM_F_LOCK_EECD;
   1966 		break;
   1967 	case WM_T_82541:
   1968 	case WM_T_82541_2:
   1969 	case WM_T_82547:
   1970 	case WM_T_82547_2:
   1971 		sc->sc_flags |= WM_F_LOCK_EECD;
   1972 		reg = CSR_READ(sc, WMREG_EECD);
   1973 		if (reg & EECD_EE_TYPE) {
   1974 			/* SPI */
   1975 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1976 			wm_nvm_set_addrbits_size_eecd(sc);
   1977 		} else {
   1978 			/* Microwire */
   1979 			if ((reg & EECD_EE_ABITS) != 0) {
   1980 				sc->sc_nvm_wordsize = 256;
   1981 				sc->sc_nvm_addrbits = 8;
   1982 			} else {
   1983 				sc->sc_nvm_wordsize = 64;
   1984 				sc->sc_nvm_addrbits = 6;
   1985 			}
   1986 		}
   1987 		break;
   1988 	case WM_T_82571:
   1989 	case WM_T_82572:
   1990 		/* SPI */
   1991 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1992 		wm_nvm_set_addrbits_size_eecd(sc);
   1993 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1994 		sc->phy.acquire = wm_get_swsm_semaphore;
   1995 		sc->phy.release = wm_put_swsm_semaphore;
   1996 		break;
   1997 	case WM_T_82573:
   1998 	case WM_T_82574:
   1999 	case WM_T_82583:
   2000 		if (sc->sc_type == WM_T_82573) {
   2001 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2002 			sc->phy.acquire = wm_get_swsm_semaphore;
   2003 			sc->phy.release = wm_put_swsm_semaphore;
   2004 		} else {
   2005 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2006 			/* Both PHY and NVM use the same semaphore. */
   2007 			sc->phy.acquire
   2008 			    = wm_get_swfwhw_semaphore;
   2009 			sc->phy.release
   2010 			    = wm_put_swfwhw_semaphore;
   2011 		}
   2012 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2013 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2014 			sc->sc_nvm_wordsize = 2048;
   2015 		} else {
   2016 			/* SPI */
   2017 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2018 			wm_nvm_set_addrbits_size_eecd(sc);
   2019 		}
   2020 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2021 		break;
   2022 	case WM_T_82575:
   2023 	case WM_T_82576:
   2024 	case WM_T_82580:
   2025 	case WM_T_I350:
   2026 	case WM_T_I354:
   2027 	case WM_T_80003:
   2028 		/* SPI */
   2029 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2030 		wm_nvm_set_addrbits_size_eecd(sc);
   2031 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2032 		    | WM_F_LOCK_SWSM;
   2033 		sc->phy.acquire = wm_get_phy_82575;
   2034 		sc->phy.release = wm_put_phy_82575;
   2035 		break;
   2036 	case WM_T_ICH8:
   2037 	case WM_T_ICH9:
   2038 	case WM_T_ICH10:
   2039 	case WM_T_PCH:
   2040 	case WM_T_PCH2:
   2041 	case WM_T_PCH_LPT:
   2042 		/* FLASH */
   2043 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2044 		sc->sc_nvm_wordsize = 2048;
   2045 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2046 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2047 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2048 			aprint_error_dev(sc->sc_dev,
   2049 			    "can't map FLASH registers\n");
   2050 			goto out;
   2051 		}
   2052 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2053 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2054 		    ICH_FLASH_SECTOR_SIZE;
   2055 		sc->sc_ich8_flash_bank_size =
   2056 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2057 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2058 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2059 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2060 		sc->sc_flashreg_offset = 0;
   2061 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2062 		sc->phy.release = wm_put_swflag_ich8lan;
   2063 		break;
   2064 	case WM_T_PCH_SPT:
   2065 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2066 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2067 		sc->sc_flasht = sc->sc_st;
   2068 		sc->sc_flashh = sc->sc_sh;
   2069 		sc->sc_ich8_flash_base = 0;
   2070 		sc->sc_nvm_wordsize =
   2071 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2072 			* NVM_SIZE_MULTIPLIER;
   2073 		/* It is size in bytes, we want words */
   2074 		sc->sc_nvm_wordsize /= 2;
   2075 		/* assume 2 banks */
   2076 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2077 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2078 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2079 		sc->phy.release = wm_put_swflag_ich8lan;
   2080 		break;
   2081 	case WM_T_I210:
   2082 	case WM_T_I211:
   2083 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2084 			wm_nvm_set_addrbits_size_eecd(sc);
   2085 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2086 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2087 		} else {
   2088 			sc->sc_nvm_wordsize = INVM_SIZE;
   2089 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2090 		}
   2091 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2092 		sc->phy.acquire = wm_get_phy_82575;
   2093 		sc->phy.release = wm_put_phy_82575;
   2094 		break;
   2095 	default:
   2096 		break;
   2097 	}
   2098 
   2099 	/* Reset the chip to a known state. */
   2100 	wm_reset(sc);
   2101 
   2102 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2103 	switch (sc->sc_type) {
   2104 	case WM_T_82571:
   2105 	case WM_T_82572:
   2106 		reg = CSR_READ(sc, WMREG_SWSM2);
   2107 		if ((reg & SWSM2_LOCK) == 0) {
   2108 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2109 			force_clear_smbi = true;
   2110 		} else
   2111 			force_clear_smbi = false;
   2112 		break;
   2113 	case WM_T_82573:
   2114 	case WM_T_82574:
   2115 	case WM_T_82583:
   2116 		force_clear_smbi = true;
   2117 		break;
   2118 	default:
   2119 		force_clear_smbi = false;
   2120 		break;
   2121 	}
   2122 	if (force_clear_smbi) {
   2123 		reg = CSR_READ(sc, WMREG_SWSM);
   2124 		if ((reg & SWSM_SMBI) != 0)
   2125 			aprint_error_dev(sc->sc_dev,
   2126 			    "Please update the Bootagent\n");
   2127 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2128 	}
   2129 
   2130 	/*
   2131 	 * Defer printing the EEPROM type until after verifying the checksum
   2132 	 * This allows the EEPROM type to be printed correctly in the case
   2133 	 * that no EEPROM is attached.
   2134 	 */
   2135 	/*
   2136 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2137 	 * this for later, so we can fail future reads from the EEPROM.
   2138 	 */
   2139 	if (wm_nvm_validate_checksum(sc)) {
   2140 		/*
   2141 		 * Read twice again because some PCI-e parts fail the
   2142 		 * first check due to the link being in sleep state.
   2143 		 */
   2144 		if (wm_nvm_validate_checksum(sc))
   2145 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2146 	}
   2147 
   2148 	/* Set device properties (macflags) */
   2149 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2150 
   2151 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2152 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2153 	else {
   2154 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2155 		    sc->sc_nvm_wordsize);
   2156 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2157 			aprint_verbose("iNVM");
   2158 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2159 			aprint_verbose("FLASH(HW)");
   2160 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2161 			aprint_verbose("FLASH");
   2162 		else {
   2163 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2164 				eetype = "SPI";
   2165 			else
   2166 				eetype = "MicroWire";
   2167 			aprint_verbose("(%d address bits) %s EEPROM",
   2168 			    sc->sc_nvm_addrbits, eetype);
   2169 		}
   2170 	}
   2171 	wm_nvm_version(sc);
   2172 	aprint_verbose("\n");
   2173 
   2174 	/* Check for I21[01] PLL workaround */
   2175 	if (sc->sc_type == WM_T_I210)
   2176 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2177 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2178 		/* NVM image release 3.25 has a workaround */
   2179 		if ((sc->sc_nvm_ver_major < 3)
   2180 		    || ((sc->sc_nvm_ver_major == 3)
   2181 			&& (sc->sc_nvm_ver_minor < 25))) {
   2182 			aprint_verbose_dev(sc->sc_dev,
   2183 			    "ROM image version %d.%d is older than 3.25\n",
   2184 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2185 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2186 		}
   2187 	}
   2188 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2189 		wm_pll_workaround_i210(sc);
   2190 
   2191 	wm_get_wakeup(sc);
   2192 	switch (sc->sc_type) {
   2193 	case WM_T_82571:
   2194 	case WM_T_82572:
   2195 	case WM_T_82573:
   2196 	case WM_T_82574:
   2197 	case WM_T_82583:
   2198 	case WM_T_80003:
   2199 	case WM_T_ICH8:
   2200 	case WM_T_ICH9:
   2201 	case WM_T_ICH10:
   2202 	case WM_T_PCH:
   2203 	case WM_T_PCH2:
   2204 	case WM_T_PCH_LPT:
   2205 	case WM_T_PCH_SPT:
   2206 		/* Non-AMT based hardware can now take control from firmware */
   2207 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2208 			wm_get_hw_control(sc);
   2209 		break;
   2210 	default:
   2211 		break;
   2212 	}
   2213 
   2214 	/*
   2215 	 * Read the Ethernet address from the EEPROM, if not first found
   2216 	 * in device properties.
   2217 	 */
   2218 	ea = prop_dictionary_get(dict, "mac-address");
   2219 	if (ea != NULL) {
   2220 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2221 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2222 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2223 	} else {
   2224 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2225 			aprint_error_dev(sc->sc_dev,
   2226 			    "unable to read Ethernet address\n");
   2227 			goto out;
   2228 		}
   2229 	}
   2230 
   2231 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2232 	    ether_sprintf(enaddr));
   2233 
   2234 	/*
   2235 	 * Read the config info from the EEPROM, and set up various
   2236 	 * bits in the control registers based on their contents.
   2237 	 */
   2238 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2239 	if (pn != NULL) {
   2240 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2241 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2242 	} else {
   2243 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2244 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2245 			goto out;
   2246 		}
   2247 	}
   2248 
   2249 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2250 	if (pn != NULL) {
   2251 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2252 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2253 	} else {
   2254 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2255 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2256 			goto out;
   2257 		}
   2258 	}
   2259 
   2260 	/* check for WM_F_WOL */
   2261 	switch (sc->sc_type) {
   2262 	case WM_T_82542_2_0:
   2263 	case WM_T_82542_2_1:
   2264 	case WM_T_82543:
   2265 		/* dummy? */
   2266 		eeprom_data = 0;
   2267 		apme_mask = NVM_CFG3_APME;
   2268 		break;
   2269 	case WM_T_82544:
   2270 		apme_mask = NVM_CFG2_82544_APM_EN;
   2271 		eeprom_data = cfg2;
   2272 		break;
   2273 	case WM_T_82546:
   2274 	case WM_T_82546_3:
   2275 	case WM_T_82571:
   2276 	case WM_T_82572:
   2277 	case WM_T_82573:
   2278 	case WM_T_82574:
   2279 	case WM_T_82583:
   2280 	case WM_T_80003:
   2281 	default:
   2282 		apme_mask = NVM_CFG3_APME;
   2283 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2284 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2285 		break;
   2286 	case WM_T_82575:
   2287 	case WM_T_82576:
   2288 	case WM_T_82580:
   2289 	case WM_T_I350:
   2290 	case WM_T_I354: /* XXX ok? */
   2291 	case WM_T_ICH8:
   2292 	case WM_T_ICH9:
   2293 	case WM_T_ICH10:
   2294 	case WM_T_PCH:
   2295 	case WM_T_PCH2:
   2296 	case WM_T_PCH_LPT:
   2297 	case WM_T_PCH_SPT:
   2298 		/* XXX The funcid should be checked on some devices */
   2299 		apme_mask = WUC_APME;
   2300 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2301 		break;
   2302 	}
   2303 
   2304 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2305 	if ((eeprom_data & apme_mask) != 0)
   2306 		sc->sc_flags |= WM_F_WOL;
   2307 #ifdef WM_DEBUG
   2308 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2309 		printf("WOL\n");
   2310 #endif
   2311 
   2312 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2313 		/* Check NVM for autonegotiation */
   2314 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2315 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2316 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2317 		}
   2318 	}
   2319 
   2320 	/*
   2321 	 * XXX need special handling for some multiple port cards
   2322 	 * to disable a paticular port.
   2323 	 */
   2324 
   2325 	if (sc->sc_type >= WM_T_82544) {
   2326 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2327 		if (pn != NULL) {
   2328 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2329 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2330 		} else {
   2331 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2332 				aprint_error_dev(sc->sc_dev,
   2333 				    "unable to read SWDPIN\n");
   2334 				goto out;
   2335 			}
   2336 		}
   2337 	}
   2338 
   2339 	if (cfg1 & NVM_CFG1_ILOS)
   2340 		sc->sc_ctrl |= CTRL_ILOS;
   2341 
   2342 	/*
   2343 	 * XXX
   2344 	 * This code isn't correct because pin 2 and 3 are located
   2345 	 * in different position on newer chips. Check all datasheet.
   2346 	 *
   2347 	 * Until resolve this problem, check if a chip < 82580
   2348 	 */
   2349 	if (sc->sc_type <= WM_T_82580) {
   2350 		if (sc->sc_type >= WM_T_82544) {
   2351 			sc->sc_ctrl |=
   2352 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2353 			    CTRL_SWDPIO_SHIFT;
   2354 			sc->sc_ctrl |=
   2355 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2356 			    CTRL_SWDPINS_SHIFT;
   2357 		} else {
   2358 			sc->sc_ctrl |=
   2359 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2360 			    CTRL_SWDPIO_SHIFT;
   2361 		}
   2362 	}
   2363 
   2364 	/* XXX For other than 82580? */
   2365 	if (sc->sc_type == WM_T_82580) {
   2366 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2367 		if (nvmword & __BIT(13))
   2368 			sc->sc_ctrl |= CTRL_ILOS;
   2369 	}
   2370 
   2371 #if 0
   2372 	if (sc->sc_type >= WM_T_82544) {
   2373 		if (cfg1 & NVM_CFG1_IPS0)
   2374 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2375 		if (cfg1 & NVM_CFG1_IPS1)
   2376 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2377 		sc->sc_ctrl_ext |=
   2378 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2379 		    CTRL_EXT_SWDPIO_SHIFT;
   2380 		sc->sc_ctrl_ext |=
   2381 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2382 		    CTRL_EXT_SWDPINS_SHIFT;
   2383 	} else {
   2384 		sc->sc_ctrl_ext |=
   2385 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2386 		    CTRL_EXT_SWDPIO_SHIFT;
   2387 	}
   2388 #endif
   2389 
   2390 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2391 #if 0
   2392 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2393 #endif
   2394 
   2395 	if (sc->sc_type == WM_T_PCH) {
   2396 		uint16_t val;
   2397 
   2398 		/* Save the NVM K1 bit setting */
   2399 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2400 
   2401 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2402 			sc->sc_nvm_k1_enabled = 1;
   2403 		else
   2404 			sc->sc_nvm_k1_enabled = 0;
   2405 	}
   2406 
   2407 	/*
   2408 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2409 	 * media structures accordingly.
   2410 	 */
   2411 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2412 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2413 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2414 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2415 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2416 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2417 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2418 	} else if (sc->sc_type < WM_T_82543 ||
   2419 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2420 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2421 			aprint_error_dev(sc->sc_dev,
   2422 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2423 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2424 		}
   2425 		wm_tbi_mediainit(sc);
   2426 	} else {
   2427 		switch (sc->sc_type) {
   2428 		case WM_T_82575:
   2429 		case WM_T_82576:
   2430 		case WM_T_82580:
   2431 		case WM_T_I350:
   2432 		case WM_T_I354:
   2433 		case WM_T_I210:
   2434 		case WM_T_I211:
   2435 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2436 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2437 			switch (link_mode) {
   2438 			case CTRL_EXT_LINK_MODE_1000KX:
   2439 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2440 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2441 				break;
   2442 			case CTRL_EXT_LINK_MODE_SGMII:
   2443 				if (wm_sgmii_uses_mdio(sc)) {
   2444 					aprint_verbose_dev(sc->sc_dev,
   2445 					    "SGMII(MDIO)\n");
   2446 					sc->sc_flags |= WM_F_SGMII;
   2447 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2448 					break;
   2449 				}
   2450 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2451 				/*FALLTHROUGH*/
   2452 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2453 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2454 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2455 					if (link_mode
   2456 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2457 						sc->sc_mediatype
   2458 						    = WM_MEDIATYPE_COPPER;
   2459 						sc->sc_flags |= WM_F_SGMII;
   2460 					} else {
   2461 						sc->sc_mediatype
   2462 						    = WM_MEDIATYPE_SERDES;
   2463 						aprint_verbose_dev(sc->sc_dev,
   2464 						    "SERDES\n");
   2465 					}
   2466 					break;
   2467 				}
   2468 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2469 					aprint_verbose_dev(sc->sc_dev,
   2470 					    "SERDES\n");
   2471 
   2472 				/* Change current link mode setting */
   2473 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2474 				switch (sc->sc_mediatype) {
   2475 				case WM_MEDIATYPE_COPPER:
   2476 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2477 					break;
   2478 				case WM_MEDIATYPE_SERDES:
   2479 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2480 					break;
   2481 				default:
   2482 					break;
   2483 				}
   2484 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2485 				break;
   2486 			case CTRL_EXT_LINK_MODE_GMII:
   2487 			default:
   2488 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2489 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2490 				break;
   2491 			}
   2492 
   2493 			reg &= ~CTRL_EXT_I2C_ENA;
   2494 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2495 				reg |= CTRL_EXT_I2C_ENA;
   2496 			else
   2497 				reg &= ~CTRL_EXT_I2C_ENA;
   2498 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2499 
   2500 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2501 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2502 			else
   2503 				wm_tbi_mediainit(sc);
   2504 			break;
   2505 		default:
   2506 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2507 				aprint_error_dev(sc->sc_dev,
   2508 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2509 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2510 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2511 		}
   2512 	}
   2513 
   2514 	ifp = &sc->sc_ethercom.ec_if;
   2515 	xname = device_xname(sc->sc_dev);
   2516 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2517 	ifp->if_softc = sc;
   2518 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2519 	ifp->if_extflags = IFEF_START_MPSAFE;
   2520 	ifp->if_ioctl = wm_ioctl;
   2521 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2522 		ifp->if_start = wm_nq_start;
   2523 		if (sc->sc_nqueues > 1)
   2524 			ifp->if_transmit = wm_nq_transmit;
   2525 	} else
   2526 		ifp->if_start = wm_start;
   2527 	ifp->if_watchdog = wm_watchdog;
   2528 	ifp->if_init = wm_init;
   2529 	ifp->if_stop = wm_stop;
   2530 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2531 	IFQ_SET_READY(&ifp->if_snd);
   2532 
   2533 	/* Check for jumbo frame */
   2534 	switch (sc->sc_type) {
   2535 	case WM_T_82573:
   2536 		/* XXX limited to 9234 if ASPM is disabled */
   2537 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2538 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2539 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2540 		break;
   2541 	case WM_T_82571:
   2542 	case WM_T_82572:
   2543 	case WM_T_82574:
   2544 	case WM_T_82575:
   2545 	case WM_T_82576:
   2546 	case WM_T_82580:
   2547 	case WM_T_I350:
   2548 	case WM_T_I354: /* XXXX ok? */
   2549 	case WM_T_I210:
   2550 	case WM_T_I211:
   2551 	case WM_T_80003:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2555 	case WM_T_PCH_LPT:
   2556 	case WM_T_PCH_SPT:
   2557 		/* XXX limited to 9234 */
   2558 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2559 		break;
   2560 	case WM_T_PCH:
   2561 		/* XXX limited to 4096 */
   2562 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2563 		break;
   2564 	case WM_T_82542_2_0:
   2565 	case WM_T_82542_2_1:
   2566 	case WM_T_82583:
   2567 	case WM_T_ICH8:
   2568 		/* No support for jumbo frame */
   2569 		break;
   2570 	default:
   2571 		/* ETHER_MAX_LEN_JUMBO */
   2572 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2573 		break;
   2574 	}
   2575 
   2576 	/* If we're a i82543 or greater, we can support VLANs. */
   2577 	if (sc->sc_type >= WM_T_82543)
   2578 		sc->sc_ethercom.ec_capabilities |=
   2579 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2580 
   2581 	/*
   2582 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2583 	 * on i82543 and later.
   2584 	 */
   2585 	if (sc->sc_type >= WM_T_82543) {
   2586 		ifp->if_capabilities |=
   2587 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2588 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2589 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2590 		    IFCAP_CSUM_TCPv6_Tx |
   2591 		    IFCAP_CSUM_UDPv6_Tx;
   2592 	}
   2593 
   2594 	/*
   2595 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2596 	 *
   2597 	 *	82541GI (8086:1076) ... no
   2598 	 *	82572EI (8086:10b9) ... yes
   2599 	 */
   2600 	if (sc->sc_type >= WM_T_82571) {
   2601 		ifp->if_capabilities |=
   2602 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2603 	}
   2604 
   2605 	/*
   2606 	 * If we're a i82544 or greater (except i82547), we can do
   2607 	 * TCP segmentation offload.
   2608 	 */
   2609 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2610 		ifp->if_capabilities |= IFCAP_TSOv4;
   2611 	}
   2612 
   2613 	if (sc->sc_type >= WM_T_82571) {
   2614 		ifp->if_capabilities |= IFCAP_TSOv6;
   2615 	}
   2616 
   2617 #ifdef WM_MPSAFE
   2618 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2619 #else
   2620 	sc->sc_core_lock = NULL;
   2621 #endif
   2622 
   2623 	/* Attach the interface. */
   2624 	if_initialize(ifp);
   2625 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2626 	ether_ifattach(ifp, enaddr);
   2627 	if_register(ifp);
   2628 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2629 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2630 			  RND_FLAG_DEFAULT);
   2631 
   2632 #ifdef WM_EVENT_COUNTERS
   2633 	/* Attach event counters. */
   2634 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2635 	    NULL, xname, "linkintr");
   2636 
   2637 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2638 	    NULL, xname, "tx_xoff");
   2639 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2640 	    NULL, xname, "tx_xon");
   2641 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2642 	    NULL, xname, "rx_xoff");
   2643 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2644 	    NULL, xname, "rx_xon");
   2645 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2646 	    NULL, xname, "rx_macctl");
   2647 #endif /* WM_EVENT_COUNTERS */
   2648 
   2649 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2650 		pmf_class_network_register(self, ifp);
   2651 	else
   2652 		aprint_error_dev(self, "couldn't establish power handler\n");
   2653 
   2654 	sc->sc_flags |= WM_F_ATTACHED;
   2655  out:
   2656 	return;
   2657 }
   2658 
   2659 /* The detach function (ca_detach) */
   2660 static int
   2661 wm_detach(device_t self, int flags __unused)
   2662 {
   2663 	struct wm_softc *sc = device_private(self);
   2664 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2665 	int i;
   2666 
   2667 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2668 		return 0;
   2669 
   2670 	/* Stop the interface. Callouts are stopped in it. */
   2671 	wm_stop(ifp, 1);
   2672 
   2673 	pmf_device_deregister(self);
   2674 
   2675 	/* Tell the firmware about the release */
   2676 	WM_CORE_LOCK(sc);
   2677 	wm_release_manageability(sc);
   2678 	wm_release_hw_control(sc);
   2679 	wm_enable_wakeup(sc);
   2680 	WM_CORE_UNLOCK(sc);
   2681 
   2682 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2683 
   2684 	/* Delete all remaining media. */
   2685 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2686 
   2687 	ether_ifdetach(ifp);
   2688 	if_detach(ifp);
   2689 	if_percpuq_destroy(sc->sc_ipq);
   2690 
   2691 	/* Unload RX dmamaps and free mbufs */
   2692 	for (i = 0; i < sc->sc_nqueues; i++) {
   2693 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2694 		mutex_enter(rxq->rxq_lock);
   2695 		wm_rxdrain(rxq);
   2696 		mutex_exit(rxq->rxq_lock);
   2697 	}
   2698 	/* Must unlock here */
   2699 
   2700 	/* Disestablish the interrupt handler */
   2701 	for (i = 0; i < sc->sc_nintrs; i++) {
   2702 		if (sc->sc_ihs[i] != NULL) {
   2703 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2704 			sc->sc_ihs[i] = NULL;
   2705 		}
   2706 	}
   2707 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2708 
   2709 	wm_free_txrx_queues(sc);
   2710 
   2711 	/* Unmap the registers */
   2712 	if (sc->sc_ss) {
   2713 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2714 		sc->sc_ss = 0;
   2715 	}
   2716 	if (sc->sc_ios) {
   2717 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2718 		sc->sc_ios = 0;
   2719 	}
   2720 	if (sc->sc_flashs) {
   2721 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2722 		sc->sc_flashs = 0;
   2723 	}
   2724 
   2725 	if (sc->sc_core_lock)
   2726 		mutex_obj_free(sc->sc_core_lock);
   2727 	if (sc->sc_ich_phymtx)
   2728 		mutex_obj_free(sc->sc_ich_phymtx);
   2729 	if (sc->sc_ich_nvmmtx)
   2730 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2731 
   2732 	return 0;
   2733 }
   2734 
   2735 static bool
   2736 wm_suspend(device_t self, const pmf_qual_t *qual)
   2737 {
   2738 	struct wm_softc *sc = device_private(self);
   2739 
   2740 	wm_release_manageability(sc);
   2741 	wm_release_hw_control(sc);
   2742 	wm_enable_wakeup(sc);
   2743 
   2744 	return true;
   2745 }
   2746 
   2747 static bool
   2748 wm_resume(device_t self, const pmf_qual_t *qual)
   2749 {
   2750 	struct wm_softc *sc = device_private(self);
   2751 
   2752 	wm_init_manageability(sc);
   2753 
   2754 	return true;
   2755 }
   2756 
   2757 /*
   2758  * wm_watchdog:		[ifnet interface function]
   2759  *
   2760  *	Watchdog timer handler.
   2761  */
   2762 static void
   2763 wm_watchdog(struct ifnet *ifp)
   2764 {
   2765 	int qid;
   2766 	struct wm_softc *sc = ifp->if_softc;
   2767 
   2768 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2769 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2770 
   2771 		wm_watchdog_txq(ifp, txq);
   2772 	}
   2773 
   2774 	/* Reset the interface. */
   2775 	(void) wm_init(ifp);
   2776 
   2777 	/*
   2778 	 * There are still some upper layer processing which call
   2779 	 * ifp->if_start(). e.g. ALTQ
   2780 	 */
   2781 	/* Try to get more packets going. */
   2782 	ifp->if_start(ifp);
   2783 }
   2784 
   2785 static void
   2786 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2787 {
   2788 	struct wm_softc *sc = ifp->if_softc;
   2789 
   2790 	/*
   2791 	 * Since we're using delayed interrupts, sweep up
   2792 	 * before we report an error.
   2793 	 */
   2794 	mutex_enter(txq->txq_lock);
   2795 	wm_txeof(sc, txq);
   2796 	mutex_exit(txq->txq_lock);
   2797 
   2798 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2799 #ifdef WM_DEBUG
   2800 		int i, j;
   2801 		struct wm_txsoft *txs;
   2802 #endif
   2803 		log(LOG_ERR,
   2804 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2805 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2806 		    txq->txq_next);
   2807 		ifp->if_oerrors++;
   2808 #ifdef WM_DEBUG
   2809 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2810 		    i = WM_NEXTTXS(txq, i)) {
   2811 		    txs = &txq->txq_soft[i];
   2812 		    printf("txs %d tx %d -> %d\n",
   2813 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2814 		    for (j = txs->txs_firstdesc; ;
   2815 			j = WM_NEXTTX(txq, j)) {
   2816 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2817 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2818 			printf("\t %#08x%08x\n",
   2819 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2820 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2821 			if (j == txs->txs_lastdesc)
   2822 				break;
   2823 			}
   2824 		}
   2825 #endif
   2826 	}
   2827 }
   2828 
   2829 /*
   2830  * wm_tick:
   2831  *
   2832  *	One second timer, used to check link status, sweep up
   2833  *	completed transmit jobs, etc.
   2834  */
   2835 static void
   2836 wm_tick(void *arg)
   2837 {
   2838 	struct wm_softc *sc = arg;
   2839 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2840 #ifndef WM_MPSAFE
   2841 	int s = splnet();
   2842 #endif
   2843 
   2844 	WM_CORE_LOCK(sc);
   2845 
   2846 	if (sc->sc_core_stopping)
   2847 		goto out;
   2848 
   2849 	if (sc->sc_type >= WM_T_82542_2_1) {
   2850 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2851 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2852 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2853 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2854 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2855 	}
   2856 
   2857 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2858 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2859 	    + CSR_READ(sc, WMREG_CRCERRS)
   2860 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2861 	    + CSR_READ(sc, WMREG_SYMERRC)
   2862 	    + CSR_READ(sc, WMREG_RXERRC)
   2863 	    + CSR_READ(sc, WMREG_SEC)
   2864 	    + CSR_READ(sc, WMREG_CEXTERR)
   2865 	    + CSR_READ(sc, WMREG_RLEC);
   2866 	/*
   2867 	 * WMREG_RNBC is incremented when there is no available buffers in host
   2868 	 * memory. It does not mean the number of dropped packet. Because
   2869 	 * ethernet controller can receive packets in such case if there is
   2870 	 * space in phy's FIFO.
   2871 	 *
   2872 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   2873 	 * own EVCNT instead of if_iqdrops.
   2874 	 */
   2875 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2876 
   2877 	if (sc->sc_flags & WM_F_HAS_MII)
   2878 		mii_tick(&sc->sc_mii);
   2879 	else if ((sc->sc_type >= WM_T_82575)
   2880 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2881 		wm_serdes_tick(sc);
   2882 	else
   2883 		wm_tbi_tick(sc);
   2884 
   2885 out:
   2886 	WM_CORE_UNLOCK(sc);
   2887 #ifndef WM_MPSAFE
   2888 	splx(s);
   2889 #endif
   2890 
   2891 	if (!sc->sc_core_stopping)
   2892 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2893 }
   2894 
   2895 static int
   2896 wm_ifflags_cb(struct ethercom *ec)
   2897 {
   2898 	struct ifnet *ifp = &ec->ec_if;
   2899 	struct wm_softc *sc = ifp->if_softc;
   2900 	int rc = 0;
   2901 
   2902 	WM_CORE_LOCK(sc);
   2903 
   2904 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2905 	sc->sc_if_flags = ifp->if_flags;
   2906 
   2907 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2908 		rc = ENETRESET;
   2909 		goto out;
   2910 	}
   2911 
   2912 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2913 		wm_set_filter(sc);
   2914 
   2915 	wm_set_vlan(sc);
   2916 
   2917 out:
   2918 	WM_CORE_UNLOCK(sc);
   2919 
   2920 	return rc;
   2921 }
   2922 
   2923 /*
   2924  * wm_ioctl:		[ifnet interface function]
   2925  *
   2926  *	Handle control requests from the operator.
   2927  */
   2928 static int
   2929 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2930 {
   2931 	struct wm_softc *sc = ifp->if_softc;
   2932 	struct ifreq *ifr = (struct ifreq *) data;
   2933 	struct ifaddr *ifa = (struct ifaddr *)data;
   2934 	struct sockaddr_dl *sdl;
   2935 	int s, error;
   2936 
   2937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2938 		device_xname(sc->sc_dev), __func__));
   2939 
   2940 #ifndef WM_MPSAFE
   2941 	s = splnet();
   2942 #endif
   2943 	switch (cmd) {
   2944 	case SIOCSIFMEDIA:
   2945 	case SIOCGIFMEDIA:
   2946 		WM_CORE_LOCK(sc);
   2947 		/* Flow control requires full-duplex mode. */
   2948 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2949 		    (ifr->ifr_media & IFM_FDX) == 0)
   2950 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2951 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2952 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2953 				/* We can do both TXPAUSE and RXPAUSE. */
   2954 				ifr->ifr_media |=
   2955 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2956 			}
   2957 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2958 		}
   2959 		WM_CORE_UNLOCK(sc);
   2960 #ifdef WM_MPSAFE
   2961 		s = splnet();
   2962 #endif
   2963 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2964 #ifdef WM_MPSAFE
   2965 		splx(s);
   2966 #endif
   2967 		break;
   2968 	case SIOCINITIFADDR:
   2969 		WM_CORE_LOCK(sc);
   2970 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2971 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2972 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2973 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2974 			/* unicast address is first multicast entry */
   2975 			wm_set_filter(sc);
   2976 			error = 0;
   2977 			WM_CORE_UNLOCK(sc);
   2978 			break;
   2979 		}
   2980 		WM_CORE_UNLOCK(sc);
   2981 		/*FALLTHROUGH*/
   2982 	default:
   2983 #ifdef WM_MPSAFE
   2984 		s = splnet();
   2985 #endif
   2986 		/* It may call wm_start, so unlock here */
   2987 		error = ether_ioctl(ifp, cmd, data);
   2988 #ifdef WM_MPSAFE
   2989 		splx(s);
   2990 #endif
   2991 		if (error != ENETRESET)
   2992 			break;
   2993 
   2994 		error = 0;
   2995 
   2996 		if (cmd == SIOCSIFCAP) {
   2997 			error = (*ifp->if_init)(ifp);
   2998 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2999 			;
   3000 		else if (ifp->if_flags & IFF_RUNNING) {
   3001 			/*
   3002 			 * Multicast list has changed; set the hardware filter
   3003 			 * accordingly.
   3004 			 */
   3005 			WM_CORE_LOCK(sc);
   3006 			wm_set_filter(sc);
   3007 			WM_CORE_UNLOCK(sc);
   3008 		}
   3009 		break;
   3010 	}
   3011 
   3012 #ifndef WM_MPSAFE
   3013 	splx(s);
   3014 #endif
   3015 	return error;
   3016 }
   3017 
   3018 /* MAC address related */
   3019 
   3020 /*
   3021  * Get the offset of MAC address and return it.
   3022  * If error occured, use offset 0.
   3023  */
   3024 static uint16_t
   3025 wm_check_alt_mac_addr(struct wm_softc *sc)
   3026 {
   3027 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3028 	uint16_t offset = NVM_OFF_MACADDR;
   3029 
   3030 	/* Try to read alternative MAC address pointer */
   3031 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3032 		return 0;
   3033 
   3034 	/* Check pointer if it's valid or not. */
   3035 	if ((offset == 0x0000) || (offset == 0xffff))
   3036 		return 0;
   3037 
   3038 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3039 	/*
   3040 	 * Check whether alternative MAC address is valid or not.
   3041 	 * Some cards have non 0xffff pointer but those don't use
   3042 	 * alternative MAC address in reality.
   3043 	 *
   3044 	 * Check whether the broadcast bit is set or not.
   3045 	 */
   3046 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3047 		if (((myea[0] & 0xff) & 0x01) == 0)
   3048 			return offset; /* Found */
   3049 
   3050 	/* Not found */
   3051 	return 0;
   3052 }
   3053 
   3054 static int
   3055 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3056 {
   3057 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3058 	uint16_t offset = NVM_OFF_MACADDR;
   3059 	int do_invert = 0;
   3060 
   3061 	switch (sc->sc_type) {
   3062 	case WM_T_82580:
   3063 	case WM_T_I350:
   3064 	case WM_T_I354:
   3065 		/* EEPROM Top Level Partitioning */
   3066 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3067 		break;
   3068 	case WM_T_82571:
   3069 	case WM_T_82575:
   3070 	case WM_T_82576:
   3071 	case WM_T_80003:
   3072 	case WM_T_I210:
   3073 	case WM_T_I211:
   3074 		offset = wm_check_alt_mac_addr(sc);
   3075 		if (offset == 0)
   3076 			if ((sc->sc_funcid & 0x01) == 1)
   3077 				do_invert = 1;
   3078 		break;
   3079 	default:
   3080 		if ((sc->sc_funcid & 0x01) == 1)
   3081 			do_invert = 1;
   3082 		break;
   3083 	}
   3084 
   3085 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3086 		goto bad;
   3087 
   3088 	enaddr[0] = myea[0] & 0xff;
   3089 	enaddr[1] = myea[0] >> 8;
   3090 	enaddr[2] = myea[1] & 0xff;
   3091 	enaddr[3] = myea[1] >> 8;
   3092 	enaddr[4] = myea[2] & 0xff;
   3093 	enaddr[5] = myea[2] >> 8;
   3094 
   3095 	/*
   3096 	 * Toggle the LSB of the MAC address on the second port
   3097 	 * of some dual port cards.
   3098 	 */
   3099 	if (do_invert != 0)
   3100 		enaddr[5] ^= 1;
   3101 
   3102 	return 0;
   3103 
   3104  bad:
   3105 	return -1;
   3106 }
   3107 
   3108 /*
   3109  * wm_set_ral:
   3110  *
   3111  *	Set an entery in the receive address list.
   3112  */
   3113 static void
   3114 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3115 {
   3116 	uint32_t ral_lo, ral_hi;
   3117 
   3118 	if (enaddr != NULL) {
   3119 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3120 		    (enaddr[3] << 24);
   3121 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3122 		ral_hi |= RAL_AV;
   3123 	} else {
   3124 		ral_lo = 0;
   3125 		ral_hi = 0;
   3126 	}
   3127 
   3128 	if (sc->sc_type >= WM_T_82544) {
   3129 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3130 		    ral_lo);
   3131 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3132 		    ral_hi);
   3133 	} else {
   3134 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3135 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3136 	}
   3137 }
   3138 
   3139 /*
   3140  * wm_mchash:
   3141  *
   3142  *	Compute the hash of the multicast address for the 4096-bit
   3143  *	multicast filter.
   3144  */
   3145 static uint32_t
   3146 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3147 {
   3148 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3149 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3150 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3151 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3152 	uint32_t hash;
   3153 
   3154 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3155 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3156 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3157 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3158 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3159 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3160 		return (hash & 0x3ff);
   3161 	}
   3162 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3163 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3164 
   3165 	return (hash & 0xfff);
   3166 }
   3167 
   3168 /*
   3169  * wm_set_filter:
   3170  *
   3171  *	Set up the receive filter.
   3172  */
   3173 static void
   3174 wm_set_filter(struct wm_softc *sc)
   3175 {
   3176 	struct ethercom *ec = &sc->sc_ethercom;
   3177 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3178 	struct ether_multi *enm;
   3179 	struct ether_multistep step;
   3180 	bus_addr_t mta_reg;
   3181 	uint32_t hash, reg, bit;
   3182 	int i, size, ralmax;
   3183 
   3184 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3185 		device_xname(sc->sc_dev), __func__));
   3186 
   3187 	if (sc->sc_type >= WM_T_82544)
   3188 		mta_reg = WMREG_CORDOVA_MTA;
   3189 	else
   3190 		mta_reg = WMREG_MTA;
   3191 
   3192 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3193 
   3194 	if (ifp->if_flags & IFF_BROADCAST)
   3195 		sc->sc_rctl |= RCTL_BAM;
   3196 	if (ifp->if_flags & IFF_PROMISC) {
   3197 		sc->sc_rctl |= RCTL_UPE;
   3198 		goto allmulti;
   3199 	}
   3200 
   3201 	/*
   3202 	 * Set the station address in the first RAL slot, and
   3203 	 * clear the remaining slots.
   3204 	 */
   3205 	if (sc->sc_type == WM_T_ICH8)
   3206 		size = WM_RAL_TABSIZE_ICH8 -1;
   3207 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3208 	    || (sc->sc_type == WM_T_PCH))
   3209 		size = WM_RAL_TABSIZE_ICH8;
   3210 	else if (sc->sc_type == WM_T_PCH2)
   3211 		size = WM_RAL_TABSIZE_PCH2;
   3212 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3213 		size = WM_RAL_TABSIZE_PCH_LPT;
   3214 	else if (sc->sc_type == WM_T_82575)
   3215 		size = WM_RAL_TABSIZE_82575;
   3216 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3217 		size = WM_RAL_TABSIZE_82576;
   3218 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3219 		size = WM_RAL_TABSIZE_I350;
   3220 	else
   3221 		size = WM_RAL_TABSIZE;
   3222 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3223 
   3224 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3225 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3226 		switch (i) {
   3227 		case 0:
   3228 			/* We can use all entries */
   3229 			ralmax = size;
   3230 			break;
   3231 		case 1:
   3232 			/* Only RAR[0] */
   3233 			ralmax = 1;
   3234 			break;
   3235 		default:
   3236 			/* available SHRA + RAR[0] */
   3237 			ralmax = i + 1;
   3238 		}
   3239 	} else
   3240 		ralmax = size;
   3241 	for (i = 1; i < size; i++) {
   3242 		if (i < ralmax)
   3243 			wm_set_ral(sc, NULL, i);
   3244 	}
   3245 
   3246 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3247 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3248 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3249 	    || (sc->sc_type == WM_T_PCH_SPT))
   3250 		size = WM_ICH8_MC_TABSIZE;
   3251 	else
   3252 		size = WM_MC_TABSIZE;
   3253 	/* Clear out the multicast table. */
   3254 	for (i = 0; i < size; i++)
   3255 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3256 
   3257 	ETHER_FIRST_MULTI(step, ec, enm);
   3258 	while (enm != NULL) {
   3259 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3260 			/*
   3261 			 * We must listen to a range of multicast addresses.
   3262 			 * For now, just accept all multicasts, rather than
   3263 			 * trying to set only those filter bits needed to match
   3264 			 * the range.  (At this time, the only use of address
   3265 			 * ranges is for IP multicast routing, for which the
   3266 			 * range is big enough to require all bits set.)
   3267 			 */
   3268 			goto allmulti;
   3269 		}
   3270 
   3271 		hash = wm_mchash(sc, enm->enm_addrlo);
   3272 
   3273 		reg = (hash >> 5);
   3274 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3275 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3276 		    || (sc->sc_type == WM_T_PCH2)
   3277 		    || (sc->sc_type == WM_T_PCH_LPT)
   3278 		    || (sc->sc_type == WM_T_PCH_SPT))
   3279 			reg &= 0x1f;
   3280 		else
   3281 			reg &= 0x7f;
   3282 		bit = hash & 0x1f;
   3283 
   3284 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3285 		hash |= 1U << bit;
   3286 
   3287 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3288 			/*
   3289 			 * 82544 Errata 9: Certain register cannot be written
   3290 			 * with particular alignments in PCI-X bus operation
   3291 			 * (FCAH, MTA and VFTA).
   3292 			 */
   3293 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3294 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3295 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3296 		} else
   3297 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3298 
   3299 		ETHER_NEXT_MULTI(step, enm);
   3300 	}
   3301 
   3302 	ifp->if_flags &= ~IFF_ALLMULTI;
   3303 	goto setit;
   3304 
   3305  allmulti:
   3306 	ifp->if_flags |= IFF_ALLMULTI;
   3307 	sc->sc_rctl |= RCTL_MPE;
   3308 
   3309  setit:
   3310 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3311 }
   3312 
   3313 /* Reset and init related */
   3314 
   3315 static void
   3316 wm_set_vlan(struct wm_softc *sc)
   3317 {
   3318 
   3319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3320 		device_xname(sc->sc_dev), __func__));
   3321 
   3322 	/* Deal with VLAN enables. */
   3323 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3324 		sc->sc_ctrl |= CTRL_VME;
   3325 	else
   3326 		sc->sc_ctrl &= ~CTRL_VME;
   3327 
   3328 	/* Write the control registers. */
   3329 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3330 }
   3331 
   3332 static void
   3333 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3334 {
   3335 	uint32_t gcr;
   3336 	pcireg_t ctrl2;
   3337 
   3338 	gcr = CSR_READ(sc, WMREG_GCR);
   3339 
   3340 	/* Only take action if timeout value is defaulted to 0 */
   3341 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3342 		goto out;
   3343 
   3344 	if ((gcr & GCR_CAP_VER2) == 0) {
   3345 		gcr |= GCR_CMPL_TMOUT_10MS;
   3346 		goto out;
   3347 	}
   3348 
   3349 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3350 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3351 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3352 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3353 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3354 
   3355 out:
   3356 	/* Disable completion timeout resend */
   3357 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3358 
   3359 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3360 }
   3361 
   3362 void
   3363 wm_get_auto_rd_done(struct wm_softc *sc)
   3364 {
   3365 	int i;
   3366 
   3367 	/* wait for eeprom to reload */
   3368 	switch (sc->sc_type) {
   3369 	case WM_T_82571:
   3370 	case WM_T_82572:
   3371 	case WM_T_82573:
   3372 	case WM_T_82574:
   3373 	case WM_T_82583:
   3374 	case WM_T_82575:
   3375 	case WM_T_82576:
   3376 	case WM_T_82580:
   3377 	case WM_T_I350:
   3378 	case WM_T_I354:
   3379 	case WM_T_I210:
   3380 	case WM_T_I211:
   3381 	case WM_T_80003:
   3382 	case WM_T_ICH8:
   3383 	case WM_T_ICH9:
   3384 		for (i = 0; i < 10; i++) {
   3385 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3386 				break;
   3387 			delay(1000);
   3388 		}
   3389 		if (i == 10) {
   3390 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3391 			    "complete\n", device_xname(sc->sc_dev));
   3392 		}
   3393 		break;
   3394 	default:
   3395 		break;
   3396 	}
   3397 }
   3398 
   3399 void
   3400 wm_lan_init_done(struct wm_softc *sc)
   3401 {
   3402 	uint32_t reg = 0;
   3403 	int i;
   3404 
   3405 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3406 		device_xname(sc->sc_dev), __func__));
   3407 
   3408 	/* Wait for eeprom to reload */
   3409 	switch (sc->sc_type) {
   3410 	case WM_T_ICH10:
   3411 	case WM_T_PCH:
   3412 	case WM_T_PCH2:
   3413 	case WM_T_PCH_LPT:
   3414 	case WM_T_PCH_SPT:
   3415 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3416 			reg = CSR_READ(sc, WMREG_STATUS);
   3417 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3418 				break;
   3419 			delay(100);
   3420 		}
   3421 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3422 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3423 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3424 		}
   3425 		break;
   3426 	default:
   3427 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3428 		    __func__);
   3429 		break;
   3430 	}
   3431 
   3432 	reg &= ~STATUS_LAN_INIT_DONE;
   3433 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3434 }
   3435 
   3436 void
   3437 wm_get_cfg_done(struct wm_softc *sc)
   3438 {
   3439 	int mask;
   3440 	uint32_t reg;
   3441 	int i;
   3442 
   3443 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3444 		device_xname(sc->sc_dev), __func__));
   3445 
   3446 	/* Wait for eeprom to reload */
   3447 	switch (sc->sc_type) {
   3448 	case WM_T_82542_2_0:
   3449 	case WM_T_82542_2_1:
   3450 		/* null */
   3451 		break;
   3452 	case WM_T_82543:
   3453 	case WM_T_82544:
   3454 	case WM_T_82540:
   3455 	case WM_T_82545:
   3456 	case WM_T_82545_3:
   3457 	case WM_T_82546:
   3458 	case WM_T_82546_3:
   3459 	case WM_T_82541:
   3460 	case WM_T_82541_2:
   3461 	case WM_T_82547:
   3462 	case WM_T_82547_2:
   3463 	case WM_T_82573:
   3464 	case WM_T_82574:
   3465 	case WM_T_82583:
   3466 		/* generic */
   3467 		delay(10*1000);
   3468 		break;
   3469 	case WM_T_80003:
   3470 	case WM_T_82571:
   3471 	case WM_T_82572:
   3472 	case WM_T_82575:
   3473 	case WM_T_82576:
   3474 	case WM_T_82580:
   3475 	case WM_T_I350:
   3476 	case WM_T_I354:
   3477 	case WM_T_I210:
   3478 	case WM_T_I211:
   3479 		if (sc->sc_type == WM_T_82571) {
   3480 			/* Only 82571 shares port 0 */
   3481 			mask = EEMNGCTL_CFGDONE_0;
   3482 		} else
   3483 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3484 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3485 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3486 				break;
   3487 			delay(1000);
   3488 		}
   3489 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3490 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3491 				device_xname(sc->sc_dev), __func__));
   3492 		}
   3493 		break;
   3494 	case WM_T_ICH8:
   3495 	case WM_T_ICH9:
   3496 	case WM_T_ICH10:
   3497 	case WM_T_PCH:
   3498 	case WM_T_PCH2:
   3499 	case WM_T_PCH_LPT:
   3500 	case WM_T_PCH_SPT:
   3501 		delay(10*1000);
   3502 		if (sc->sc_type >= WM_T_ICH10)
   3503 			wm_lan_init_done(sc);
   3504 		else
   3505 			wm_get_auto_rd_done(sc);
   3506 
   3507 		reg = CSR_READ(sc, WMREG_STATUS);
   3508 		if ((reg & STATUS_PHYRA) != 0)
   3509 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3510 		break;
   3511 	default:
   3512 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3513 		    __func__);
   3514 		break;
   3515 	}
   3516 }
   3517 
   3518 /* Init hardware bits */
   3519 void
   3520 wm_initialize_hardware_bits(struct wm_softc *sc)
   3521 {
   3522 	uint32_t tarc0, tarc1, reg;
   3523 
   3524 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3525 		device_xname(sc->sc_dev), __func__));
   3526 
   3527 	/* For 82571 variant, 80003 and ICHs */
   3528 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3529 	    || (sc->sc_type >= WM_T_80003)) {
   3530 
   3531 		/* Transmit Descriptor Control 0 */
   3532 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3533 		reg |= TXDCTL_COUNT_DESC;
   3534 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3535 
   3536 		/* Transmit Descriptor Control 1 */
   3537 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3538 		reg |= TXDCTL_COUNT_DESC;
   3539 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3540 
   3541 		/* TARC0 */
   3542 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3543 		switch (sc->sc_type) {
   3544 		case WM_T_82571:
   3545 		case WM_T_82572:
   3546 		case WM_T_82573:
   3547 		case WM_T_82574:
   3548 		case WM_T_82583:
   3549 		case WM_T_80003:
   3550 			/* Clear bits 30..27 */
   3551 			tarc0 &= ~__BITS(30, 27);
   3552 			break;
   3553 		default:
   3554 			break;
   3555 		}
   3556 
   3557 		switch (sc->sc_type) {
   3558 		case WM_T_82571:
   3559 		case WM_T_82572:
   3560 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3561 
   3562 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3563 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3564 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3565 			/* 8257[12] Errata No.7 */
   3566 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3567 
   3568 			/* TARC1 bit 28 */
   3569 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3570 				tarc1 &= ~__BIT(28);
   3571 			else
   3572 				tarc1 |= __BIT(28);
   3573 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3574 
   3575 			/*
   3576 			 * 8257[12] Errata No.13
   3577 			 * Disable Dyamic Clock Gating.
   3578 			 */
   3579 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3580 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3581 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3582 			break;
   3583 		case WM_T_82573:
   3584 		case WM_T_82574:
   3585 		case WM_T_82583:
   3586 			if ((sc->sc_type == WM_T_82574)
   3587 			    || (sc->sc_type == WM_T_82583))
   3588 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3589 
   3590 			/* Extended Device Control */
   3591 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3592 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3593 			reg |= __BIT(22);	/* Set bit 22 */
   3594 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3595 
   3596 			/* Device Control */
   3597 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3598 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3599 
   3600 			/* PCIe Control Register */
   3601 			/*
   3602 			 * 82573 Errata (unknown).
   3603 			 *
   3604 			 * 82574 Errata 25 and 82583 Errata 12
   3605 			 * "Dropped Rx Packets":
   3606 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   3607 			 */
   3608 			reg = CSR_READ(sc, WMREG_GCR);
   3609 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3610 			CSR_WRITE(sc, WMREG_GCR, reg);
   3611 
   3612 			if ((sc->sc_type == WM_T_82574)
   3613 			    || (sc->sc_type == WM_T_82583)) {
   3614 				/*
   3615 				 * Document says this bit must be set for
   3616 				 * proper operation.
   3617 				 */
   3618 				reg = CSR_READ(sc, WMREG_GCR);
   3619 				reg |= __BIT(22);
   3620 				CSR_WRITE(sc, WMREG_GCR, reg);
   3621 
   3622 				/*
   3623 				 * Apply workaround for hardware errata
   3624 				 * documented in errata docs Fixes issue where
   3625 				 * some error prone or unreliable PCIe
   3626 				 * completions are occurring, particularly
   3627 				 * with ASPM enabled. Without fix, issue can
   3628 				 * cause Tx timeouts.
   3629 				 */
   3630 				reg = CSR_READ(sc, WMREG_GCR2);
   3631 				reg |= __BIT(0);
   3632 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3633 			}
   3634 			break;
   3635 		case WM_T_80003:
   3636 			/* TARC0 */
   3637 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3638 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3639 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   3640 
   3641 			/* TARC1 bit 28 */
   3642 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3643 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3644 				tarc1 &= ~__BIT(28);
   3645 			else
   3646 				tarc1 |= __BIT(28);
   3647 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3648 			break;
   3649 		case WM_T_ICH8:
   3650 		case WM_T_ICH9:
   3651 		case WM_T_ICH10:
   3652 		case WM_T_PCH:
   3653 		case WM_T_PCH2:
   3654 		case WM_T_PCH_LPT:
   3655 		case WM_T_PCH_SPT:
   3656 			/* TARC0 */
   3657 			if ((sc->sc_type == WM_T_ICH8)
   3658 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3659 				/* Set TARC0 bits 29 and 28 */
   3660 				tarc0 |= __BITS(29, 28);
   3661 			}
   3662 			/* Set TARC0 bits 23,24,26,27 */
   3663 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3664 
   3665 			/* CTRL_EXT */
   3666 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3667 			reg |= __BIT(22);	/* Set bit 22 */
   3668 			/*
   3669 			 * Enable PHY low-power state when MAC is at D3
   3670 			 * w/o WoL
   3671 			 */
   3672 			if (sc->sc_type >= WM_T_PCH)
   3673 				reg |= CTRL_EXT_PHYPDEN;
   3674 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3675 
   3676 			/* TARC1 */
   3677 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3678 			/* bit 28 */
   3679 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3680 				tarc1 &= ~__BIT(28);
   3681 			else
   3682 				tarc1 |= __BIT(28);
   3683 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3684 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3685 
   3686 			/* Device Status */
   3687 			if (sc->sc_type == WM_T_ICH8) {
   3688 				reg = CSR_READ(sc, WMREG_STATUS);
   3689 				reg &= ~__BIT(31);
   3690 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3691 
   3692 			}
   3693 
   3694 			/* IOSFPC */
   3695 			if (sc->sc_type == WM_T_PCH_SPT) {
   3696 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3697 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3698 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3699 			}
   3700 			/*
   3701 			 * Work-around descriptor data corruption issue during
   3702 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3703 			 * capability.
   3704 			 */
   3705 			reg = CSR_READ(sc, WMREG_RFCTL);
   3706 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3707 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3708 			break;
   3709 		default:
   3710 			break;
   3711 		}
   3712 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3713 
   3714 		/*
   3715 		 * 8257[12] Errata No.52 and some others.
   3716 		 * Avoid RSS Hash Value bug.
   3717 		 */
   3718 		switch (sc->sc_type) {
   3719 		case WM_T_82571:
   3720 		case WM_T_82572:
   3721 		case WM_T_82573:
   3722 		case WM_T_80003:
   3723 		case WM_T_ICH8:
   3724 			reg = CSR_READ(sc, WMREG_RFCTL);
   3725 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3726 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3727 			break;
   3728 		default:
   3729 			break;
   3730 		}
   3731 	}
   3732 }
   3733 
   3734 static uint32_t
   3735 wm_rxpbs_adjust_82580(uint32_t val)
   3736 {
   3737 	uint32_t rv = 0;
   3738 
   3739 	if (val < __arraycount(wm_82580_rxpbs_table))
   3740 		rv = wm_82580_rxpbs_table[val];
   3741 
   3742 	return rv;
   3743 }
   3744 
   3745 /*
   3746  * wm_reset:
   3747  *
   3748  *	Reset the i82542 chip.
   3749  */
   3750 static void
   3751 wm_reset(struct wm_softc *sc)
   3752 {
   3753 	int phy_reset = 0;
   3754 	int i, error = 0;
   3755 	uint32_t reg;
   3756 
   3757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3758 		device_xname(sc->sc_dev), __func__));
   3759 	KASSERT(sc->sc_type != 0);
   3760 
   3761 	/*
   3762 	 * Allocate on-chip memory according to the MTU size.
   3763 	 * The Packet Buffer Allocation register must be written
   3764 	 * before the chip is reset.
   3765 	 */
   3766 	switch (sc->sc_type) {
   3767 	case WM_T_82547:
   3768 	case WM_T_82547_2:
   3769 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3770 		    PBA_22K : PBA_30K;
   3771 		for (i = 0; i < sc->sc_nqueues; i++) {
   3772 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3773 			txq->txq_fifo_head = 0;
   3774 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3775 			txq->txq_fifo_size =
   3776 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3777 			txq->txq_fifo_stall = 0;
   3778 		}
   3779 		break;
   3780 	case WM_T_82571:
   3781 	case WM_T_82572:
   3782 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3783 	case WM_T_80003:
   3784 		sc->sc_pba = PBA_32K;
   3785 		break;
   3786 	case WM_T_82573:
   3787 		sc->sc_pba = PBA_12K;
   3788 		break;
   3789 	case WM_T_82574:
   3790 	case WM_T_82583:
   3791 		sc->sc_pba = PBA_20K;
   3792 		break;
   3793 	case WM_T_82576:
   3794 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3795 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3796 		break;
   3797 	case WM_T_82580:
   3798 	case WM_T_I350:
   3799 	case WM_T_I354:
   3800 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3801 		break;
   3802 	case WM_T_I210:
   3803 	case WM_T_I211:
   3804 		sc->sc_pba = PBA_34K;
   3805 		break;
   3806 	case WM_T_ICH8:
   3807 		/* Workaround for a bit corruption issue in FIFO memory */
   3808 		sc->sc_pba = PBA_8K;
   3809 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3810 		break;
   3811 	case WM_T_ICH9:
   3812 	case WM_T_ICH10:
   3813 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3814 		    PBA_14K : PBA_10K;
   3815 		break;
   3816 	case WM_T_PCH:
   3817 	case WM_T_PCH2:
   3818 	case WM_T_PCH_LPT:
   3819 	case WM_T_PCH_SPT:
   3820 		sc->sc_pba = PBA_26K;
   3821 		break;
   3822 	default:
   3823 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3824 		    PBA_40K : PBA_48K;
   3825 		break;
   3826 	}
   3827 	/*
   3828 	 * Only old or non-multiqueue devices have the PBA register
   3829 	 * XXX Need special handling for 82575.
   3830 	 */
   3831 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3832 	    || (sc->sc_type == WM_T_82575))
   3833 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3834 
   3835 	/* Prevent the PCI-E bus from sticking */
   3836 	if (sc->sc_flags & WM_F_PCIE) {
   3837 		int timeout = 800;
   3838 
   3839 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3840 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3841 
   3842 		while (timeout--) {
   3843 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3844 			    == 0)
   3845 				break;
   3846 			delay(100);
   3847 		}
   3848 	}
   3849 
   3850 	/* Set the completion timeout for interface */
   3851 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3852 	    || (sc->sc_type == WM_T_82580)
   3853 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3854 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3855 		wm_set_pcie_completion_timeout(sc);
   3856 
   3857 	/* Clear interrupt */
   3858 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3859 	if (sc->sc_nintrs > 1) {
   3860 		if (sc->sc_type != WM_T_82574) {
   3861 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3862 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3863 		} else {
   3864 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3865 		}
   3866 	}
   3867 
   3868 	/* Stop the transmit and receive processes. */
   3869 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3870 	sc->sc_rctl &= ~RCTL_EN;
   3871 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3872 	CSR_WRITE_FLUSH(sc);
   3873 
   3874 	/* XXX set_tbi_sbp_82543() */
   3875 
   3876 	delay(10*1000);
   3877 
   3878 	/* Must acquire the MDIO ownership before MAC reset */
   3879 	switch (sc->sc_type) {
   3880 	case WM_T_82573:
   3881 	case WM_T_82574:
   3882 	case WM_T_82583:
   3883 		error = wm_get_hw_semaphore_82573(sc);
   3884 		break;
   3885 	default:
   3886 		break;
   3887 	}
   3888 
   3889 	/*
   3890 	 * 82541 Errata 29? & 82547 Errata 28?
   3891 	 * See also the description about PHY_RST bit in CTRL register
   3892 	 * in 8254x_GBe_SDM.pdf.
   3893 	 */
   3894 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3895 		CSR_WRITE(sc, WMREG_CTRL,
   3896 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3897 		CSR_WRITE_FLUSH(sc);
   3898 		delay(5000);
   3899 	}
   3900 
   3901 	switch (sc->sc_type) {
   3902 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3903 	case WM_T_82541:
   3904 	case WM_T_82541_2:
   3905 	case WM_T_82547:
   3906 	case WM_T_82547_2:
   3907 		/*
   3908 		 * On some chipsets, a reset through a memory-mapped write
   3909 		 * cycle can cause the chip to reset before completing the
   3910 		 * write cycle.  This causes major headache that can be
   3911 		 * avoided by issuing the reset via indirect register writes
   3912 		 * through I/O space.
   3913 		 *
   3914 		 * So, if we successfully mapped the I/O BAR at attach time,
   3915 		 * use that.  Otherwise, try our luck with a memory-mapped
   3916 		 * reset.
   3917 		 */
   3918 		if (sc->sc_flags & WM_F_IOH_VALID)
   3919 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3920 		else
   3921 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3922 		break;
   3923 	case WM_T_82545_3:
   3924 	case WM_T_82546_3:
   3925 		/* Use the shadow control register on these chips. */
   3926 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3927 		break;
   3928 	case WM_T_80003:
   3929 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3930 		sc->phy.acquire(sc);
   3931 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3932 		sc->phy.release(sc);
   3933 		break;
   3934 	case WM_T_ICH8:
   3935 	case WM_T_ICH9:
   3936 	case WM_T_ICH10:
   3937 	case WM_T_PCH:
   3938 	case WM_T_PCH2:
   3939 	case WM_T_PCH_LPT:
   3940 	case WM_T_PCH_SPT:
   3941 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3942 		if (wm_phy_resetisblocked(sc) == false) {
   3943 			/*
   3944 			 * Gate automatic PHY configuration by hardware on
   3945 			 * non-managed 82579
   3946 			 */
   3947 			if ((sc->sc_type == WM_T_PCH2)
   3948 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3949 				== 0))
   3950 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3951 
   3952 			reg |= CTRL_PHY_RESET;
   3953 			phy_reset = 1;
   3954 		} else
   3955 			printf("XXX reset is blocked!!!\n");
   3956 		sc->phy.acquire(sc);
   3957 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3958 		/* Don't insert a completion barrier when reset */
   3959 		delay(20*1000);
   3960 		mutex_exit(sc->sc_ich_phymtx);
   3961 		break;
   3962 	case WM_T_82580:
   3963 	case WM_T_I350:
   3964 	case WM_T_I354:
   3965 	case WM_T_I210:
   3966 	case WM_T_I211:
   3967 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3968 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3969 			CSR_WRITE_FLUSH(sc);
   3970 		delay(5000);
   3971 		break;
   3972 	case WM_T_82542_2_0:
   3973 	case WM_T_82542_2_1:
   3974 	case WM_T_82543:
   3975 	case WM_T_82540:
   3976 	case WM_T_82545:
   3977 	case WM_T_82546:
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82573:
   3981 	case WM_T_82574:
   3982 	case WM_T_82575:
   3983 	case WM_T_82576:
   3984 	case WM_T_82583:
   3985 	default:
   3986 		/* Everything else can safely use the documented method. */
   3987 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3988 		break;
   3989 	}
   3990 
   3991 	/* Must release the MDIO ownership after MAC reset */
   3992 	switch (sc->sc_type) {
   3993 	case WM_T_82573:
   3994 	case WM_T_82574:
   3995 	case WM_T_82583:
   3996 		if (error == 0)
   3997 			wm_put_hw_semaphore_82573(sc);
   3998 		break;
   3999 	default:
   4000 		break;
   4001 	}
   4002 
   4003 	if (phy_reset != 0)
   4004 		wm_get_cfg_done(sc);
   4005 
   4006 	/* reload EEPROM */
   4007 	switch (sc->sc_type) {
   4008 	case WM_T_82542_2_0:
   4009 	case WM_T_82542_2_1:
   4010 	case WM_T_82543:
   4011 	case WM_T_82544:
   4012 		delay(10);
   4013 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4014 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4015 		CSR_WRITE_FLUSH(sc);
   4016 		delay(2000);
   4017 		break;
   4018 	case WM_T_82540:
   4019 	case WM_T_82545:
   4020 	case WM_T_82545_3:
   4021 	case WM_T_82546:
   4022 	case WM_T_82546_3:
   4023 		delay(5*1000);
   4024 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4025 		break;
   4026 	case WM_T_82541:
   4027 	case WM_T_82541_2:
   4028 	case WM_T_82547:
   4029 	case WM_T_82547_2:
   4030 		delay(20000);
   4031 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4032 		break;
   4033 	case WM_T_82571:
   4034 	case WM_T_82572:
   4035 	case WM_T_82573:
   4036 	case WM_T_82574:
   4037 	case WM_T_82583:
   4038 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4039 			delay(10);
   4040 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4041 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4042 			CSR_WRITE_FLUSH(sc);
   4043 		}
   4044 		/* check EECD_EE_AUTORD */
   4045 		wm_get_auto_rd_done(sc);
   4046 		/*
   4047 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4048 		 * is set.
   4049 		 */
   4050 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4051 		    || (sc->sc_type == WM_T_82583))
   4052 			delay(25*1000);
   4053 		break;
   4054 	case WM_T_82575:
   4055 	case WM_T_82576:
   4056 	case WM_T_82580:
   4057 	case WM_T_I350:
   4058 	case WM_T_I354:
   4059 	case WM_T_I210:
   4060 	case WM_T_I211:
   4061 	case WM_T_80003:
   4062 		/* check EECD_EE_AUTORD */
   4063 		wm_get_auto_rd_done(sc);
   4064 		break;
   4065 	case WM_T_ICH8:
   4066 	case WM_T_ICH9:
   4067 	case WM_T_ICH10:
   4068 	case WM_T_PCH:
   4069 	case WM_T_PCH2:
   4070 	case WM_T_PCH_LPT:
   4071 	case WM_T_PCH_SPT:
   4072 		break;
   4073 	default:
   4074 		panic("%s: unknown type\n", __func__);
   4075 	}
   4076 
   4077 	/* Check whether EEPROM is present or not */
   4078 	switch (sc->sc_type) {
   4079 	case WM_T_82575:
   4080 	case WM_T_82576:
   4081 	case WM_T_82580:
   4082 	case WM_T_I350:
   4083 	case WM_T_I354:
   4084 	case WM_T_ICH8:
   4085 	case WM_T_ICH9:
   4086 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4087 			/* Not found */
   4088 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4089 			if (sc->sc_type == WM_T_82575)
   4090 				wm_reset_init_script_82575(sc);
   4091 		}
   4092 		break;
   4093 	default:
   4094 		break;
   4095 	}
   4096 
   4097 	if ((sc->sc_type == WM_T_82580)
   4098 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4099 		/* clear global device reset status bit */
   4100 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4101 	}
   4102 
   4103 	/* Clear any pending interrupt events. */
   4104 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4105 	reg = CSR_READ(sc, WMREG_ICR);
   4106 	if (sc->sc_nintrs > 1) {
   4107 		if (sc->sc_type != WM_T_82574) {
   4108 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4109 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4110 		} else
   4111 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4112 	}
   4113 
   4114 	/* reload sc_ctrl */
   4115 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4116 
   4117 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4118 		wm_set_eee_i350(sc);
   4119 
   4120 	/* Clear the host wakeup bit after lcd reset */
   4121 	if (sc->sc_type >= WM_T_PCH) {
   4122 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4123 		    BM_PORT_GEN_CFG);
   4124 		reg &= ~BM_WUC_HOST_WU_BIT;
   4125 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4126 		    BM_PORT_GEN_CFG, reg);
   4127 	}
   4128 
   4129 	/*
   4130 	 * For PCH, this write will make sure that any noise will be detected
   4131 	 * as a CRC error and be dropped rather than show up as a bad packet
   4132 	 * to the DMA engine
   4133 	 */
   4134 	if (sc->sc_type == WM_T_PCH)
   4135 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4136 
   4137 	if (sc->sc_type >= WM_T_82544)
   4138 		CSR_WRITE(sc, WMREG_WUC, 0);
   4139 
   4140 	wm_reset_mdicnfg_82580(sc);
   4141 
   4142 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4143 		wm_pll_workaround_i210(sc);
   4144 }
   4145 
   4146 /*
   4147  * wm_add_rxbuf:
   4148  *
   4149  *	Add a receive buffer to the indiciated descriptor.
   4150  */
   4151 static int
   4152 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4153 {
   4154 	struct wm_softc *sc = rxq->rxq_sc;
   4155 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4156 	struct mbuf *m;
   4157 	int error;
   4158 
   4159 	KASSERT(mutex_owned(rxq->rxq_lock));
   4160 
   4161 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4162 	if (m == NULL)
   4163 		return ENOBUFS;
   4164 
   4165 	MCLGET(m, M_DONTWAIT);
   4166 	if ((m->m_flags & M_EXT) == 0) {
   4167 		m_freem(m);
   4168 		return ENOBUFS;
   4169 	}
   4170 
   4171 	if (rxs->rxs_mbuf != NULL)
   4172 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4173 
   4174 	rxs->rxs_mbuf = m;
   4175 
   4176 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4177 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4178 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4179 	if (error) {
   4180 		/* XXX XXX XXX */
   4181 		aprint_error_dev(sc->sc_dev,
   4182 		    "unable to load rx DMA map %d, error = %d\n",
   4183 		    idx, error);
   4184 		panic("wm_add_rxbuf");
   4185 	}
   4186 
   4187 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4188 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4189 
   4190 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4191 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4192 			wm_init_rxdesc(rxq, idx);
   4193 	} else
   4194 		wm_init_rxdesc(rxq, idx);
   4195 
   4196 	return 0;
   4197 }
   4198 
   4199 /*
   4200  * wm_rxdrain:
   4201  *
   4202  *	Drain the receive queue.
   4203  */
   4204 static void
   4205 wm_rxdrain(struct wm_rxqueue *rxq)
   4206 {
   4207 	struct wm_softc *sc = rxq->rxq_sc;
   4208 	struct wm_rxsoft *rxs;
   4209 	int i;
   4210 
   4211 	KASSERT(mutex_owned(rxq->rxq_lock));
   4212 
   4213 	for (i = 0; i < WM_NRXDESC; i++) {
   4214 		rxs = &rxq->rxq_soft[i];
   4215 		if (rxs->rxs_mbuf != NULL) {
   4216 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4217 			m_freem(rxs->rxs_mbuf);
   4218 			rxs->rxs_mbuf = NULL;
   4219 		}
   4220 	}
   4221 }
   4222 
   4223 
   4224 /*
   4225  * XXX copy from FreeBSD's sys/net/rss_config.c
   4226  */
   4227 /*
   4228  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4229  * effectiveness may be limited by algorithm choice and available entropy
   4230  * during the boot.
   4231  *
   4232  * XXXRW: And that we don't randomize it yet!
   4233  *
   4234  * This is the default Microsoft RSS specification key which is also
   4235  * the Chelsio T5 firmware default key.
   4236  */
   4237 #define RSS_KEYSIZE 40
   4238 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4239 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4240 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4241 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4242 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4243 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4244 };
   4245 
   4246 /*
   4247  * Caller must pass an array of size sizeof(rss_key).
   4248  *
   4249  * XXX
   4250  * As if_ixgbe may use this function, this function should not be
   4251  * if_wm specific function.
   4252  */
   4253 static void
   4254 wm_rss_getkey(uint8_t *key)
   4255 {
   4256 
   4257 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4258 }
   4259 
   4260 /*
   4261  * Setup registers for RSS.
   4262  *
   4263  * XXX not yet VMDq support
   4264  */
   4265 static void
   4266 wm_init_rss(struct wm_softc *sc)
   4267 {
   4268 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4269 	int i;
   4270 
   4271 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4272 
   4273 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4274 		int qid, reta_ent;
   4275 
   4276 		qid  = i % sc->sc_nqueues;
   4277 		switch(sc->sc_type) {
   4278 		case WM_T_82574:
   4279 			reta_ent = __SHIFTIN(qid,
   4280 			    RETA_ENT_QINDEX_MASK_82574);
   4281 			break;
   4282 		case WM_T_82575:
   4283 			reta_ent = __SHIFTIN(qid,
   4284 			    RETA_ENT_QINDEX1_MASK_82575);
   4285 			break;
   4286 		default:
   4287 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4288 			break;
   4289 		}
   4290 
   4291 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4292 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4293 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4294 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4295 	}
   4296 
   4297 	wm_rss_getkey((uint8_t *)rss_key);
   4298 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4299 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4300 
   4301 	if (sc->sc_type == WM_T_82574)
   4302 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4303 	else
   4304 		mrqc = MRQC_ENABLE_RSS_MQ;
   4305 
   4306 	/* XXXX
   4307 	 * The same as FreeBSD igb.
   4308 	 * Why doesn't use MRQC_RSS_FIELD_IPV6_EX?
   4309 	 */
   4310 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4311 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4312 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4313 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4314 
   4315 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4316 }
   4317 
   4318 /*
   4319  * Adjust TX and RX queue numbers which the system actulally uses.
   4320  *
   4321  * The numbers are affected by below parameters.
   4322  *     - The nubmer of hardware queues
   4323  *     - The number of MSI-X vectors (= "nvectors" argument)
   4324  *     - ncpu
   4325  */
   4326 static void
   4327 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4328 {
   4329 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4330 
   4331 	if (nvectors < 2) {
   4332 		sc->sc_nqueues = 1;
   4333 		return;
   4334 	}
   4335 
   4336 	switch(sc->sc_type) {
   4337 	case WM_T_82572:
   4338 		hw_ntxqueues = 2;
   4339 		hw_nrxqueues = 2;
   4340 		break;
   4341 	case WM_T_82574:
   4342 		hw_ntxqueues = 2;
   4343 		hw_nrxqueues = 2;
   4344 		break;
   4345 	case WM_T_82575:
   4346 		hw_ntxqueues = 4;
   4347 		hw_nrxqueues = 4;
   4348 		break;
   4349 	case WM_T_82576:
   4350 		hw_ntxqueues = 16;
   4351 		hw_nrxqueues = 16;
   4352 		break;
   4353 	case WM_T_82580:
   4354 	case WM_T_I350:
   4355 	case WM_T_I354:
   4356 		hw_ntxqueues = 8;
   4357 		hw_nrxqueues = 8;
   4358 		break;
   4359 	case WM_T_I210:
   4360 		hw_ntxqueues = 4;
   4361 		hw_nrxqueues = 4;
   4362 		break;
   4363 	case WM_T_I211:
   4364 		hw_ntxqueues = 2;
   4365 		hw_nrxqueues = 2;
   4366 		break;
   4367 		/*
   4368 		 * As below ethernet controllers does not support MSI-X,
   4369 		 * this driver let them not use multiqueue.
   4370 		 *     - WM_T_80003
   4371 		 *     - WM_T_ICH8
   4372 		 *     - WM_T_ICH9
   4373 		 *     - WM_T_ICH10
   4374 		 *     - WM_T_PCH
   4375 		 *     - WM_T_PCH2
   4376 		 *     - WM_T_PCH_LPT
   4377 		 */
   4378 	default:
   4379 		hw_ntxqueues = 1;
   4380 		hw_nrxqueues = 1;
   4381 		break;
   4382 	}
   4383 
   4384 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4385 
   4386 	/*
   4387 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   4388 	 * the number of queues used actually.
   4389 	 */
   4390 	if (nvectors < hw_nqueues + 1) {
   4391 		sc->sc_nqueues = nvectors - 1;
   4392 	} else {
   4393 		sc->sc_nqueues = hw_nqueues;
   4394 	}
   4395 
   4396 	/*
   4397 	 * As queues more then cpus cannot improve scaling, we limit
   4398 	 * the number of queues used actually.
   4399 	 */
   4400 	if (ncpu < sc->sc_nqueues)
   4401 		sc->sc_nqueues = ncpu;
   4402 }
   4403 
   4404 /*
   4405  * Both single interrupt MSI and INTx can use this function.
   4406  */
   4407 static int
   4408 wm_setup_legacy(struct wm_softc *sc)
   4409 {
   4410 	pci_chipset_tag_t pc = sc->sc_pc;
   4411 	const char *intrstr = NULL;
   4412 	char intrbuf[PCI_INTRSTR_LEN];
   4413 	int error;
   4414 
   4415 	error = wm_alloc_txrx_queues(sc);
   4416 	if (error) {
   4417 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4418 		    error);
   4419 		return ENOMEM;
   4420 	}
   4421 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4422 	    sizeof(intrbuf));
   4423 #ifdef WM_MPSAFE
   4424 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4425 #endif
   4426 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4427 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4428 	if (sc->sc_ihs[0] == NULL) {
   4429 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4430 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4431 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4432 		return ENOMEM;
   4433 	}
   4434 
   4435 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4436 	sc->sc_nintrs = 1;
   4437 	return 0;
   4438 }
   4439 
   4440 static int
   4441 wm_setup_msix(struct wm_softc *sc)
   4442 {
   4443 	void *vih;
   4444 	kcpuset_t *affinity;
   4445 	int qidx, error, intr_idx, txrx_established;
   4446 	pci_chipset_tag_t pc = sc->sc_pc;
   4447 	const char *intrstr = NULL;
   4448 	char intrbuf[PCI_INTRSTR_LEN];
   4449 	char intr_xname[INTRDEVNAMEBUF];
   4450 
   4451 	if (sc->sc_nqueues < ncpu) {
   4452 		/*
   4453 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   4454 		 * interrupts start from CPU#1.
   4455 		 */
   4456 		sc->sc_affinity_offset = 1;
   4457 	} else {
   4458 		/*
   4459 		 * In this case, this device use all CPUs. So, we unify
   4460 		 * affinitied cpu_index to msix vector number for readability.
   4461 		 */
   4462 		sc->sc_affinity_offset = 0;
   4463 	}
   4464 
   4465 	error = wm_alloc_txrx_queues(sc);
   4466 	if (error) {
   4467 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4468 		    error);
   4469 		return ENOMEM;
   4470 	}
   4471 
   4472 	kcpuset_create(&affinity, false);
   4473 	intr_idx = 0;
   4474 
   4475 	/*
   4476 	 * TX and RX
   4477 	 */
   4478 	txrx_established = 0;
   4479 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4480 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4481 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4482 
   4483 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4484 		    sizeof(intrbuf));
   4485 #ifdef WM_MPSAFE
   4486 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4487 		    PCI_INTR_MPSAFE, true);
   4488 #endif
   4489 		memset(intr_xname, 0, sizeof(intr_xname));
   4490 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4491 		    device_xname(sc->sc_dev), qidx);
   4492 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4493 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4494 		if (vih == NULL) {
   4495 			aprint_error_dev(sc->sc_dev,
   4496 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4497 			    intrstr ? " at " : "",
   4498 			    intrstr ? intrstr : "");
   4499 
   4500 			goto fail;
   4501 		}
   4502 		kcpuset_zero(affinity);
   4503 		/* Round-robin affinity */
   4504 		kcpuset_set(affinity, affinity_to);
   4505 		error = interrupt_distribute(vih, affinity, NULL);
   4506 		if (error == 0) {
   4507 			aprint_normal_dev(sc->sc_dev,
   4508 			    "for TX and RX interrupting at %s affinity to %u\n",
   4509 			    intrstr, affinity_to);
   4510 		} else {
   4511 			aprint_normal_dev(sc->sc_dev,
   4512 			    "for TX and RX interrupting at %s\n", intrstr);
   4513 		}
   4514 		sc->sc_ihs[intr_idx] = vih;
   4515 		wmq->wmq_id= qidx;
   4516 		wmq->wmq_intr_idx = intr_idx;
   4517 
   4518 		txrx_established++;
   4519 		intr_idx++;
   4520 	}
   4521 
   4522 	/*
   4523 	 * LINK
   4524 	 */
   4525 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4526 	    sizeof(intrbuf));
   4527 #ifdef WM_MPSAFE
   4528 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4529 #endif
   4530 	memset(intr_xname, 0, sizeof(intr_xname));
   4531 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4532 	    device_xname(sc->sc_dev));
   4533 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4534 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4535 	if (vih == NULL) {
   4536 		aprint_error_dev(sc->sc_dev,
   4537 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4538 		    intrstr ? " at " : "",
   4539 		    intrstr ? intrstr : "");
   4540 
   4541 		goto fail;
   4542 	}
   4543 	/* keep default affinity to LINK interrupt */
   4544 	aprint_normal_dev(sc->sc_dev,
   4545 	    "for LINK interrupting at %s\n", intrstr);
   4546 	sc->sc_ihs[intr_idx] = vih;
   4547 	sc->sc_link_intr_idx = intr_idx;
   4548 
   4549 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4550 	kcpuset_destroy(affinity);
   4551 	return 0;
   4552 
   4553  fail:
   4554 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4555 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4556 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4557 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4558 	}
   4559 
   4560 	kcpuset_destroy(affinity);
   4561 	return ENOMEM;
   4562 }
   4563 
   4564 static void
   4565 wm_turnon(struct wm_softc *sc)
   4566 {
   4567 	int i;
   4568 
   4569 	KASSERT(WM_CORE_LOCKED(sc));
   4570 
   4571 	for(i = 0; i < sc->sc_nqueues; i++) {
   4572 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4573 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4574 
   4575 		mutex_enter(txq->txq_lock);
   4576 		txq->txq_stopping = false;
   4577 		mutex_exit(txq->txq_lock);
   4578 
   4579 		mutex_enter(rxq->rxq_lock);
   4580 		rxq->rxq_stopping = false;
   4581 		mutex_exit(rxq->rxq_lock);
   4582 	}
   4583 
   4584 	sc->sc_core_stopping = false;
   4585 }
   4586 
   4587 static void
   4588 wm_turnoff(struct wm_softc *sc)
   4589 {
   4590 	int i;
   4591 
   4592 	KASSERT(WM_CORE_LOCKED(sc));
   4593 
   4594 	sc->sc_core_stopping = true;
   4595 
   4596 	for(i = 0; i < sc->sc_nqueues; i++) {
   4597 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4598 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4599 
   4600 		mutex_enter(rxq->rxq_lock);
   4601 		rxq->rxq_stopping = true;
   4602 		mutex_exit(rxq->rxq_lock);
   4603 
   4604 		mutex_enter(txq->txq_lock);
   4605 		txq->txq_stopping = true;
   4606 		mutex_exit(txq->txq_lock);
   4607 	}
   4608 }
   4609 
   4610 /*
   4611  * wm_init:		[ifnet interface function]
   4612  *
   4613  *	Initialize the interface.
   4614  */
   4615 static int
   4616 wm_init(struct ifnet *ifp)
   4617 {
   4618 	struct wm_softc *sc = ifp->if_softc;
   4619 	int ret;
   4620 
   4621 	WM_CORE_LOCK(sc);
   4622 	ret = wm_init_locked(ifp);
   4623 	WM_CORE_UNLOCK(sc);
   4624 
   4625 	return ret;
   4626 }
   4627 
   4628 static int
   4629 wm_init_locked(struct ifnet *ifp)
   4630 {
   4631 	struct wm_softc *sc = ifp->if_softc;
   4632 	int i, j, trynum, error = 0;
   4633 	uint32_t reg;
   4634 
   4635 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4636 		device_xname(sc->sc_dev), __func__));
   4637 	KASSERT(WM_CORE_LOCKED(sc));
   4638 
   4639 	/*
   4640 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   4641 	 * There is a small but measurable benefit to avoiding the adjusment
   4642 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4643 	 * on such platforms.  One possibility is that the DMA itself is
   4644 	 * slightly more efficient if the front of the entire packet (instead
   4645 	 * of the front of the headers) is aligned.
   4646 	 *
   4647 	 * Note we must always set align_tweak to 0 if we are using
   4648 	 * jumbo frames.
   4649 	 */
   4650 #ifdef __NO_STRICT_ALIGNMENT
   4651 	sc->sc_align_tweak = 0;
   4652 #else
   4653 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4654 		sc->sc_align_tweak = 0;
   4655 	else
   4656 		sc->sc_align_tweak = 2;
   4657 #endif /* __NO_STRICT_ALIGNMENT */
   4658 
   4659 	/* Cancel any pending I/O. */
   4660 	wm_stop_locked(ifp, 0);
   4661 
   4662 	/* update statistics before reset */
   4663 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4664 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4665 
   4666 	/* Reset the chip to a known state. */
   4667 	wm_reset(sc);
   4668 
   4669 	switch (sc->sc_type) {
   4670 	case WM_T_82571:
   4671 	case WM_T_82572:
   4672 	case WM_T_82573:
   4673 	case WM_T_82574:
   4674 	case WM_T_82583:
   4675 	case WM_T_80003:
   4676 	case WM_T_ICH8:
   4677 	case WM_T_ICH9:
   4678 	case WM_T_ICH10:
   4679 	case WM_T_PCH:
   4680 	case WM_T_PCH2:
   4681 	case WM_T_PCH_LPT:
   4682 	case WM_T_PCH_SPT:
   4683 		/* AMT based hardware can now take control from firmware */
   4684 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4685 			wm_get_hw_control(sc);
   4686 		break;
   4687 	default:
   4688 		break;
   4689 	}
   4690 
   4691 	/* Init hardware bits */
   4692 	wm_initialize_hardware_bits(sc);
   4693 
   4694 	/* Reset the PHY. */
   4695 	if (sc->sc_flags & WM_F_HAS_MII)
   4696 		wm_gmii_reset(sc);
   4697 
   4698 	/* Calculate (E)ITR value */
   4699 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4700 		sc->sc_itr = 450;	/* For EITR */
   4701 	} else if (sc->sc_type >= WM_T_82543) {
   4702 		/*
   4703 		 * Set up the interrupt throttling register (units of 256ns)
   4704 		 * Note that a footnote in Intel's documentation says this
   4705 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4706 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4707 		 * that that is also true for the 1024ns units of the other
   4708 		 * interrupt-related timer registers -- so, really, we ought
   4709 		 * to divide this value by 4 when the link speed is low.
   4710 		 *
   4711 		 * XXX implement this division at link speed change!
   4712 		 */
   4713 
   4714 		/*
   4715 		 * For N interrupts/sec, set this value to:
   4716 		 * 1000000000 / (N * 256).  Note that we set the
   4717 		 * absolute and packet timer values to this value
   4718 		 * divided by 4 to get "simple timer" behavior.
   4719 		 */
   4720 
   4721 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4722 	}
   4723 
   4724 	error = wm_init_txrx_queues(sc);
   4725 	if (error)
   4726 		goto out;
   4727 
   4728 	/*
   4729 	 * Clear out the VLAN table -- we don't use it (yet).
   4730 	 */
   4731 	CSR_WRITE(sc, WMREG_VET, 0);
   4732 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4733 		trynum = 10; /* Due to hw errata */
   4734 	else
   4735 		trynum = 1;
   4736 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4737 		for (j = 0; j < trynum; j++)
   4738 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4739 
   4740 	/*
   4741 	 * Set up flow-control parameters.
   4742 	 *
   4743 	 * XXX Values could probably stand some tuning.
   4744 	 */
   4745 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4746 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4747 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4748 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4749 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4750 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4751 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4752 	}
   4753 
   4754 	sc->sc_fcrtl = FCRTL_DFLT;
   4755 	if (sc->sc_type < WM_T_82543) {
   4756 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4757 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4758 	} else {
   4759 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4760 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4761 	}
   4762 
   4763 	if (sc->sc_type == WM_T_80003)
   4764 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4765 	else
   4766 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4767 
   4768 	/* Writes the control register. */
   4769 	wm_set_vlan(sc);
   4770 
   4771 	if (sc->sc_flags & WM_F_HAS_MII) {
   4772 		int val;
   4773 
   4774 		switch (sc->sc_type) {
   4775 		case WM_T_80003:
   4776 		case WM_T_ICH8:
   4777 		case WM_T_ICH9:
   4778 		case WM_T_ICH10:
   4779 		case WM_T_PCH:
   4780 		case WM_T_PCH2:
   4781 		case WM_T_PCH_LPT:
   4782 		case WM_T_PCH_SPT:
   4783 			/*
   4784 			 * Set the mac to wait the maximum time between each
   4785 			 * iteration and increase the max iterations when
   4786 			 * polling the phy; this fixes erroneous timeouts at
   4787 			 * 10Mbps.
   4788 			 */
   4789 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4790 			    0xFFFF);
   4791 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4792 			val |= 0x3F;
   4793 			wm_kmrn_writereg(sc,
   4794 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4795 			break;
   4796 		default:
   4797 			break;
   4798 		}
   4799 
   4800 		if (sc->sc_type == WM_T_80003) {
   4801 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4802 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4803 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4804 
   4805 			/* Bypass RX and TX FIFO's */
   4806 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4807 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4808 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4809 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4810 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4811 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4812 		}
   4813 	}
   4814 #if 0
   4815 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4816 #endif
   4817 
   4818 	/* Set up checksum offload parameters. */
   4819 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4820 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4821 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4822 		reg |= RXCSUM_IPOFL;
   4823 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4824 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4825 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4826 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4827 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4828 
   4829 	/* Set up MSI-X */
   4830 	if (sc->sc_nintrs > 1) {
   4831 		uint32_t ivar;
   4832 		struct wm_queue *wmq;
   4833 		int qid, qintr_idx;
   4834 
   4835 		if (sc->sc_type == WM_T_82575) {
   4836 			/* Interrupt control */
   4837 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4838 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4839 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4840 
   4841 			/* TX and RX */
   4842 			for (i = 0; i < sc->sc_nqueues; i++) {
   4843 				wmq = &sc->sc_queue[i];
   4844 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4845 				    EITR_TX_QUEUE(wmq->wmq_id)
   4846 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4847 			}
   4848 			/* Link status */
   4849 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4850 			    EITR_OTHER);
   4851 		} else if (sc->sc_type == WM_T_82574) {
   4852 			/* Interrupt control */
   4853 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4854 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4855 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4856 
   4857 			ivar = 0;
   4858 			/* TX and RX */
   4859 			for (i = 0; i < sc->sc_nqueues; i++) {
   4860 				wmq = &sc->sc_queue[i];
   4861 				qid = wmq->wmq_id;
   4862 				qintr_idx = wmq->wmq_intr_idx;
   4863 
   4864 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4865 				    IVAR_TX_MASK_Q_82574(qid));
   4866 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4867 				    IVAR_RX_MASK_Q_82574(qid));
   4868 			}
   4869 			/* Link status */
   4870 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4871 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4872 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4873 		} else {
   4874 			/* Interrupt control */
   4875 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4876 			    | GPIE_EIAME | GPIE_PBA);
   4877 
   4878 			switch (sc->sc_type) {
   4879 			case WM_T_82580:
   4880 			case WM_T_I350:
   4881 			case WM_T_I354:
   4882 			case WM_T_I210:
   4883 			case WM_T_I211:
   4884 				/* TX and RX */
   4885 				for (i = 0; i < sc->sc_nqueues; i++) {
   4886 					wmq = &sc->sc_queue[i];
   4887 					qid = wmq->wmq_id;
   4888 					qintr_idx = wmq->wmq_intr_idx;
   4889 
   4890 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4891 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4892 					ivar |= __SHIFTIN((qintr_idx
   4893 						| IVAR_VALID),
   4894 					    IVAR_TX_MASK_Q(qid));
   4895 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4896 					ivar |= __SHIFTIN((qintr_idx
   4897 						| IVAR_VALID),
   4898 					    IVAR_RX_MASK_Q(qid));
   4899 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4900 				}
   4901 				break;
   4902 			case WM_T_82576:
   4903 				/* TX and RX */
   4904 				for (i = 0; i < sc->sc_nqueues; i++) {
   4905 					wmq = &sc->sc_queue[i];
   4906 					qid = wmq->wmq_id;
   4907 					qintr_idx = wmq->wmq_intr_idx;
   4908 
   4909 					ivar = CSR_READ(sc,
   4910 					    WMREG_IVAR_Q_82576(qid));
   4911 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4912 					ivar |= __SHIFTIN((qintr_idx
   4913 						| IVAR_VALID),
   4914 					    IVAR_TX_MASK_Q_82576(qid));
   4915 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4916 					ivar |= __SHIFTIN((qintr_idx
   4917 						| IVAR_VALID),
   4918 					    IVAR_RX_MASK_Q_82576(qid));
   4919 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4920 					    ivar);
   4921 				}
   4922 				break;
   4923 			default:
   4924 				break;
   4925 			}
   4926 
   4927 			/* Link status */
   4928 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4929 			    IVAR_MISC_OTHER);
   4930 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4931 		}
   4932 
   4933 		if (sc->sc_nqueues > 1) {
   4934 			wm_init_rss(sc);
   4935 
   4936 			/*
   4937 			** NOTE: Receive Full-Packet Checksum Offload
   4938 			** is mutually exclusive with Multiqueue. However
   4939 			** this is not the same as TCP/IP checksums which
   4940 			** still work.
   4941 			*/
   4942 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4943 			reg |= RXCSUM_PCSD;
   4944 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4945 		}
   4946 	}
   4947 
   4948 	/* Set up the interrupt registers. */
   4949 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4950 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4951 	    ICR_RXO | ICR_RXT0;
   4952 	if (sc->sc_nintrs > 1) {
   4953 		uint32_t mask;
   4954 		struct wm_queue *wmq;
   4955 
   4956 		switch (sc->sc_type) {
   4957 		case WM_T_82574:
   4958 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4959 			    WMREG_EIAC_82574_MSIX_MASK);
   4960 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4961 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4962 			break;
   4963 		default:
   4964 			if (sc->sc_type == WM_T_82575) {
   4965 				mask = 0;
   4966 				for (i = 0; i < sc->sc_nqueues; i++) {
   4967 					wmq = &sc->sc_queue[i];
   4968 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4969 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4970 				}
   4971 				mask |= EITR_OTHER;
   4972 			} else {
   4973 				mask = 0;
   4974 				for (i = 0; i < sc->sc_nqueues; i++) {
   4975 					wmq = &sc->sc_queue[i];
   4976 					mask |= 1 << wmq->wmq_intr_idx;
   4977 				}
   4978 				mask |= 1 << sc->sc_link_intr_idx;
   4979 			}
   4980 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4981 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4982 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4983 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4984 			break;
   4985 		}
   4986 	} else
   4987 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4988 
   4989 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4990 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4991 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4992 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4993 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4994 		reg |= KABGTXD_BGSQLBIAS;
   4995 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4996 	}
   4997 
   4998 	/* Set up the inter-packet gap. */
   4999 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5000 
   5001 	if (sc->sc_type >= WM_T_82543) {
   5002 		/*
   5003 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5004 		 * the multi queue function with MSI-X.
   5005 		 */
   5006 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5007 			int qidx;
   5008 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5009 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5010 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5011 				    sc->sc_itr);
   5012 			}
   5013 			/*
   5014 			 * Link interrupts occur much less than TX
   5015 			 * interrupts and RX interrupts. So, we don't
   5016 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5017 			 * FreeBSD's if_igb.
   5018 			 */
   5019 		} else
   5020 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5021 	}
   5022 
   5023 	/* Set the VLAN ethernetype. */
   5024 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5025 
   5026 	/*
   5027 	 * Set up the transmit control register; we start out with
   5028 	 * a collision distance suitable for FDX, but update it whe
   5029 	 * we resolve the media type.
   5030 	 */
   5031 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5032 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5033 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5034 	if (sc->sc_type >= WM_T_82571)
   5035 		sc->sc_tctl |= TCTL_MULR;
   5036 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5037 
   5038 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5039 		/* Write TDT after TCTL.EN is set. See the document. */
   5040 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5041 	}
   5042 
   5043 	if (sc->sc_type == WM_T_80003) {
   5044 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5045 		reg &= ~TCTL_EXT_GCEX_MASK;
   5046 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5047 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5048 	}
   5049 
   5050 	/* Set the media. */
   5051 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5052 		goto out;
   5053 
   5054 	/* Configure for OS presence */
   5055 	wm_init_manageability(sc);
   5056 
   5057 	/*
   5058 	 * Set up the receive control register; we actually program
   5059 	 * the register when we set the receive filter.  Use multicast
   5060 	 * address offset type 0.
   5061 	 *
   5062 	 * Only the i82544 has the ability to strip the incoming
   5063 	 * CRC, so we don't enable that feature.
   5064 	 */
   5065 	sc->sc_mchash_type = 0;
   5066 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5067 	    | RCTL_MO(sc->sc_mchash_type);
   5068 
   5069 	/*
   5070 	 * The I350 has a bug where it always strips the CRC whether
   5071 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   5072 	 */
   5073 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5074 	    || (sc->sc_type == WM_T_I210))
   5075 		sc->sc_rctl |= RCTL_SECRC;
   5076 
   5077 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5078 	    && (ifp->if_mtu > ETHERMTU)) {
   5079 		sc->sc_rctl |= RCTL_LPE;
   5080 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5081 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5082 	}
   5083 
   5084 	if (MCLBYTES == 2048) {
   5085 		sc->sc_rctl |= RCTL_2k;
   5086 	} else {
   5087 		if (sc->sc_type >= WM_T_82543) {
   5088 			switch (MCLBYTES) {
   5089 			case 4096:
   5090 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5091 				break;
   5092 			case 8192:
   5093 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5094 				break;
   5095 			case 16384:
   5096 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5097 				break;
   5098 			default:
   5099 				panic("wm_init: MCLBYTES %d unsupported",
   5100 				    MCLBYTES);
   5101 				break;
   5102 			}
   5103 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5104 	}
   5105 
   5106 	/* Set the receive filter. */
   5107 	wm_set_filter(sc);
   5108 
   5109 	/* Enable ECC */
   5110 	switch (sc->sc_type) {
   5111 	case WM_T_82571:
   5112 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5113 		reg |= PBA_ECC_CORR_EN;
   5114 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5115 		break;
   5116 	case WM_T_PCH_LPT:
   5117 	case WM_T_PCH_SPT:
   5118 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5119 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5120 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5121 
   5122 		reg = CSR_READ(sc, WMREG_CTRL);
   5123 		reg |= CTRL_MEHE;
   5124 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5125 		break;
   5126 	default:
   5127 		break;
   5128 	}
   5129 
   5130 	/* On 575 and later set RDT only if RX enabled */
   5131 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5132 		int qidx;
   5133 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5134 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5135 			for (i = 0; i < WM_NRXDESC; i++) {
   5136 				mutex_enter(rxq->rxq_lock);
   5137 				wm_init_rxdesc(rxq, i);
   5138 				mutex_exit(rxq->rxq_lock);
   5139 
   5140 			}
   5141 		}
   5142 	}
   5143 
   5144 	wm_turnon(sc);
   5145 
   5146 	/* Start the one second link check clock. */
   5147 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5148 
   5149 	/* ...all done! */
   5150 	ifp->if_flags |= IFF_RUNNING;
   5151 	ifp->if_flags &= ~IFF_OACTIVE;
   5152 
   5153  out:
   5154 	sc->sc_if_flags = ifp->if_flags;
   5155 	if (error)
   5156 		log(LOG_ERR, "%s: interface not running\n",
   5157 		    device_xname(sc->sc_dev));
   5158 	return error;
   5159 }
   5160 
   5161 /*
   5162  * wm_stop:		[ifnet interface function]
   5163  *
   5164  *	Stop transmission on the interface.
   5165  */
   5166 static void
   5167 wm_stop(struct ifnet *ifp, int disable)
   5168 {
   5169 	struct wm_softc *sc = ifp->if_softc;
   5170 
   5171 	WM_CORE_LOCK(sc);
   5172 	wm_stop_locked(ifp, disable);
   5173 	WM_CORE_UNLOCK(sc);
   5174 }
   5175 
   5176 static void
   5177 wm_stop_locked(struct ifnet *ifp, int disable)
   5178 {
   5179 	struct wm_softc *sc = ifp->if_softc;
   5180 	struct wm_txsoft *txs;
   5181 	int i, qidx;
   5182 
   5183 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5184 		device_xname(sc->sc_dev), __func__));
   5185 	KASSERT(WM_CORE_LOCKED(sc));
   5186 
   5187 	wm_turnoff(sc);
   5188 
   5189 	/* Stop the one second clock. */
   5190 	callout_stop(&sc->sc_tick_ch);
   5191 
   5192 	/* Stop the 82547 Tx FIFO stall check timer. */
   5193 	if (sc->sc_type == WM_T_82547)
   5194 		callout_stop(&sc->sc_txfifo_ch);
   5195 
   5196 	if (sc->sc_flags & WM_F_HAS_MII) {
   5197 		/* Down the MII. */
   5198 		mii_down(&sc->sc_mii);
   5199 	} else {
   5200 #if 0
   5201 		/* Should we clear PHY's status properly? */
   5202 		wm_reset(sc);
   5203 #endif
   5204 	}
   5205 
   5206 	/* Stop the transmit and receive processes. */
   5207 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5208 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5209 	sc->sc_rctl &= ~RCTL_EN;
   5210 
   5211 	/*
   5212 	 * Clear the interrupt mask to ensure the device cannot assert its
   5213 	 * interrupt line.
   5214 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5215 	 * service any currently pending or shared interrupt.
   5216 	 */
   5217 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5218 	sc->sc_icr = 0;
   5219 	if (sc->sc_nintrs > 1) {
   5220 		if (sc->sc_type != WM_T_82574) {
   5221 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5222 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5223 		} else
   5224 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5225 	}
   5226 
   5227 	/* Release any queued transmit buffers. */
   5228 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5229 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5230 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5231 		mutex_enter(txq->txq_lock);
   5232 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5233 			txs = &txq->txq_soft[i];
   5234 			if (txs->txs_mbuf != NULL) {
   5235 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5236 				m_freem(txs->txs_mbuf);
   5237 				txs->txs_mbuf = NULL;
   5238 			}
   5239 		}
   5240 		if (sc->sc_type == WM_T_PCH_SPT) {
   5241 			pcireg_t preg;
   5242 			uint32_t reg;
   5243 			int nexttx;
   5244 
   5245 			/* First, disable MULR fix in FEXTNVM11 */
   5246 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5247 			reg |= FEXTNVM11_DIS_MULRFIX;
   5248 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5249 
   5250 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5251 			    WM_PCI_DESCRING_STATUS);
   5252 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5253 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5254 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5255 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5256 			    && (reg != 0)) {
   5257 				/* TX */
   5258 				printf("XXX need TX flush (reg = %08x)\n",
   5259 				    preg);
   5260 				wm_init_tx_descs(sc, txq);
   5261 				wm_init_tx_regs(sc, wmq, txq);
   5262 				nexttx = txq->txq_next;
   5263 				wm_set_dma_addr(
   5264 					&txq->txq_descs[nexttx].wtx_addr,
   5265 					WM_CDTXADDR(txq, nexttx));
   5266 				txq->txq_descs[nexttx].wtx_cmdlen
   5267 				    = htole32(WTX_CMD_IFCS | 512);
   5268 				wm_cdtxsync(txq, nexttx, 1,
   5269 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5270 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5271 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5272 				CSR_WRITE_FLUSH(sc);
   5273 				delay(250);
   5274 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5275 			}
   5276 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5277 			    WM_PCI_DESCRING_STATUS);
   5278 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5279 				/* RX */
   5280 				printf("XXX need RX flush\n");
   5281 			}
   5282 		}
   5283 		mutex_exit(txq->txq_lock);
   5284 	}
   5285 
   5286 	/* Mark the interface as down and cancel the watchdog timer. */
   5287 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5288 	ifp->if_timer = 0;
   5289 
   5290 	if (disable) {
   5291 		for (i = 0; i < sc->sc_nqueues; i++) {
   5292 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5293 			mutex_enter(rxq->rxq_lock);
   5294 			wm_rxdrain(rxq);
   5295 			mutex_exit(rxq->rxq_lock);
   5296 		}
   5297 	}
   5298 
   5299 #if 0 /* notyet */
   5300 	if (sc->sc_type >= WM_T_82544)
   5301 		CSR_WRITE(sc, WMREG_WUC, 0);
   5302 #endif
   5303 }
   5304 
   5305 static void
   5306 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5307 {
   5308 	struct mbuf *m;
   5309 	int i;
   5310 
   5311 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5312 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5313 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5314 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5315 		    m->m_data, m->m_len, m->m_flags);
   5316 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5317 	    i, i == 1 ? "" : "s");
   5318 }
   5319 
   5320 /*
   5321  * wm_82547_txfifo_stall:
   5322  *
   5323  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5324  *	reset the FIFO pointers, and restart packet transmission.
   5325  */
   5326 static void
   5327 wm_82547_txfifo_stall(void *arg)
   5328 {
   5329 	struct wm_softc *sc = arg;
   5330 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5331 
   5332 	mutex_enter(txq->txq_lock);
   5333 
   5334 	if (txq->txq_stopping)
   5335 		goto out;
   5336 
   5337 	if (txq->txq_fifo_stall) {
   5338 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5339 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5340 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5341 			/*
   5342 			 * Packets have drained.  Stop transmitter, reset
   5343 			 * FIFO pointers, restart transmitter, and kick
   5344 			 * the packet queue.
   5345 			 */
   5346 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5347 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5348 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5349 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5350 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5351 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5352 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5353 			CSR_WRITE_FLUSH(sc);
   5354 
   5355 			txq->txq_fifo_head = 0;
   5356 			txq->txq_fifo_stall = 0;
   5357 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5358 		} else {
   5359 			/*
   5360 			 * Still waiting for packets to drain; try again in
   5361 			 * another tick.
   5362 			 */
   5363 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5364 		}
   5365 	}
   5366 
   5367 out:
   5368 	mutex_exit(txq->txq_lock);
   5369 }
   5370 
   5371 /*
   5372  * wm_82547_txfifo_bugchk:
   5373  *
   5374  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5375  *	prevent enqueueing a packet that would wrap around the end
   5376  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   5377  *
   5378  *	We do this by checking the amount of space before the end
   5379  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5380  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5381  *	the internal FIFO pointers to the beginning, and restart
   5382  *	transmission on the interface.
   5383  */
   5384 #define	WM_FIFO_HDR		0x10
   5385 #define	WM_82547_PAD_LEN	0x3e0
   5386 static int
   5387 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5388 {
   5389 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5390 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5391 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5392 
   5393 	/* Just return if already stalled. */
   5394 	if (txq->txq_fifo_stall)
   5395 		return 1;
   5396 
   5397 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5398 		/* Stall only occurs in half-duplex mode. */
   5399 		goto send_packet;
   5400 	}
   5401 
   5402 	if (len >= WM_82547_PAD_LEN + space) {
   5403 		txq->txq_fifo_stall = 1;
   5404 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5405 		return 1;
   5406 	}
   5407 
   5408  send_packet:
   5409 	txq->txq_fifo_head += len;
   5410 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5411 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5412 
   5413 	return 0;
   5414 }
   5415 
   5416 static int
   5417 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5418 {
   5419 	int error;
   5420 
   5421 	/*
   5422 	 * Allocate the control data structures, and create and load the
   5423 	 * DMA map for it.
   5424 	 *
   5425 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5426 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5427 	 * both sets within the same 4G segment.
   5428 	 */
   5429 	if (sc->sc_type < WM_T_82544)
   5430 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5431 	else
   5432 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5433 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5434 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5435 	else
   5436 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5437 
   5438 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5439 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5440 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5441 		aprint_error_dev(sc->sc_dev,
   5442 		    "unable to allocate TX control data, error = %d\n",
   5443 		    error);
   5444 		goto fail_0;
   5445 	}
   5446 
   5447 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5448 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5449 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5450 		aprint_error_dev(sc->sc_dev,
   5451 		    "unable to map TX control data, error = %d\n", error);
   5452 		goto fail_1;
   5453 	}
   5454 
   5455 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5456 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5457 		aprint_error_dev(sc->sc_dev,
   5458 		    "unable to create TX control data DMA map, error = %d\n",
   5459 		    error);
   5460 		goto fail_2;
   5461 	}
   5462 
   5463 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5464 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5465 		aprint_error_dev(sc->sc_dev,
   5466 		    "unable to load TX control data DMA map, error = %d\n",
   5467 		    error);
   5468 		goto fail_3;
   5469 	}
   5470 
   5471 	return 0;
   5472 
   5473  fail_3:
   5474 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5475  fail_2:
   5476 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5477 	    WM_TXDESCS_SIZE(txq));
   5478  fail_1:
   5479 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5480  fail_0:
   5481 	return error;
   5482 }
   5483 
   5484 static void
   5485 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5486 {
   5487 
   5488 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5489 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5490 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5491 	    WM_TXDESCS_SIZE(txq));
   5492 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5493 }
   5494 
   5495 static int
   5496 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5497 {
   5498 	int error;
   5499 
   5500 	/*
   5501 	 * Allocate the control data structures, and create and load the
   5502 	 * DMA map for it.
   5503 	 *
   5504 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5505 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5506 	 * both sets within the same 4G segment.
   5507 	 */
   5508 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5509 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5510 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5511 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5512 		aprint_error_dev(sc->sc_dev,
   5513 		    "unable to allocate RX control data, error = %d\n",
   5514 		    error);
   5515 		goto fail_0;
   5516 	}
   5517 
   5518 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5519 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5520 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5521 		aprint_error_dev(sc->sc_dev,
   5522 		    "unable to map RX control data, error = %d\n", error);
   5523 		goto fail_1;
   5524 	}
   5525 
   5526 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5527 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5528 		aprint_error_dev(sc->sc_dev,
   5529 		    "unable to create RX control data DMA map, error = %d\n",
   5530 		    error);
   5531 		goto fail_2;
   5532 	}
   5533 
   5534 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5535 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5536 		aprint_error_dev(sc->sc_dev,
   5537 		    "unable to load RX control data DMA map, error = %d\n",
   5538 		    error);
   5539 		goto fail_3;
   5540 	}
   5541 
   5542 	return 0;
   5543 
   5544  fail_3:
   5545 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5546  fail_2:
   5547 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5548 	    rxq->rxq_desc_size);
   5549  fail_1:
   5550 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5551  fail_0:
   5552 	return error;
   5553 }
   5554 
   5555 static void
   5556 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5557 {
   5558 
   5559 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5560 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5561 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5562 	    rxq->rxq_desc_size);
   5563 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5564 }
   5565 
   5566 
   5567 static int
   5568 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5569 {
   5570 	int i, error;
   5571 
   5572 	/* Create the transmit buffer DMA maps. */
   5573 	WM_TXQUEUELEN(txq) =
   5574 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5575 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5576 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5577 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5578 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5579 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5580 			aprint_error_dev(sc->sc_dev,
   5581 			    "unable to create Tx DMA map %d, error = %d\n",
   5582 			    i, error);
   5583 			goto fail;
   5584 		}
   5585 	}
   5586 
   5587 	return 0;
   5588 
   5589  fail:
   5590 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5591 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5592 			bus_dmamap_destroy(sc->sc_dmat,
   5593 			    txq->txq_soft[i].txs_dmamap);
   5594 	}
   5595 	return error;
   5596 }
   5597 
   5598 static void
   5599 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5600 {
   5601 	int i;
   5602 
   5603 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5604 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5605 			bus_dmamap_destroy(sc->sc_dmat,
   5606 			    txq->txq_soft[i].txs_dmamap);
   5607 	}
   5608 }
   5609 
   5610 static int
   5611 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5612 {
   5613 	int i, error;
   5614 
   5615 	/* Create the receive buffer DMA maps. */
   5616 	for (i = 0; i < WM_NRXDESC; i++) {
   5617 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5618 			    MCLBYTES, 0, 0,
   5619 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5620 			aprint_error_dev(sc->sc_dev,
   5621 			    "unable to create Rx DMA map %d error = %d\n",
   5622 			    i, error);
   5623 			goto fail;
   5624 		}
   5625 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5626 	}
   5627 
   5628 	return 0;
   5629 
   5630  fail:
   5631 	for (i = 0; i < WM_NRXDESC; i++) {
   5632 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5633 			bus_dmamap_destroy(sc->sc_dmat,
   5634 			    rxq->rxq_soft[i].rxs_dmamap);
   5635 	}
   5636 	return error;
   5637 }
   5638 
   5639 static void
   5640 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5641 {
   5642 	int i;
   5643 
   5644 	for (i = 0; i < WM_NRXDESC; i++) {
   5645 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5646 			bus_dmamap_destroy(sc->sc_dmat,
   5647 			    rxq->rxq_soft[i].rxs_dmamap);
   5648 	}
   5649 }
   5650 
   5651 /*
   5652  * wm_alloc_quques:
   5653  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5654  */
   5655 static int
   5656 wm_alloc_txrx_queues(struct wm_softc *sc)
   5657 {
   5658 	int i, error, tx_done, rx_done;
   5659 
   5660 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5661 	    KM_SLEEP);
   5662 	if (sc->sc_queue == NULL) {
   5663 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5664 		error = ENOMEM;
   5665 		goto fail_0;
   5666 	}
   5667 
   5668 	/*
   5669 	 * For transmission
   5670 	 */
   5671 	error = 0;
   5672 	tx_done = 0;
   5673 	for (i = 0; i < sc->sc_nqueues; i++) {
   5674 #ifdef WM_EVENT_COUNTERS
   5675 		int j;
   5676 		const char *xname;
   5677 #endif
   5678 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5679 		txq->txq_sc = sc;
   5680 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5681 
   5682 		error = wm_alloc_tx_descs(sc, txq);
   5683 		if (error)
   5684 			break;
   5685 		error = wm_alloc_tx_buffer(sc, txq);
   5686 		if (error) {
   5687 			wm_free_tx_descs(sc, txq);
   5688 			break;
   5689 		}
   5690 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5691 		if (txq->txq_interq == NULL) {
   5692 			wm_free_tx_descs(sc, txq);
   5693 			wm_free_tx_buffer(sc, txq);
   5694 			error = ENOMEM;
   5695 			break;
   5696 		}
   5697 
   5698 #ifdef WM_EVENT_COUNTERS
   5699 		xname = device_xname(sc->sc_dev);
   5700 
   5701 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5702 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5703 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5704 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5705 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5706 
   5707 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5708 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5709 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5710 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5711 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5712 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5713 
   5714 		for (j = 0; j < WM_NTXSEGS; j++) {
   5715 			snprintf(txq->txq_txseg_evcnt_names[j],
   5716 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5717 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5718 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5719 		}
   5720 
   5721 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5722 
   5723 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5724 #endif /* WM_EVENT_COUNTERS */
   5725 
   5726 		tx_done++;
   5727 	}
   5728 	if (error)
   5729 		goto fail_1;
   5730 
   5731 	/*
   5732 	 * For recieve
   5733 	 */
   5734 	error = 0;
   5735 	rx_done = 0;
   5736 	for (i = 0; i < sc->sc_nqueues; i++) {
   5737 #ifdef WM_EVENT_COUNTERS
   5738 		const char *xname;
   5739 #endif
   5740 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5741 		rxq->rxq_sc = sc;
   5742 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5743 
   5744 		error = wm_alloc_rx_descs(sc, rxq);
   5745 		if (error)
   5746 			break;
   5747 
   5748 		error = wm_alloc_rx_buffer(sc, rxq);
   5749 		if (error) {
   5750 			wm_free_rx_descs(sc, rxq);
   5751 			break;
   5752 		}
   5753 
   5754 #ifdef WM_EVENT_COUNTERS
   5755 		xname = device_xname(sc->sc_dev);
   5756 
   5757 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5758 
   5759 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5760 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5761 #endif /* WM_EVENT_COUNTERS */
   5762 
   5763 		rx_done++;
   5764 	}
   5765 	if (error)
   5766 		goto fail_2;
   5767 
   5768 	return 0;
   5769 
   5770  fail_2:
   5771 	for (i = 0; i < rx_done; i++) {
   5772 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5773 		wm_free_rx_buffer(sc, rxq);
   5774 		wm_free_rx_descs(sc, rxq);
   5775 		if (rxq->rxq_lock)
   5776 			mutex_obj_free(rxq->rxq_lock);
   5777 	}
   5778  fail_1:
   5779 	for (i = 0; i < tx_done; i++) {
   5780 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5781 		pcq_destroy(txq->txq_interq);
   5782 		wm_free_tx_buffer(sc, txq);
   5783 		wm_free_tx_descs(sc, txq);
   5784 		if (txq->txq_lock)
   5785 			mutex_obj_free(txq->txq_lock);
   5786 	}
   5787 
   5788 	kmem_free(sc->sc_queue,
   5789 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5790  fail_0:
   5791 	return error;
   5792 }
   5793 
   5794 /*
   5795  * wm_free_quques:
   5796  *	Free {tx,rx}descs and {tx,rx} buffers
   5797  */
   5798 static void
   5799 wm_free_txrx_queues(struct wm_softc *sc)
   5800 {
   5801 	int i;
   5802 
   5803 	for (i = 0; i < sc->sc_nqueues; i++) {
   5804 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5805 		wm_free_rx_buffer(sc, rxq);
   5806 		wm_free_rx_descs(sc, rxq);
   5807 		if (rxq->rxq_lock)
   5808 			mutex_obj_free(rxq->rxq_lock);
   5809 	}
   5810 
   5811 	for (i = 0; i < sc->sc_nqueues; i++) {
   5812 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5813 		wm_free_tx_buffer(sc, txq);
   5814 		wm_free_tx_descs(sc, txq);
   5815 		if (txq->txq_lock)
   5816 			mutex_obj_free(txq->txq_lock);
   5817 	}
   5818 
   5819 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5820 }
   5821 
   5822 static void
   5823 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5824 {
   5825 
   5826 	KASSERT(mutex_owned(txq->txq_lock));
   5827 
   5828 	/* Initialize the transmit descriptor ring. */
   5829 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5830 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5831 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5832 	txq->txq_free = WM_NTXDESC(txq);
   5833 	txq->txq_next = 0;
   5834 }
   5835 
   5836 static void
   5837 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5838     struct wm_txqueue *txq)
   5839 {
   5840 
   5841 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5842 		device_xname(sc->sc_dev), __func__));
   5843 	KASSERT(mutex_owned(txq->txq_lock));
   5844 
   5845 	if (sc->sc_type < WM_T_82543) {
   5846 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5847 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5848 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5849 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5850 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5851 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5852 	} else {
   5853 		int qid = wmq->wmq_id;
   5854 
   5855 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5856 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5857 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5858 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5859 
   5860 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5861 			/*
   5862 			 * Don't write TDT before TCTL.EN is set.
   5863 			 * See the document.
   5864 			 */
   5865 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5866 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5867 			    | TXDCTL_WTHRESH(0));
   5868 		else {
   5869 			/* ITR / 4 */
   5870 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5871 			if (sc->sc_type >= WM_T_82540) {
   5872 				/* should be same */
   5873 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5874 			}
   5875 
   5876 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5877 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5878 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5879 		}
   5880 	}
   5881 }
   5882 
   5883 static void
   5884 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5885 {
   5886 	int i;
   5887 
   5888 	KASSERT(mutex_owned(txq->txq_lock));
   5889 
   5890 	/* Initialize the transmit job descriptors. */
   5891 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5892 		txq->txq_soft[i].txs_mbuf = NULL;
   5893 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5894 	txq->txq_snext = 0;
   5895 	txq->txq_sdirty = 0;
   5896 }
   5897 
   5898 static void
   5899 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5900     struct wm_txqueue *txq)
   5901 {
   5902 
   5903 	KASSERT(mutex_owned(txq->txq_lock));
   5904 
   5905 	/*
   5906 	 * Set up some register offsets that are different between
   5907 	 * the i82542 and the i82543 and later chips.
   5908 	 */
   5909 	if (sc->sc_type < WM_T_82543)
   5910 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5911 	else
   5912 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5913 
   5914 	wm_init_tx_descs(sc, txq);
   5915 	wm_init_tx_regs(sc, wmq, txq);
   5916 	wm_init_tx_buffer(sc, txq);
   5917 }
   5918 
   5919 static void
   5920 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5921     struct wm_rxqueue *rxq)
   5922 {
   5923 
   5924 	KASSERT(mutex_owned(rxq->rxq_lock));
   5925 
   5926 	/*
   5927 	 * Initialize the receive descriptor and receive job
   5928 	 * descriptor rings.
   5929 	 */
   5930 	if (sc->sc_type < WM_T_82543) {
   5931 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5932 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5933 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5934 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5935 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5936 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5937 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5938 
   5939 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5940 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5941 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5942 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5943 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5944 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5945 	} else {
   5946 		int qid = wmq->wmq_id;
   5947 
   5948 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5949 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5950 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5951 
   5952 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5953 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   5954 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   5955 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5956 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5957 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5958 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5959 			    | RXDCTL_WTHRESH(1));
   5960 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5961 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5962 		} else {
   5963 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5964 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5965 			/* ITR / 4 */
   5966 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5967 			/* MUST be same */
   5968 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5969 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5970 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5971 		}
   5972 	}
   5973 }
   5974 
   5975 static int
   5976 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5977 {
   5978 	struct wm_rxsoft *rxs;
   5979 	int error, i;
   5980 
   5981 	KASSERT(mutex_owned(rxq->rxq_lock));
   5982 
   5983 	for (i = 0; i < WM_NRXDESC; i++) {
   5984 		rxs = &rxq->rxq_soft[i];
   5985 		if (rxs->rxs_mbuf == NULL) {
   5986 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5987 				log(LOG_ERR, "%s: unable to allocate or map "
   5988 				    "rx buffer %d, error = %d\n",
   5989 				    device_xname(sc->sc_dev), i, error);
   5990 				/*
   5991 				 * XXX Should attempt to run with fewer receive
   5992 				 * XXX buffers instead of just failing.
   5993 				 */
   5994 				wm_rxdrain(rxq);
   5995 				return ENOMEM;
   5996 			}
   5997 		} else {
   5998 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5999 				wm_init_rxdesc(rxq, i);
   6000 			/*
   6001 			 * For 82575 and newer device, the RX descriptors
   6002 			 * must be initialized after the setting of RCTL.EN in
   6003 			 * wm_set_filter()
   6004 			 */
   6005 		}
   6006 	}
   6007 	rxq->rxq_ptr = 0;
   6008 	rxq->rxq_discard = 0;
   6009 	WM_RXCHAIN_RESET(rxq);
   6010 
   6011 	return 0;
   6012 }
   6013 
   6014 static int
   6015 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6016     struct wm_rxqueue *rxq)
   6017 {
   6018 
   6019 	KASSERT(mutex_owned(rxq->rxq_lock));
   6020 
   6021 	/*
   6022 	 * Set up some register offsets that are different between
   6023 	 * the i82542 and the i82543 and later chips.
   6024 	 */
   6025 	if (sc->sc_type < WM_T_82543)
   6026 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6027 	else
   6028 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6029 
   6030 	wm_init_rx_regs(sc, wmq, rxq);
   6031 	return wm_init_rx_buffer(sc, rxq);
   6032 }
   6033 
   6034 /*
   6035  * wm_init_quques:
   6036  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6037  */
   6038 static int
   6039 wm_init_txrx_queues(struct wm_softc *sc)
   6040 {
   6041 	int i, error = 0;
   6042 
   6043 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6044 		device_xname(sc->sc_dev), __func__));
   6045 
   6046 	for (i = 0; i < sc->sc_nqueues; i++) {
   6047 		struct wm_queue *wmq = &sc->sc_queue[i];
   6048 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6049 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6050 
   6051 		mutex_enter(txq->txq_lock);
   6052 		wm_init_tx_queue(sc, wmq, txq);
   6053 		mutex_exit(txq->txq_lock);
   6054 
   6055 		mutex_enter(rxq->rxq_lock);
   6056 		error = wm_init_rx_queue(sc, wmq, rxq);
   6057 		mutex_exit(rxq->rxq_lock);
   6058 		if (error)
   6059 			break;
   6060 	}
   6061 
   6062 	return error;
   6063 }
   6064 
   6065 /*
   6066  * wm_tx_offload:
   6067  *
   6068  *	Set up TCP/IP checksumming parameters for the
   6069  *	specified packet.
   6070  */
   6071 static int
   6072 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6073     uint8_t *fieldsp)
   6074 {
   6075 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6076 	struct mbuf *m0 = txs->txs_mbuf;
   6077 	struct livengood_tcpip_ctxdesc *t;
   6078 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6079 	uint32_t ipcse;
   6080 	struct ether_header *eh;
   6081 	int offset, iphl;
   6082 	uint8_t fields;
   6083 
   6084 	/*
   6085 	 * XXX It would be nice if the mbuf pkthdr had offset
   6086 	 * fields for the protocol headers.
   6087 	 */
   6088 
   6089 	eh = mtod(m0, struct ether_header *);
   6090 	switch (htons(eh->ether_type)) {
   6091 	case ETHERTYPE_IP:
   6092 	case ETHERTYPE_IPV6:
   6093 		offset = ETHER_HDR_LEN;
   6094 		break;
   6095 
   6096 	case ETHERTYPE_VLAN:
   6097 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6098 		break;
   6099 
   6100 	default:
   6101 		/*
   6102 		 * Don't support this protocol or encapsulation.
   6103 		 */
   6104 		*fieldsp = 0;
   6105 		*cmdp = 0;
   6106 		return 0;
   6107 	}
   6108 
   6109 	if ((m0->m_pkthdr.csum_flags &
   6110 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6111 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6112 	} else {
   6113 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6114 	}
   6115 	ipcse = offset + iphl - 1;
   6116 
   6117 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6118 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6119 	seg = 0;
   6120 	fields = 0;
   6121 
   6122 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6123 		int hlen = offset + iphl;
   6124 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6125 
   6126 		if (__predict_false(m0->m_len <
   6127 				    (hlen + sizeof(struct tcphdr)))) {
   6128 			/*
   6129 			 * TCP/IP headers are not in the first mbuf; we need
   6130 			 * to do this the slow and painful way.  Let's just
   6131 			 * hope this doesn't happen very often.
   6132 			 */
   6133 			struct tcphdr th;
   6134 
   6135 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6136 
   6137 			m_copydata(m0, hlen, sizeof(th), &th);
   6138 			if (v4) {
   6139 				struct ip ip;
   6140 
   6141 				m_copydata(m0, offset, sizeof(ip), &ip);
   6142 				ip.ip_len = 0;
   6143 				m_copyback(m0,
   6144 				    offset + offsetof(struct ip, ip_len),
   6145 				    sizeof(ip.ip_len), &ip.ip_len);
   6146 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6147 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6148 			} else {
   6149 				struct ip6_hdr ip6;
   6150 
   6151 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6152 				ip6.ip6_plen = 0;
   6153 				m_copyback(m0,
   6154 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6155 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6156 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6157 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6158 			}
   6159 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6160 			    sizeof(th.th_sum), &th.th_sum);
   6161 
   6162 			hlen += th.th_off << 2;
   6163 		} else {
   6164 			/*
   6165 			 * TCP/IP headers are in the first mbuf; we can do
   6166 			 * this the easy way.
   6167 			 */
   6168 			struct tcphdr *th;
   6169 
   6170 			if (v4) {
   6171 				struct ip *ip =
   6172 				    (void *)(mtod(m0, char *) + offset);
   6173 				th = (void *)(mtod(m0, char *) + hlen);
   6174 
   6175 				ip->ip_len = 0;
   6176 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6177 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6178 			} else {
   6179 				struct ip6_hdr *ip6 =
   6180 				    (void *)(mtod(m0, char *) + offset);
   6181 				th = (void *)(mtod(m0, char *) + hlen);
   6182 
   6183 				ip6->ip6_plen = 0;
   6184 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6185 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6186 			}
   6187 			hlen += th->th_off << 2;
   6188 		}
   6189 
   6190 		if (v4) {
   6191 			WM_Q_EVCNT_INCR(txq, txtso);
   6192 			cmdlen |= WTX_TCPIP_CMD_IP;
   6193 		} else {
   6194 			WM_Q_EVCNT_INCR(txq, txtso6);
   6195 			ipcse = 0;
   6196 		}
   6197 		cmd |= WTX_TCPIP_CMD_TSE;
   6198 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6199 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6200 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6201 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6202 	}
   6203 
   6204 	/*
   6205 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6206 	 * offload feature, if we load the context descriptor, we
   6207 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6208 	 */
   6209 
   6210 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6211 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6212 	    WTX_TCPIP_IPCSE(ipcse);
   6213 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6214 		WM_Q_EVCNT_INCR(txq, txipsum);
   6215 		fields |= WTX_IXSM;
   6216 	}
   6217 
   6218 	offset += iphl;
   6219 
   6220 	if (m0->m_pkthdr.csum_flags &
   6221 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6222 		WM_Q_EVCNT_INCR(txq, txtusum);
   6223 		fields |= WTX_TXSM;
   6224 		tucs = WTX_TCPIP_TUCSS(offset) |
   6225 		    WTX_TCPIP_TUCSO(offset +
   6226 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6227 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6228 	} else if ((m0->m_pkthdr.csum_flags &
   6229 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6230 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6231 		fields |= WTX_TXSM;
   6232 		tucs = WTX_TCPIP_TUCSS(offset) |
   6233 		    WTX_TCPIP_TUCSO(offset +
   6234 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6235 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6236 	} else {
   6237 		/* Just initialize it to a valid TCP context. */
   6238 		tucs = WTX_TCPIP_TUCSS(offset) |
   6239 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6240 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6241 	}
   6242 
   6243 	/* Fill in the context descriptor. */
   6244 	t = (struct livengood_tcpip_ctxdesc *)
   6245 	    &txq->txq_descs[txq->txq_next];
   6246 	t->tcpip_ipcs = htole32(ipcs);
   6247 	t->tcpip_tucs = htole32(tucs);
   6248 	t->tcpip_cmdlen = htole32(cmdlen);
   6249 	t->tcpip_seg = htole32(seg);
   6250 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6251 
   6252 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6253 	txs->txs_ndesc++;
   6254 
   6255 	*cmdp = cmd;
   6256 	*fieldsp = fields;
   6257 
   6258 	return 0;
   6259 }
   6260 
   6261 /*
   6262  * wm_start:		[ifnet interface function]
   6263  *
   6264  *	Start packet transmission on the interface.
   6265  */
   6266 static void
   6267 wm_start(struct ifnet *ifp)
   6268 {
   6269 	struct wm_softc *sc = ifp->if_softc;
   6270 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6271 
   6272 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6273 
   6274 	mutex_enter(txq->txq_lock);
   6275 	if (!txq->txq_stopping)
   6276 		wm_start_locked(ifp);
   6277 	mutex_exit(txq->txq_lock);
   6278 }
   6279 
   6280 static void
   6281 wm_start_locked(struct ifnet *ifp)
   6282 {
   6283 	struct wm_softc *sc = ifp->if_softc;
   6284 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6285 	struct mbuf *m0;
   6286 	struct m_tag *mtag;
   6287 	struct wm_txsoft *txs;
   6288 	bus_dmamap_t dmamap;
   6289 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6290 	bus_addr_t curaddr;
   6291 	bus_size_t seglen, curlen;
   6292 	uint32_t cksumcmd;
   6293 	uint8_t cksumfields;
   6294 
   6295 	KASSERT(mutex_owned(txq->txq_lock));
   6296 
   6297 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6298 		return;
   6299 
   6300 	/* Remember the previous number of free descriptors. */
   6301 	ofree = txq->txq_free;
   6302 
   6303 	/*
   6304 	 * Loop through the send queue, setting up transmit descriptors
   6305 	 * until we drain the queue, or use up all available transmit
   6306 	 * descriptors.
   6307 	 */
   6308 	for (;;) {
   6309 		m0 = NULL;
   6310 
   6311 		/* Get a work queue entry. */
   6312 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6313 			wm_txeof(sc, txq);
   6314 			if (txq->txq_sfree == 0) {
   6315 				DPRINTF(WM_DEBUG_TX,
   6316 				    ("%s: TX: no free job descriptors\n",
   6317 					device_xname(sc->sc_dev)));
   6318 				WM_Q_EVCNT_INCR(txq, txsstall);
   6319 				break;
   6320 			}
   6321 		}
   6322 
   6323 		/* Grab a packet off the queue. */
   6324 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6325 		if (m0 == NULL)
   6326 			break;
   6327 
   6328 		DPRINTF(WM_DEBUG_TX,
   6329 		    ("%s: TX: have packet to transmit: %p\n",
   6330 		    device_xname(sc->sc_dev), m0));
   6331 
   6332 		txs = &txq->txq_soft[txq->txq_snext];
   6333 		dmamap = txs->txs_dmamap;
   6334 
   6335 		use_tso = (m0->m_pkthdr.csum_flags &
   6336 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6337 
   6338 		/*
   6339 		 * So says the Linux driver:
   6340 		 * The controller does a simple calculation to make sure
   6341 		 * there is enough room in the FIFO before initiating the
   6342 		 * DMA for each buffer.  The calc is:
   6343 		 *	4 = ceil(buffer len / MSS)
   6344 		 * To make sure we don't overrun the FIFO, adjust the max
   6345 		 * buffer len if the MSS drops.
   6346 		 */
   6347 		dmamap->dm_maxsegsz =
   6348 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6349 		    ? m0->m_pkthdr.segsz << 2
   6350 		    : WTX_MAX_LEN;
   6351 
   6352 		/*
   6353 		 * Load the DMA map.  If this fails, the packet either
   6354 		 * didn't fit in the allotted number of segments, or we
   6355 		 * were short on resources.  For the too-many-segments
   6356 		 * case, we simply report an error and drop the packet,
   6357 		 * since we can't sanely copy a jumbo packet to a single
   6358 		 * buffer.
   6359 		 */
   6360 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6361 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6362 		if (error) {
   6363 			if (error == EFBIG) {
   6364 				WM_Q_EVCNT_INCR(txq, txdrop);
   6365 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6366 				    "DMA segments, dropping...\n",
   6367 				    device_xname(sc->sc_dev));
   6368 				wm_dump_mbuf_chain(sc, m0);
   6369 				m_freem(m0);
   6370 				continue;
   6371 			}
   6372 			/*  Short on resources, just stop for now. */
   6373 			DPRINTF(WM_DEBUG_TX,
   6374 			    ("%s: TX: dmamap load failed: %d\n",
   6375 			    device_xname(sc->sc_dev), error));
   6376 			break;
   6377 		}
   6378 
   6379 		segs_needed = dmamap->dm_nsegs;
   6380 		if (use_tso) {
   6381 			/* For sentinel descriptor; see below. */
   6382 			segs_needed++;
   6383 		}
   6384 
   6385 		/*
   6386 		 * Ensure we have enough descriptors free to describe
   6387 		 * the packet.  Note, we always reserve one descriptor
   6388 		 * at the end of the ring due to the semantics of the
   6389 		 * TDT register, plus one more in the event we need
   6390 		 * to load offload context.
   6391 		 */
   6392 		if (segs_needed > txq->txq_free - 2) {
   6393 			/*
   6394 			 * Not enough free descriptors to transmit this
   6395 			 * packet.  We haven't committed anything yet,
   6396 			 * so just unload the DMA map, put the packet
   6397 			 * pack on the queue, and punt.  Notify the upper
   6398 			 * layer that there are no more slots left.
   6399 			 */
   6400 			DPRINTF(WM_DEBUG_TX,
   6401 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6402 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6403 			    segs_needed, txq->txq_free - 1));
   6404 			ifp->if_flags |= IFF_OACTIVE;
   6405 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6406 			WM_Q_EVCNT_INCR(txq, txdstall);
   6407 			break;
   6408 		}
   6409 
   6410 		/*
   6411 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6412 		 * once we know we can transmit the packet, since we
   6413 		 * do some internal FIFO space accounting here.
   6414 		 */
   6415 		if (sc->sc_type == WM_T_82547 &&
   6416 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6417 			DPRINTF(WM_DEBUG_TX,
   6418 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6419 			    device_xname(sc->sc_dev)));
   6420 			ifp->if_flags |= IFF_OACTIVE;
   6421 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6422 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6423 			break;
   6424 		}
   6425 
   6426 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6427 
   6428 		DPRINTF(WM_DEBUG_TX,
   6429 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6430 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6431 
   6432 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6433 
   6434 		/*
   6435 		 * Store a pointer to the packet so that we can free it
   6436 		 * later.
   6437 		 *
   6438 		 * Initially, we consider the number of descriptors the
   6439 		 * packet uses the number of DMA segments.  This may be
   6440 		 * incremented by 1 if we do checksum offload (a descriptor
   6441 		 * is used to set the checksum context).
   6442 		 */
   6443 		txs->txs_mbuf = m0;
   6444 		txs->txs_firstdesc = txq->txq_next;
   6445 		txs->txs_ndesc = segs_needed;
   6446 
   6447 		/* Set up offload parameters for this packet. */
   6448 		if (m0->m_pkthdr.csum_flags &
   6449 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6450 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6451 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6452 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6453 					  &cksumfields) != 0) {
   6454 				/* Error message already displayed. */
   6455 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6456 				continue;
   6457 			}
   6458 		} else {
   6459 			cksumcmd = 0;
   6460 			cksumfields = 0;
   6461 		}
   6462 
   6463 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6464 
   6465 		/* Sync the DMA map. */
   6466 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6467 		    BUS_DMASYNC_PREWRITE);
   6468 
   6469 		/* Initialize the transmit descriptor. */
   6470 		for (nexttx = txq->txq_next, seg = 0;
   6471 		     seg < dmamap->dm_nsegs; seg++) {
   6472 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6473 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6474 			     seglen != 0;
   6475 			     curaddr += curlen, seglen -= curlen,
   6476 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6477 				curlen = seglen;
   6478 
   6479 				/*
   6480 				 * So says the Linux driver:
   6481 				 * Work around for premature descriptor
   6482 				 * write-backs in TSO mode.  Append a
   6483 				 * 4-byte sentinel descriptor.
   6484 				 */
   6485 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6486 				    curlen > 8)
   6487 					curlen -= 4;
   6488 
   6489 				wm_set_dma_addr(
   6490 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6491 				txq->txq_descs[nexttx].wtx_cmdlen
   6492 				    = htole32(cksumcmd | curlen);
   6493 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6494 				    = 0;
   6495 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6496 				    = cksumfields;
   6497 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6498 				lasttx = nexttx;
   6499 
   6500 				DPRINTF(WM_DEBUG_TX,
   6501 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6502 				     "len %#04zx\n",
   6503 				    device_xname(sc->sc_dev), nexttx,
   6504 				    (uint64_t)curaddr, curlen));
   6505 			}
   6506 		}
   6507 
   6508 		KASSERT(lasttx != -1);
   6509 
   6510 		/*
   6511 		 * Set up the command byte on the last descriptor of
   6512 		 * the packet.  If we're in the interrupt delay window,
   6513 		 * delay the interrupt.
   6514 		 */
   6515 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6516 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6517 
   6518 		/*
   6519 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6520 		 * up the descriptor to encapsulate the packet for us.
   6521 		 *
   6522 		 * This is only valid on the last descriptor of the packet.
   6523 		 */
   6524 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6525 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6526 			    htole32(WTX_CMD_VLE);
   6527 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6528 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6529 		}
   6530 
   6531 		txs->txs_lastdesc = lasttx;
   6532 
   6533 		DPRINTF(WM_DEBUG_TX,
   6534 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6535 		    device_xname(sc->sc_dev),
   6536 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6537 
   6538 		/* Sync the descriptors we're using. */
   6539 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6540 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6541 
   6542 		/* Give the packet to the chip. */
   6543 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6544 
   6545 		DPRINTF(WM_DEBUG_TX,
   6546 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6547 
   6548 		DPRINTF(WM_DEBUG_TX,
   6549 		    ("%s: TX: finished transmitting packet, job %d\n",
   6550 		    device_xname(sc->sc_dev), txq->txq_snext));
   6551 
   6552 		/* Advance the tx pointer. */
   6553 		txq->txq_free -= txs->txs_ndesc;
   6554 		txq->txq_next = nexttx;
   6555 
   6556 		txq->txq_sfree--;
   6557 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6558 
   6559 		/* Pass the packet to any BPF listeners. */
   6560 		bpf_mtap(ifp, m0);
   6561 	}
   6562 
   6563 	if (m0 != NULL) {
   6564 		ifp->if_flags |= IFF_OACTIVE;
   6565 		WM_Q_EVCNT_INCR(txq, txdrop);
   6566 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6567 			__func__));
   6568 		m_freem(m0);
   6569 	}
   6570 
   6571 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6572 		/* No more slots; notify upper layer. */
   6573 		ifp->if_flags |= IFF_OACTIVE;
   6574 	}
   6575 
   6576 	if (txq->txq_free != ofree) {
   6577 		/* Set a watchdog timer in case the chip flakes out. */
   6578 		ifp->if_timer = 5;
   6579 	}
   6580 }
   6581 
   6582 /*
   6583  * wm_nq_tx_offload:
   6584  *
   6585  *	Set up TCP/IP checksumming parameters for the
   6586  *	specified packet, for NEWQUEUE devices
   6587  */
   6588 static int
   6589 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6590     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6591 {
   6592 	struct mbuf *m0 = txs->txs_mbuf;
   6593 	struct m_tag *mtag;
   6594 	uint32_t vl_len, mssidx, cmdc;
   6595 	struct ether_header *eh;
   6596 	int offset, iphl;
   6597 
   6598 	/*
   6599 	 * XXX It would be nice if the mbuf pkthdr had offset
   6600 	 * fields for the protocol headers.
   6601 	 */
   6602 	*cmdlenp = 0;
   6603 	*fieldsp = 0;
   6604 
   6605 	eh = mtod(m0, struct ether_header *);
   6606 	switch (htons(eh->ether_type)) {
   6607 	case ETHERTYPE_IP:
   6608 	case ETHERTYPE_IPV6:
   6609 		offset = ETHER_HDR_LEN;
   6610 		break;
   6611 
   6612 	case ETHERTYPE_VLAN:
   6613 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6614 		break;
   6615 
   6616 	default:
   6617 		/* Don't support this protocol or encapsulation. */
   6618 		*do_csum = false;
   6619 		return 0;
   6620 	}
   6621 	*do_csum = true;
   6622 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6623 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6624 
   6625 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6626 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6627 
   6628 	if ((m0->m_pkthdr.csum_flags &
   6629 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6630 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6631 	} else {
   6632 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6633 	}
   6634 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6635 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6636 
   6637 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6638 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6639 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6640 		*cmdlenp |= NQTX_CMD_VLE;
   6641 	}
   6642 
   6643 	mssidx = 0;
   6644 
   6645 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6646 		int hlen = offset + iphl;
   6647 		int tcp_hlen;
   6648 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6649 
   6650 		if (__predict_false(m0->m_len <
   6651 				    (hlen + sizeof(struct tcphdr)))) {
   6652 			/*
   6653 			 * TCP/IP headers are not in the first mbuf; we need
   6654 			 * to do this the slow and painful way.  Let's just
   6655 			 * hope this doesn't happen very often.
   6656 			 */
   6657 			struct tcphdr th;
   6658 
   6659 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6660 
   6661 			m_copydata(m0, hlen, sizeof(th), &th);
   6662 			if (v4) {
   6663 				struct ip ip;
   6664 
   6665 				m_copydata(m0, offset, sizeof(ip), &ip);
   6666 				ip.ip_len = 0;
   6667 				m_copyback(m0,
   6668 				    offset + offsetof(struct ip, ip_len),
   6669 				    sizeof(ip.ip_len), &ip.ip_len);
   6670 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6671 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6672 			} else {
   6673 				struct ip6_hdr ip6;
   6674 
   6675 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6676 				ip6.ip6_plen = 0;
   6677 				m_copyback(m0,
   6678 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6679 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6680 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6681 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6682 			}
   6683 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6684 			    sizeof(th.th_sum), &th.th_sum);
   6685 
   6686 			tcp_hlen = th.th_off << 2;
   6687 		} else {
   6688 			/*
   6689 			 * TCP/IP headers are in the first mbuf; we can do
   6690 			 * this the easy way.
   6691 			 */
   6692 			struct tcphdr *th;
   6693 
   6694 			if (v4) {
   6695 				struct ip *ip =
   6696 				    (void *)(mtod(m0, char *) + offset);
   6697 				th = (void *)(mtod(m0, char *) + hlen);
   6698 
   6699 				ip->ip_len = 0;
   6700 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6701 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6702 			} else {
   6703 				struct ip6_hdr *ip6 =
   6704 				    (void *)(mtod(m0, char *) + offset);
   6705 				th = (void *)(mtod(m0, char *) + hlen);
   6706 
   6707 				ip6->ip6_plen = 0;
   6708 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6709 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6710 			}
   6711 			tcp_hlen = th->th_off << 2;
   6712 		}
   6713 		hlen += tcp_hlen;
   6714 		*cmdlenp |= NQTX_CMD_TSE;
   6715 
   6716 		if (v4) {
   6717 			WM_Q_EVCNT_INCR(txq, txtso);
   6718 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6719 		} else {
   6720 			WM_Q_EVCNT_INCR(txq, txtso6);
   6721 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6722 		}
   6723 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6724 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6725 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6726 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6727 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6728 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6729 	} else {
   6730 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6731 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6732 	}
   6733 
   6734 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6735 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6736 		cmdc |= NQTXC_CMD_IP4;
   6737 	}
   6738 
   6739 	if (m0->m_pkthdr.csum_flags &
   6740 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6741 		WM_Q_EVCNT_INCR(txq, txtusum);
   6742 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6743 			cmdc |= NQTXC_CMD_TCP;
   6744 		} else {
   6745 			cmdc |= NQTXC_CMD_UDP;
   6746 		}
   6747 		cmdc |= NQTXC_CMD_IP4;
   6748 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6749 	}
   6750 	if (m0->m_pkthdr.csum_flags &
   6751 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6752 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6753 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6754 			cmdc |= NQTXC_CMD_TCP;
   6755 		} else {
   6756 			cmdc |= NQTXC_CMD_UDP;
   6757 		}
   6758 		cmdc |= NQTXC_CMD_IP6;
   6759 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6760 	}
   6761 
   6762 	/* Fill in the context descriptor. */
   6763 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6764 	    htole32(vl_len);
   6765 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6766 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6767 	    htole32(cmdc);
   6768 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6769 	    htole32(mssidx);
   6770 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6771 	DPRINTF(WM_DEBUG_TX,
   6772 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6773 	    txq->txq_next, 0, vl_len));
   6774 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6775 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6776 	txs->txs_ndesc++;
   6777 	return 0;
   6778 }
   6779 
   6780 /*
   6781  * wm_nq_start:		[ifnet interface function]
   6782  *
   6783  *	Start packet transmission on the interface for NEWQUEUE devices
   6784  */
   6785 static void
   6786 wm_nq_start(struct ifnet *ifp)
   6787 {
   6788 	struct wm_softc *sc = ifp->if_softc;
   6789 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6790 
   6791 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6792 
   6793 	mutex_enter(txq->txq_lock);
   6794 	if (!txq->txq_stopping)
   6795 		wm_nq_start_locked(ifp);
   6796 	mutex_exit(txq->txq_lock);
   6797 }
   6798 
   6799 static void
   6800 wm_nq_start_locked(struct ifnet *ifp)
   6801 {
   6802 	struct wm_softc *sc = ifp->if_softc;
   6803 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6804 
   6805 	wm_nq_send_common_locked(ifp, txq, false);
   6806 }
   6807 
   6808 static inline int
   6809 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6810 {
   6811 	struct wm_softc *sc = ifp->if_softc;
   6812 	u_int cpuid = cpu_index(curcpu());
   6813 
   6814 	/*
   6815 	 * Currently, simple distribute strategy.
   6816 	 * TODO:
   6817 	 * destribute by flowid(RSS has value).
   6818 	 */
   6819 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6820 }
   6821 
   6822 static int
   6823 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6824 {
   6825 	int qid;
   6826 	struct wm_softc *sc = ifp->if_softc;
   6827 	struct wm_txqueue *txq;
   6828 
   6829 	qid = wm_nq_select_txqueue(ifp, m);
   6830 	txq = &sc->sc_queue[qid].wmq_txq;
   6831 
   6832 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6833 		m_freem(m);
   6834 		WM_Q_EVCNT_INCR(txq, txdrop);
   6835 		return ENOBUFS;
   6836 	}
   6837 
   6838 	if (mutex_tryenter(txq->txq_lock)) {
   6839 		/* XXXX should be per TX queue */
   6840 		ifp->if_obytes += m->m_pkthdr.len;
   6841 		if (m->m_flags & M_MCAST)
   6842 			ifp->if_omcasts++;
   6843 
   6844 		if (!txq->txq_stopping)
   6845 			wm_nq_transmit_locked(ifp, txq);
   6846 		mutex_exit(txq->txq_lock);
   6847 	}
   6848 
   6849 	return 0;
   6850 }
   6851 
   6852 static void
   6853 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6854 {
   6855 
   6856 	wm_nq_send_common_locked(ifp, txq, true);
   6857 }
   6858 
   6859 static void
   6860 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6861     bool is_transmit)
   6862 {
   6863 	struct wm_softc *sc = ifp->if_softc;
   6864 	struct mbuf *m0;
   6865 	struct m_tag *mtag;
   6866 	struct wm_txsoft *txs;
   6867 	bus_dmamap_t dmamap;
   6868 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6869 	bool do_csum, sent;
   6870 
   6871 	KASSERT(mutex_owned(txq->txq_lock));
   6872 
   6873 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6874 		return;
   6875 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6876 		return;
   6877 
   6878 	sent = false;
   6879 
   6880 	/*
   6881 	 * Loop through the send queue, setting up transmit descriptors
   6882 	 * until we drain the queue, or use up all available transmit
   6883 	 * descriptors.
   6884 	 */
   6885 	for (;;) {
   6886 		m0 = NULL;
   6887 
   6888 		/* Get a work queue entry. */
   6889 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6890 			wm_txeof(sc, txq);
   6891 			if (txq->txq_sfree == 0) {
   6892 				DPRINTF(WM_DEBUG_TX,
   6893 				    ("%s: TX: no free job descriptors\n",
   6894 					device_xname(sc->sc_dev)));
   6895 				WM_Q_EVCNT_INCR(txq, txsstall);
   6896 				break;
   6897 			}
   6898 		}
   6899 
   6900 		/* Grab a packet off the queue. */
   6901 		if (is_transmit)
   6902 			m0 = pcq_get(txq->txq_interq);
   6903 		else
   6904 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6905 		if (m0 == NULL)
   6906 			break;
   6907 
   6908 		DPRINTF(WM_DEBUG_TX,
   6909 		    ("%s: TX: have packet to transmit: %p\n",
   6910 		    device_xname(sc->sc_dev), m0));
   6911 
   6912 		txs = &txq->txq_soft[txq->txq_snext];
   6913 		dmamap = txs->txs_dmamap;
   6914 
   6915 		/*
   6916 		 * Load the DMA map.  If this fails, the packet either
   6917 		 * didn't fit in the allotted number of segments, or we
   6918 		 * were short on resources.  For the too-many-segments
   6919 		 * case, we simply report an error and drop the packet,
   6920 		 * since we can't sanely copy a jumbo packet to a single
   6921 		 * buffer.
   6922 		 */
   6923 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6924 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6925 		if (error) {
   6926 			if (error == EFBIG) {
   6927 				WM_Q_EVCNT_INCR(txq, txdrop);
   6928 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6929 				    "DMA segments, dropping...\n",
   6930 				    device_xname(sc->sc_dev));
   6931 				wm_dump_mbuf_chain(sc, m0);
   6932 				m_freem(m0);
   6933 				continue;
   6934 			}
   6935 			/* Short on resources, just stop for now. */
   6936 			DPRINTF(WM_DEBUG_TX,
   6937 			    ("%s: TX: dmamap load failed: %d\n",
   6938 			    device_xname(sc->sc_dev), error));
   6939 			break;
   6940 		}
   6941 
   6942 		segs_needed = dmamap->dm_nsegs;
   6943 
   6944 		/*
   6945 		 * Ensure we have enough descriptors free to describe
   6946 		 * the packet.  Note, we always reserve one descriptor
   6947 		 * at the end of the ring due to the semantics of the
   6948 		 * TDT register, plus one more in the event we need
   6949 		 * to load offload context.
   6950 		 */
   6951 		if (segs_needed > txq->txq_free - 2) {
   6952 			/*
   6953 			 * Not enough free descriptors to transmit this
   6954 			 * packet.  We haven't committed anything yet,
   6955 			 * so just unload the DMA map, put the packet
   6956 			 * pack on the queue, and punt.  Notify the upper
   6957 			 * layer that there are no more slots left.
   6958 			 */
   6959 			DPRINTF(WM_DEBUG_TX,
   6960 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6961 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6962 			    segs_needed, txq->txq_free - 1));
   6963 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6964 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6965 			WM_Q_EVCNT_INCR(txq, txdstall);
   6966 			break;
   6967 		}
   6968 
   6969 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6970 
   6971 		DPRINTF(WM_DEBUG_TX,
   6972 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6973 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6974 
   6975 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6976 
   6977 		/*
   6978 		 * Store a pointer to the packet so that we can free it
   6979 		 * later.
   6980 		 *
   6981 		 * Initially, we consider the number of descriptors the
   6982 		 * packet uses the number of DMA segments.  This may be
   6983 		 * incremented by 1 if we do checksum offload (a descriptor
   6984 		 * is used to set the checksum context).
   6985 		 */
   6986 		txs->txs_mbuf = m0;
   6987 		txs->txs_firstdesc = txq->txq_next;
   6988 		txs->txs_ndesc = segs_needed;
   6989 
   6990 		/* Set up offload parameters for this packet. */
   6991 		uint32_t cmdlen, fields, dcmdlen;
   6992 		if (m0->m_pkthdr.csum_flags &
   6993 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6994 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6995 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6996 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6997 			    &do_csum) != 0) {
   6998 				/* Error message already displayed. */
   6999 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7000 				continue;
   7001 			}
   7002 		} else {
   7003 			do_csum = false;
   7004 			cmdlen = 0;
   7005 			fields = 0;
   7006 		}
   7007 
   7008 		/* Sync the DMA map. */
   7009 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7010 		    BUS_DMASYNC_PREWRITE);
   7011 
   7012 		/* Initialize the first transmit descriptor. */
   7013 		nexttx = txq->txq_next;
   7014 		if (!do_csum) {
   7015 			/* setup a legacy descriptor */
   7016 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7017 			    dmamap->dm_segs[0].ds_addr);
   7018 			txq->txq_descs[nexttx].wtx_cmdlen =
   7019 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7020 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7021 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7022 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7023 			    NULL) {
   7024 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7025 				    htole32(WTX_CMD_VLE);
   7026 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7027 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7028 			} else {
   7029 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7030 			}
   7031 			dcmdlen = 0;
   7032 		} else {
   7033 			/* setup an advanced data descriptor */
   7034 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7035 			    htole64(dmamap->dm_segs[0].ds_addr);
   7036 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7037 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7038 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   7039 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7040 			    htole32(fields);
   7041 			DPRINTF(WM_DEBUG_TX,
   7042 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7043 			    device_xname(sc->sc_dev), nexttx,
   7044 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7045 			DPRINTF(WM_DEBUG_TX,
   7046 			    ("\t 0x%08x%08x\n", fields,
   7047 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7048 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7049 		}
   7050 
   7051 		lasttx = nexttx;
   7052 		nexttx = WM_NEXTTX(txq, nexttx);
   7053 		/*
   7054 		 * fill in the next descriptors. legacy or adcanced format
   7055 		 * is the same here
   7056 		 */
   7057 		for (seg = 1; seg < dmamap->dm_nsegs;
   7058 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7059 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7060 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7061 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7062 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7063 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7064 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7065 			lasttx = nexttx;
   7066 
   7067 			DPRINTF(WM_DEBUG_TX,
   7068 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7069 			     "len %#04zx\n",
   7070 			    device_xname(sc->sc_dev), nexttx,
   7071 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7072 			    dmamap->dm_segs[seg].ds_len));
   7073 		}
   7074 
   7075 		KASSERT(lasttx != -1);
   7076 
   7077 		/*
   7078 		 * Set up the command byte on the last descriptor of
   7079 		 * the packet.  If we're in the interrupt delay window,
   7080 		 * delay the interrupt.
   7081 		 */
   7082 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7083 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7084 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7085 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7086 
   7087 		txs->txs_lastdesc = lasttx;
   7088 
   7089 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7090 		    device_xname(sc->sc_dev),
   7091 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7092 
   7093 		/* Sync the descriptors we're using. */
   7094 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7095 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7096 
   7097 		/* Give the packet to the chip. */
   7098 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7099 		sent = true;
   7100 
   7101 		DPRINTF(WM_DEBUG_TX,
   7102 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7103 
   7104 		DPRINTF(WM_DEBUG_TX,
   7105 		    ("%s: TX: finished transmitting packet, job %d\n",
   7106 		    device_xname(sc->sc_dev), txq->txq_snext));
   7107 
   7108 		/* Advance the tx pointer. */
   7109 		txq->txq_free -= txs->txs_ndesc;
   7110 		txq->txq_next = nexttx;
   7111 
   7112 		txq->txq_sfree--;
   7113 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7114 
   7115 		/* Pass the packet to any BPF listeners. */
   7116 		bpf_mtap(ifp, m0);
   7117 	}
   7118 
   7119 	if (m0 != NULL) {
   7120 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7121 		WM_Q_EVCNT_INCR(txq, txdrop);
   7122 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7123 			__func__));
   7124 		m_freem(m0);
   7125 	}
   7126 
   7127 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7128 		/* No more slots; notify upper layer. */
   7129 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7130 	}
   7131 
   7132 	if (sent) {
   7133 		/* Set a watchdog timer in case the chip flakes out. */
   7134 		ifp->if_timer = 5;
   7135 	}
   7136 }
   7137 
   7138 /* Interrupt */
   7139 
   7140 /*
   7141  * wm_txeof:
   7142  *
   7143  *	Helper; handle transmit interrupts.
   7144  */
   7145 static int
   7146 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7147 {
   7148 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7149 	struct wm_txsoft *txs;
   7150 	bool processed = false;
   7151 	int count = 0;
   7152 	int i;
   7153 	uint8_t status;
   7154 
   7155 	KASSERT(mutex_owned(txq->txq_lock));
   7156 
   7157 	if (txq->txq_stopping)
   7158 		return 0;
   7159 
   7160 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7161 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7162 	else
   7163 		ifp->if_flags &= ~IFF_OACTIVE;
   7164 
   7165 	/*
   7166 	 * Go through the Tx list and free mbufs for those
   7167 	 * frames which have been transmitted.
   7168 	 */
   7169 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7170 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7171 		txs = &txq->txq_soft[i];
   7172 
   7173 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7174 			device_xname(sc->sc_dev), i));
   7175 
   7176 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7177 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7178 
   7179 		status =
   7180 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7181 		if ((status & WTX_ST_DD) == 0) {
   7182 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7183 			    BUS_DMASYNC_PREREAD);
   7184 			break;
   7185 		}
   7186 
   7187 		processed = true;
   7188 		count++;
   7189 		DPRINTF(WM_DEBUG_TX,
   7190 		    ("%s: TX: job %d done: descs %d..%d\n",
   7191 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7192 		    txs->txs_lastdesc));
   7193 
   7194 		/*
   7195 		 * XXX We should probably be using the statistics
   7196 		 * XXX registers, but I don't know if they exist
   7197 		 * XXX on chips before the i82544.
   7198 		 */
   7199 
   7200 #ifdef WM_EVENT_COUNTERS
   7201 		if (status & WTX_ST_TU)
   7202 			WM_Q_EVCNT_INCR(txq, tu);
   7203 #endif /* WM_EVENT_COUNTERS */
   7204 
   7205 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7206 			ifp->if_oerrors++;
   7207 			if (status & WTX_ST_LC)
   7208 				log(LOG_WARNING, "%s: late collision\n",
   7209 				    device_xname(sc->sc_dev));
   7210 			else if (status & WTX_ST_EC) {
   7211 				ifp->if_collisions += 16;
   7212 				log(LOG_WARNING, "%s: excessive collisions\n",
   7213 				    device_xname(sc->sc_dev));
   7214 			}
   7215 		} else
   7216 			ifp->if_opackets++;
   7217 
   7218 		txq->txq_free += txs->txs_ndesc;
   7219 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7220 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7221 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7222 		m_freem(txs->txs_mbuf);
   7223 		txs->txs_mbuf = NULL;
   7224 	}
   7225 
   7226 	/* Update the dirty transmit buffer pointer. */
   7227 	txq->txq_sdirty = i;
   7228 	DPRINTF(WM_DEBUG_TX,
   7229 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7230 
   7231 	if (count != 0)
   7232 		rnd_add_uint32(&sc->rnd_source, count);
   7233 
   7234 	/*
   7235 	 * If there are no more pending transmissions, cancel the watchdog
   7236 	 * timer.
   7237 	 */
   7238 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7239 		ifp->if_timer = 0;
   7240 
   7241 	return processed;
   7242 }
   7243 
   7244 /*
   7245  * wm_rxeof:
   7246  *
   7247  *	Helper; handle receive interrupts.
   7248  */
   7249 static void
   7250 wm_rxeof(struct wm_rxqueue *rxq)
   7251 {
   7252 	struct wm_softc *sc = rxq->rxq_sc;
   7253 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7254 	struct wm_rxsoft *rxs;
   7255 	struct mbuf *m;
   7256 	int i, len;
   7257 	int count = 0;
   7258 	uint8_t status, errors;
   7259 	uint16_t vlantag;
   7260 
   7261 	KASSERT(mutex_owned(rxq->rxq_lock));
   7262 
   7263 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7264 		rxs = &rxq->rxq_soft[i];
   7265 
   7266 		DPRINTF(WM_DEBUG_RX,
   7267 		    ("%s: RX: checking descriptor %d\n",
   7268 		    device_xname(sc->sc_dev), i));
   7269 
   7270 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7271 
   7272 		status = rxq->rxq_descs[i].wrx_status;
   7273 		errors = rxq->rxq_descs[i].wrx_errors;
   7274 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7275 		vlantag = rxq->rxq_descs[i].wrx_special;
   7276 
   7277 		if ((status & WRX_ST_DD) == 0) {
   7278 			/* We have processed all of the receive descriptors. */
   7279 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7280 			break;
   7281 		}
   7282 
   7283 		count++;
   7284 		if (__predict_false(rxq->rxq_discard)) {
   7285 			DPRINTF(WM_DEBUG_RX,
   7286 			    ("%s: RX: discarding contents of descriptor %d\n",
   7287 			    device_xname(sc->sc_dev), i));
   7288 			wm_init_rxdesc(rxq, i);
   7289 			if (status & WRX_ST_EOP) {
   7290 				/* Reset our state. */
   7291 				DPRINTF(WM_DEBUG_RX,
   7292 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7293 				    device_xname(sc->sc_dev)));
   7294 				rxq->rxq_discard = 0;
   7295 			}
   7296 			continue;
   7297 		}
   7298 
   7299 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7300 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7301 
   7302 		m = rxs->rxs_mbuf;
   7303 
   7304 		/*
   7305 		 * Add a new receive buffer to the ring, unless of
   7306 		 * course the length is zero. Treat the latter as a
   7307 		 * failed mapping.
   7308 		 */
   7309 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7310 			/*
   7311 			 * Failed, throw away what we've done so
   7312 			 * far, and discard the rest of the packet.
   7313 			 */
   7314 			ifp->if_ierrors++;
   7315 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7316 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7317 			wm_init_rxdesc(rxq, i);
   7318 			if ((status & WRX_ST_EOP) == 0)
   7319 				rxq->rxq_discard = 1;
   7320 			if (rxq->rxq_head != NULL)
   7321 				m_freem(rxq->rxq_head);
   7322 			WM_RXCHAIN_RESET(rxq);
   7323 			DPRINTF(WM_DEBUG_RX,
   7324 			    ("%s: RX: Rx buffer allocation failed, "
   7325 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7326 			    rxq->rxq_discard ? " (discard)" : ""));
   7327 			continue;
   7328 		}
   7329 
   7330 		m->m_len = len;
   7331 		rxq->rxq_len += len;
   7332 		DPRINTF(WM_DEBUG_RX,
   7333 		    ("%s: RX: buffer at %p len %d\n",
   7334 		    device_xname(sc->sc_dev), m->m_data, len));
   7335 
   7336 		/* If this is not the end of the packet, keep looking. */
   7337 		if ((status & WRX_ST_EOP) == 0) {
   7338 			WM_RXCHAIN_LINK(rxq, m);
   7339 			DPRINTF(WM_DEBUG_RX,
   7340 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7341 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7342 			continue;
   7343 		}
   7344 
   7345 		/*
   7346 		 * Okay, we have the entire packet now.  The chip is
   7347 		 * configured to include the FCS except I350 and I21[01]
   7348 		 * (not all chips can be configured to strip it),
   7349 		 * so we need to trim it.
   7350 		 * May need to adjust length of previous mbuf in the
   7351 		 * chain if the current mbuf is too short.
   7352 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   7353 		 * is always set in I350, so we don't trim it.
   7354 		 */
   7355 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7356 		    && (sc->sc_type != WM_T_I210)
   7357 		    && (sc->sc_type != WM_T_I211)) {
   7358 			if (m->m_len < ETHER_CRC_LEN) {
   7359 				rxq->rxq_tail->m_len
   7360 				    -= (ETHER_CRC_LEN - m->m_len);
   7361 				m->m_len = 0;
   7362 			} else
   7363 				m->m_len -= ETHER_CRC_LEN;
   7364 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7365 		} else
   7366 			len = rxq->rxq_len;
   7367 
   7368 		WM_RXCHAIN_LINK(rxq, m);
   7369 
   7370 		*rxq->rxq_tailp = NULL;
   7371 		m = rxq->rxq_head;
   7372 
   7373 		WM_RXCHAIN_RESET(rxq);
   7374 
   7375 		DPRINTF(WM_DEBUG_RX,
   7376 		    ("%s: RX: have entire packet, len -> %d\n",
   7377 		    device_xname(sc->sc_dev), len));
   7378 
   7379 		/* If an error occurred, update stats and drop the packet. */
   7380 		if (errors &
   7381 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7382 			if (errors & WRX_ER_SE)
   7383 				log(LOG_WARNING, "%s: symbol error\n",
   7384 				    device_xname(sc->sc_dev));
   7385 			else if (errors & WRX_ER_SEQ)
   7386 				log(LOG_WARNING, "%s: receive sequence error\n",
   7387 				    device_xname(sc->sc_dev));
   7388 			else if (errors & WRX_ER_CE)
   7389 				log(LOG_WARNING, "%s: CRC error\n",
   7390 				    device_xname(sc->sc_dev));
   7391 			m_freem(m);
   7392 			continue;
   7393 		}
   7394 
   7395 		/* No errors.  Receive the packet. */
   7396 		m_set_rcvif(m, ifp);
   7397 		m->m_pkthdr.len = len;
   7398 
   7399 		/*
   7400 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7401 		 * for us.  Associate the tag with the packet.
   7402 		 */
   7403 		/* XXXX should check for i350 and i354 */
   7404 		if ((status & WRX_ST_VP) != 0) {
   7405 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7406 		}
   7407 
   7408 		/* Set up checksum info for this packet. */
   7409 		if ((status & WRX_ST_IXSM) == 0) {
   7410 			if (status & WRX_ST_IPCS) {
   7411 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7412 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7413 				if (errors & WRX_ER_IPE)
   7414 					m->m_pkthdr.csum_flags |=
   7415 					    M_CSUM_IPv4_BAD;
   7416 			}
   7417 			if (status & WRX_ST_TCPCS) {
   7418 				/*
   7419 				 * Note: we don't know if this was TCP or UDP,
   7420 				 * so we just set both bits, and expect the
   7421 				 * upper layers to deal.
   7422 				 */
   7423 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7424 				m->m_pkthdr.csum_flags |=
   7425 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7426 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7427 				if (errors & WRX_ER_TCPE)
   7428 					m->m_pkthdr.csum_flags |=
   7429 					    M_CSUM_TCP_UDP_BAD;
   7430 			}
   7431 		}
   7432 
   7433 		ifp->if_ipackets++;
   7434 
   7435 		mutex_exit(rxq->rxq_lock);
   7436 
   7437 		/* Pass this up to any BPF listeners. */
   7438 		bpf_mtap(ifp, m);
   7439 
   7440 		/* Pass it on. */
   7441 		if_percpuq_enqueue(sc->sc_ipq, m);
   7442 
   7443 		mutex_enter(rxq->rxq_lock);
   7444 
   7445 		if (rxq->rxq_stopping)
   7446 			break;
   7447 	}
   7448 
   7449 	/* Update the receive pointer. */
   7450 	rxq->rxq_ptr = i;
   7451 	if (count != 0)
   7452 		rnd_add_uint32(&sc->rnd_source, count);
   7453 
   7454 	DPRINTF(WM_DEBUG_RX,
   7455 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7456 }
   7457 
   7458 /*
   7459  * wm_linkintr_gmii:
   7460  *
   7461  *	Helper; handle link interrupts for GMII.
   7462  */
   7463 static void
   7464 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7465 {
   7466 
   7467 	KASSERT(WM_CORE_LOCKED(sc));
   7468 
   7469 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7470 		__func__));
   7471 
   7472 	if (icr & ICR_LSC) {
   7473 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7474 
   7475 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7476 			wm_gig_downshift_workaround_ich8lan(sc);
   7477 
   7478 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7479 			device_xname(sc->sc_dev)));
   7480 		mii_pollstat(&sc->sc_mii);
   7481 		if (sc->sc_type == WM_T_82543) {
   7482 			int miistatus, active;
   7483 
   7484 			/*
   7485 			 * With 82543, we need to force speed and
   7486 			 * duplex on the MAC equal to what the PHY
   7487 			 * speed and duplex configuration is.
   7488 			 */
   7489 			miistatus = sc->sc_mii.mii_media_status;
   7490 
   7491 			if (miistatus & IFM_ACTIVE) {
   7492 				active = sc->sc_mii.mii_media_active;
   7493 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7494 				switch (IFM_SUBTYPE(active)) {
   7495 				case IFM_10_T:
   7496 					sc->sc_ctrl |= CTRL_SPEED_10;
   7497 					break;
   7498 				case IFM_100_TX:
   7499 					sc->sc_ctrl |= CTRL_SPEED_100;
   7500 					break;
   7501 				case IFM_1000_T:
   7502 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7503 					break;
   7504 				default:
   7505 					/*
   7506 					 * fiber?
   7507 					 * Shoud not enter here.
   7508 					 */
   7509 					printf("unknown media (%x)\n", active);
   7510 					break;
   7511 				}
   7512 				if (active & IFM_FDX)
   7513 					sc->sc_ctrl |= CTRL_FD;
   7514 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7515 			}
   7516 		} else if ((sc->sc_type == WM_T_ICH8)
   7517 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7518 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7519 		} else if (sc->sc_type == WM_T_PCH) {
   7520 			wm_k1_gig_workaround_hv(sc,
   7521 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7522 		}
   7523 
   7524 		if ((sc->sc_phytype == WMPHY_82578)
   7525 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7526 			== IFM_1000_T)) {
   7527 
   7528 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7529 				delay(200*1000); /* XXX too big */
   7530 
   7531 				/* Link stall fix for link up */
   7532 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7533 				    HV_MUX_DATA_CTRL,
   7534 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7535 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7536 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7537 				    HV_MUX_DATA_CTRL,
   7538 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7539 			}
   7540 		}
   7541 	} else if (icr & ICR_RXSEQ) {
   7542 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7543 			device_xname(sc->sc_dev)));
   7544 	}
   7545 }
   7546 
   7547 /*
   7548  * wm_linkintr_tbi:
   7549  *
   7550  *	Helper; handle link interrupts for TBI mode.
   7551  */
   7552 static void
   7553 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7554 {
   7555 	uint32_t status;
   7556 
   7557 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7558 		__func__));
   7559 
   7560 	status = CSR_READ(sc, WMREG_STATUS);
   7561 	if (icr & ICR_LSC) {
   7562 		if (status & STATUS_LU) {
   7563 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7564 			    device_xname(sc->sc_dev),
   7565 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7566 			/*
   7567 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7568 			 * so we should update sc->sc_ctrl
   7569 			 */
   7570 
   7571 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7572 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7573 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7574 			if (status & STATUS_FD)
   7575 				sc->sc_tctl |=
   7576 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7577 			else
   7578 				sc->sc_tctl |=
   7579 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7580 			if (sc->sc_ctrl & CTRL_TFCE)
   7581 				sc->sc_fcrtl |= FCRTL_XONE;
   7582 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7583 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7584 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7585 				      sc->sc_fcrtl);
   7586 			sc->sc_tbi_linkup = 1;
   7587 		} else {
   7588 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7589 			    device_xname(sc->sc_dev)));
   7590 			sc->sc_tbi_linkup = 0;
   7591 		}
   7592 		/* Update LED */
   7593 		wm_tbi_serdes_set_linkled(sc);
   7594 	} else if (icr & ICR_RXSEQ) {
   7595 		DPRINTF(WM_DEBUG_LINK,
   7596 		    ("%s: LINK: Receive sequence error\n",
   7597 		    device_xname(sc->sc_dev)));
   7598 	}
   7599 }
   7600 
   7601 /*
   7602  * wm_linkintr_serdes:
   7603  *
   7604  *	Helper; handle link interrupts for TBI mode.
   7605  */
   7606 static void
   7607 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7608 {
   7609 	struct mii_data *mii = &sc->sc_mii;
   7610 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7611 	uint32_t pcs_adv, pcs_lpab, reg;
   7612 
   7613 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7614 		__func__));
   7615 
   7616 	if (icr & ICR_LSC) {
   7617 		/* Check PCS */
   7618 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7619 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7620 			mii->mii_media_status |= IFM_ACTIVE;
   7621 			sc->sc_tbi_linkup = 1;
   7622 		} else {
   7623 			mii->mii_media_status |= IFM_NONE;
   7624 			sc->sc_tbi_linkup = 0;
   7625 			wm_tbi_serdes_set_linkled(sc);
   7626 			return;
   7627 		}
   7628 		mii->mii_media_active |= IFM_1000_SX;
   7629 		if ((reg & PCS_LSTS_FDX) != 0)
   7630 			mii->mii_media_active |= IFM_FDX;
   7631 		else
   7632 			mii->mii_media_active |= IFM_HDX;
   7633 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7634 			/* Check flow */
   7635 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7636 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7637 				DPRINTF(WM_DEBUG_LINK,
   7638 				    ("XXX LINKOK but not ACOMP\n"));
   7639 				return;
   7640 			}
   7641 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7642 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7643 			DPRINTF(WM_DEBUG_LINK,
   7644 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7645 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7646 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7647 				mii->mii_media_active |= IFM_FLOW
   7648 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7649 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7650 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7651 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7652 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7653 				mii->mii_media_active |= IFM_FLOW
   7654 				    | IFM_ETH_TXPAUSE;
   7655 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7656 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7657 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7658 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7659 				mii->mii_media_active |= IFM_FLOW
   7660 				    | IFM_ETH_RXPAUSE;
   7661 		}
   7662 		/* Update LED */
   7663 		wm_tbi_serdes_set_linkled(sc);
   7664 	} else {
   7665 		DPRINTF(WM_DEBUG_LINK,
   7666 		    ("%s: LINK: Receive sequence error\n",
   7667 		    device_xname(sc->sc_dev)));
   7668 	}
   7669 }
   7670 
   7671 /*
   7672  * wm_linkintr:
   7673  *
   7674  *	Helper; handle link interrupts.
   7675  */
   7676 static void
   7677 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7678 {
   7679 
   7680 	KASSERT(WM_CORE_LOCKED(sc));
   7681 
   7682 	if (sc->sc_flags & WM_F_HAS_MII)
   7683 		wm_linkintr_gmii(sc, icr);
   7684 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7685 	    && (sc->sc_type >= WM_T_82575))
   7686 		wm_linkintr_serdes(sc, icr);
   7687 	else
   7688 		wm_linkintr_tbi(sc, icr);
   7689 }
   7690 
   7691 /*
   7692  * wm_intr_legacy:
   7693  *
   7694  *	Interrupt service routine for INTx and MSI.
   7695  */
   7696 static int
   7697 wm_intr_legacy(void *arg)
   7698 {
   7699 	struct wm_softc *sc = arg;
   7700 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7701 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7702 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7703 	uint32_t icr, rndval = 0;
   7704 	int handled = 0;
   7705 
   7706 	DPRINTF(WM_DEBUG_TX,
   7707 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7708 	while (1 /* CONSTCOND */) {
   7709 		icr = CSR_READ(sc, WMREG_ICR);
   7710 		if ((icr & sc->sc_icr) == 0)
   7711 			break;
   7712 		if (rndval == 0)
   7713 			rndval = icr;
   7714 
   7715 		mutex_enter(rxq->rxq_lock);
   7716 
   7717 		if (rxq->rxq_stopping) {
   7718 			mutex_exit(rxq->rxq_lock);
   7719 			break;
   7720 		}
   7721 
   7722 		handled = 1;
   7723 
   7724 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7725 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7726 			DPRINTF(WM_DEBUG_RX,
   7727 			    ("%s: RX: got Rx intr 0x%08x\n",
   7728 			    device_xname(sc->sc_dev),
   7729 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7730 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7731 		}
   7732 #endif
   7733 		wm_rxeof(rxq);
   7734 
   7735 		mutex_exit(rxq->rxq_lock);
   7736 		mutex_enter(txq->txq_lock);
   7737 
   7738 		if (txq->txq_stopping) {
   7739 			mutex_exit(txq->txq_lock);
   7740 			break;
   7741 		}
   7742 
   7743 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7744 		if (icr & ICR_TXDW) {
   7745 			DPRINTF(WM_DEBUG_TX,
   7746 			    ("%s: TX: got TXDW interrupt\n",
   7747 			    device_xname(sc->sc_dev)));
   7748 			WM_Q_EVCNT_INCR(txq, txdw);
   7749 		}
   7750 #endif
   7751 		wm_txeof(sc, txq);
   7752 
   7753 		mutex_exit(txq->txq_lock);
   7754 		WM_CORE_LOCK(sc);
   7755 
   7756 		if (sc->sc_core_stopping) {
   7757 			WM_CORE_UNLOCK(sc);
   7758 			break;
   7759 		}
   7760 
   7761 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7762 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7763 			wm_linkintr(sc, icr);
   7764 		}
   7765 
   7766 		WM_CORE_UNLOCK(sc);
   7767 
   7768 		if (icr & ICR_RXO) {
   7769 #if defined(WM_DEBUG)
   7770 			log(LOG_WARNING, "%s: Receive overrun\n",
   7771 			    device_xname(sc->sc_dev));
   7772 #endif /* defined(WM_DEBUG) */
   7773 		}
   7774 	}
   7775 
   7776 	rnd_add_uint32(&sc->rnd_source, rndval);
   7777 
   7778 	if (handled) {
   7779 		/* Try to get more packets going. */
   7780 		ifp->if_start(ifp);
   7781 	}
   7782 
   7783 	return handled;
   7784 }
   7785 
   7786 static int
   7787 wm_txrxintr_msix(void *arg)
   7788 {
   7789 	struct wm_queue *wmq = arg;
   7790 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7791 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7792 	struct wm_softc *sc = txq->txq_sc;
   7793 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7794 
   7795 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7796 
   7797 	DPRINTF(WM_DEBUG_TX,
   7798 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7799 
   7800 	if (sc->sc_type == WM_T_82574)
   7801 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7802 	else if (sc->sc_type == WM_T_82575)
   7803 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7804 	else
   7805 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7806 
   7807 	mutex_enter(txq->txq_lock);
   7808 
   7809 	if (txq->txq_stopping) {
   7810 		mutex_exit(txq->txq_lock);
   7811 		return 0;
   7812 	}
   7813 
   7814 	WM_Q_EVCNT_INCR(txq, txdw);
   7815 	wm_txeof(sc, txq);
   7816 
   7817 	/* Try to get more packets going. */
   7818 	if (pcq_peek(txq->txq_interq) != NULL)
   7819 		wm_nq_transmit_locked(ifp, txq);
   7820 	/*
   7821 	 * There are still some upper layer processing which call
   7822 	 * ifp->if_start(). e.g. ALTQ
   7823 	 */
   7824 	if (wmq->wmq_id == 0) {
   7825 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7826 			wm_nq_start_locked(ifp);
   7827 	}
   7828 
   7829 	mutex_exit(txq->txq_lock);
   7830 
   7831 	DPRINTF(WM_DEBUG_RX,
   7832 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7833 	mutex_enter(rxq->rxq_lock);
   7834 
   7835 	if (rxq->rxq_stopping) {
   7836 		mutex_exit(rxq->rxq_lock);
   7837 		return 0;
   7838 	}
   7839 
   7840 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7841 	wm_rxeof(rxq);
   7842 	mutex_exit(rxq->rxq_lock);
   7843 
   7844 	if (sc->sc_type == WM_T_82574)
   7845 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7846 	else if (sc->sc_type == WM_T_82575)
   7847 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7848 	else
   7849 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7850 
   7851 	return 1;
   7852 }
   7853 
   7854 /*
   7855  * wm_linkintr_msix:
   7856  *
   7857  *	Interrupt service routine for link status change for MSI-X.
   7858  */
   7859 static int
   7860 wm_linkintr_msix(void *arg)
   7861 {
   7862 	struct wm_softc *sc = arg;
   7863 	uint32_t reg;
   7864 
   7865 	DPRINTF(WM_DEBUG_LINK,
   7866 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7867 
   7868 	reg = CSR_READ(sc, WMREG_ICR);
   7869 	WM_CORE_LOCK(sc);
   7870 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   7871 		goto out;
   7872 
   7873 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7874 	wm_linkintr(sc, ICR_LSC);
   7875 
   7876 out:
   7877 	WM_CORE_UNLOCK(sc);
   7878 
   7879 	if (sc->sc_type == WM_T_82574)
   7880 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7881 	else if (sc->sc_type == WM_T_82575)
   7882 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7883 	else
   7884 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7885 
   7886 	return 1;
   7887 }
   7888 
   7889 /*
   7890  * Media related.
   7891  * GMII, SGMII, TBI (and SERDES)
   7892  */
   7893 
   7894 /* Common */
   7895 
   7896 /*
   7897  * wm_tbi_serdes_set_linkled:
   7898  *
   7899  *	Update the link LED on TBI and SERDES devices.
   7900  */
   7901 static void
   7902 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7903 {
   7904 
   7905 	if (sc->sc_tbi_linkup)
   7906 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7907 	else
   7908 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7909 
   7910 	/* 82540 or newer devices are active low */
   7911 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7912 
   7913 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7914 }
   7915 
   7916 /* GMII related */
   7917 
   7918 /*
   7919  * wm_gmii_reset:
   7920  *
   7921  *	Reset the PHY.
   7922  */
   7923 static void
   7924 wm_gmii_reset(struct wm_softc *sc)
   7925 {
   7926 	uint32_t reg;
   7927 	int rv;
   7928 
   7929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7930 		device_xname(sc->sc_dev), __func__));
   7931 
   7932 	rv = sc->phy.acquire(sc);
   7933 	if (rv != 0) {
   7934 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7935 		    __func__);
   7936 		return;
   7937 	}
   7938 
   7939 	switch (sc->sc_type) {
   7940 	case WM_T_82542_2_0:
   7941 	case WM_T_82542_2_1:
   7942 		/* null */
   7943 		break;
   7944 	case WM_T_82543:
   7945 		/*
   7946 		 * With 82543, we need to force speed and duplex on the MAC
   7947 		 * equal to what the PHY speed and duplex configuration is.
   7948 		 * In addition, we need to perform a hardware reset on the PHY
   7949 		 * to take it out of reset.
   7950 		 */
   7951 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7952 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7953 
   7954 		/* The PHY reset pin is active-low. */
   7955 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7956 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7957 		    CTRL_EXT_SWDPIN(4));
   7958 		reg |= CTRL_EXT_SWDPIO(4);
   7959 
   7960 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7961 		CSR_WRITE_FLUSH(sc);
   7962 		delay(10*1000);
   7963 
   7964 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7965 		CSR_WRITE_FLUSH(sc);
   7966 		delay(150);
   7967 #if 0
   7968 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7969 #endif
   7970 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7971 		break;
   7972 	case WM_T_82544:	/* reset 10000us */
   7973 	case WM_T_82540:
   7974 	case WM_T_82545:
   7975 	case WM_T_82545_3:
   7976 	case WM_T_82546:
   7977 	case WM_T_82546_3:
   7978 	case WM_T_82541:
   7979 	case WM_T_82541_2:
   7980 	case WM_T_82547:
   7981 	case WM_T_82547_2:
   7982 	case WM_T_82571:	/* reset 100us */
   7983 	case WM_T_82572:
   7984 	case WM_T_82573:
   7985 	case WM_T_82574:
   7986 	case WM_T_82575:
   7987 	case WM_T_82576:
   7988 	case WM_T_82580:
   7989 	case WM_T_I350:
   7990 	case WM_T_I354:
   7991 	case WM_T_I210:
   7992 	case WM_T_I211:
   7993 	case WM_T_82583:
   7994 	case WM_T_80003:
   7995 		/* generic reset */
   7996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7997 		CSR_WRITE_FLUSH(sc);
   7998 		delay(20000);
   7999 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8000 		CSR_WRITE_FLUSH(sc);
   8001 		delay(20000);
   8002 
   8003 		if ((sc->sc_type == WM_T_82541)
   8004 		    || (sc->sc_type == WM_T_82541_2)
   8005 		    || (sc->sc_type == WM_T_82547)
   8006 		    || (sc->sc_type == WM_T_82547_2)) {
   8007 			/* workaround for igp are done in igp_reset() */
   8008 			/* XXX add code to set LED after phy reset */
   8009 		}
   8010 		break;
   8011 	case WM_T_ICH8:
   8012 	case WM_T_ICH9:
   8013 	case WM_T_ICH10:
   8014 	case WM_T_PCH:
   8015 	case WM_T_PCH2:
   8016 	case WM_T_PCH_LPT:
   8017 	case WM_T_PCH_SPT:
   8018 		/* generic reset */
   8019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8020 		CSR_WRITE_FLUSH(sc);
   8021 		delay(100);
   8022 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8023 		CSR_WRITE_FLUSH(sc);
   8024 		delay(150);
   8025 		break;
   8026 	default:
   8027 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8028 		    __func__);
   8029 		break;
   8030 	}
   8031 
   8032 	sc->phy.release(sc);
   8033 
   8034 	/* get_cfg_done */
   8035 	wm_get_cfg_done(sc);
   8036 
   8037 	/* extra setup */
   8038 	switch (sc->sc_type) {
   8039 	case WM_T_82542_2_0:
   8040 	case WM_T_82542_2_1:
   8041 	case WM_T_82543:
   8042 	case WM_T_82544:
   8043 	case WM_T_82540:
   8044 	case WM_T_82545:
   8045 	case WM_T_82545_3:
   8046 	case WM_T_82546:
   8047 	case WM_T_82546_3:
   8048 	case WM_T_82541_2:
   8049 	case WM_T_82547_2:
   8050 	case WM_T_82571:
   8051 	case WM_T_82572:
   8052 	case WM_T_82573:
   8053 	case WM_T_82575:
   8054 	case WM_T_82576:
   8055 	case WM_T_82580:
   8056 	case WM_T_I350:
   8057 	case WM_T_I354:
   8058 	case WM_T_I210:
   8059 	case WM_T_I211:
   8060 	case WM_T_80003:
   8061 		/* null */
   8062 		break;
   8063 	case WM_T_82574:
   8064 	case WM_T_82583:
   8065 		wm_lplu_d0_disable(sc);
   8066 		break;
   8067 	case WM_T_82541:
   8068 	case WM_T_82547:
   8069 		/* XXX Configure actively LED after PHY reset */
   8070 		break;
   8071 	case WM_T_ICH8:
   8072 	case WM_T_ICH9:
   8073 	case WM_T_ICH10:
   8074 	case WM_T_PCH:
   8075 	case WM_T_PCH2:
   8076 	case WM_T_PCH_LPT:
   8077 	case WM_T_PCH_SPT:
   8078 		/* Allow time for h/w to get to a quiescent state afer reset */
   8079 		delay(10*1000);
   8080 
   8081 		if (sc->sc_type == WM_T_PCH)
   8082 			wm_hv_phy_workaround_ich8lan(sc);
   8083 
   8084 		if (sc->sc_type == WM_T_PCH2)
   8085 			wm_lv_phy_workaround_ich8lan(sc);
   8086 
   8087 		/* Clear the host wakeup bit after lcd reset */
   8088 		if (sc->sc_type >= WM_T_PCH) {
   8089 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8090 			    BM_PORT_GEN_CFG);
   8091 			reg &= ~BM_WUC_HOST_WU_BIT;
   8092 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8093 			    BM_PORT_GEN_CFG, reg);
   8094 		}
   8095 
   8096 		/*
   8097 		 * XXX Configure the LCD with th extended configuration region
   8098 		 * in NVM
   8099 		 */
   8100 
   8101 		/* Disable D0 LPLU. */
   8102 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8103 			wm_lplu_d0_disable_pch(sc);
   8104 		else
   8105 			wm_lplu_d0_disable(sc);	/* ICH* */
   8106 		break;
   8107 	default:
   8108 		panic("%s: unknown type\n", __func__);
   8109 		break;
   8110 	}
   8111 }
   8112 
   8113 /*
   8114  * wm_get_phy_id_82575:
   8115  *
   8116  * Return PHY ID. Return -1 if it failed.
   8117  */
   8118 static int
   8119 wm_get_phy_id_82575(struct wm_softc *sc)
   8120 {
   8121 	uint32_t reg;
   8122 	int phyid = -1;
   8123 
   8124 	/* XXX */
   8125 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8126 		return -1;
   8127 
   8128 	if (wm_sgmii_uses_mdio(sc)) {
   8129 		switch (sc->sc_type) {
   8130 		case WM_T_82575:
   8131 		case WM_T_82576:
   8132 			reg = CSR_READ(sc, WMREG_MDIC);
   8133 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8134 			break;
   8135 		case WM_T_82580:
   8136 		case WM_T_I350:
   8137 		case WM_T_I354:
   8138 		case WM_T_I210:
   8139 		case WM_T_I211:
   8140 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8141 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8142 			break;
   8143 		default:
   8144 			return -1;
   8145 		}
   8146 	}
   8147 
   8148 	return phyid;
   8149 }
   8150 
   8151 
   8152 /*
   8153  * wm_gmii_mediainit:
   8154  *
   8155  *	Initialize media for use on 1000BASE-T devices.
   8156  */
   8157 static void
   8158 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8159 {
   8160 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8161 	struct mii_data *mii = &sc->sc_mii;
   8162 	uint32_t reg;
   8163 
   8164 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8165 		device_xname(sc->sc_dev), __func__));
   8166 
   8167 	/* We have GMII. */
   8168 	sc->sc_flags |= WM_F_HAS_MII;
   8169 
   8170 	if (sc->sc_type == WM_T_80003)
   8171 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8172 	else
   8173 		sc->sc_tipg = TIPG_1000T_DFLT;
   8174 
   8175 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8176 	if ((sc->sc_type == WM_T_82580)
   8177 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8178 	    || (sc->sc_type == WM_T_I211)) {
   8179 		reg = CSR_READ(sc, WMREG_PHPM);
   8180 		reg &= ~PHPM_GO_LINK_D;
   8181 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8182 	}
   8183 
   8184 	/*
   8185 	 * Let the chip set speed/duplex on its own based on
   8186 	 * signals from the PHY.
   8187 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8188 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8189 	 */
   8190 	sc->sc_ctrl |= CTRL_SLU;
   8191 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8192 
   8193 	/* Initialize our media structures and probe the GMII. */
   8194 	mii->mii_ifp = ifp;
   8195 
   8196 	/*
   8197 	 * Determine the PHY access method.
   8198 	 *
   8199 	 *  For SGMII, use SGMII specific method.
   8200 	 *
   8201 	 *  For some devices, we can determine the PHY access method
   8202 	 * from sc_type.
   8203 	 *
   8204 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   8205 	 * access  method by sc_type, so use the PCI product ID for some
   8206 	 * devices.
   8207 	 * For other ICH8 variants, try to use igp's method. If the PHY
   8208 	 * can't detect, then use bm's method.
   8209 	 */
   8210 	switch (prodid) {
   8211 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8212 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8213 		/* 82577 */
   8214 		sc->sc_phytype = WMPHY_82577;
   8215 		break;
   8216 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8217 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8218 		/* 82578 */
   8219 		sc->sc_phytype = WMPHY_82578;
   8220 		break;
   8221 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8222 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8223 		/* 82579 */
   8224 		sc->sc_phytype = WMPHY_82579;
   8225 		break;
   8226 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8227 	case PCI_PRODUCT_INTEL_82801I_BM:
   8228 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8229 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8230 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8231 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8232 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8233 		/* ICH8, 9, 10 with 82567 */
   8234 		sc->sc_phytype = WMPHY_BM;
   8235 		mii->mii_readreg = wm_gmii_bm_readreg;
   8236 		mii->mii_writereg = wm_gmii_bm_writereg;
   8237 		break;
   8238 	default:
   8239 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8240 		    && !wm_sgmii_uses_mdio(sc)){
   8241 			/* SGMII */
   8242 			mii->mii_readreg = wm_sgmii_readreg;
   8243 			mii->mii_writereg = wm_sgmii_writereg;
   8244 		} else if ((sc->sc_type == WM_T_82574)
   8245 		    || (sc->sc_type == WM_T_82583)) {
   8246 			/* BM2 (phyaddr == 1) */
   8247 			sc->sc_phytype = WMPHY_BM;
   8248 			mii->mii_readreg = wm_gmii_bm_readreg;
   8249 			mii->mii_writereg = wm_gmii_bm_writereg;
   8250 		} else if (sc->sc_type >= WM_T_ICH8) {
   8251 			/* non-82567 ICH8, 9 and 10 */
   8252 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8253 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8254 		} else if (sc->sc_type >= WM_T_80003) {
   8255 			/* 80003 */
   8256 			sc->sc_phytype = WMPHY_GG82563;
   8257 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8258 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8259 		} else if (sc->sc_type >= WM_T_I210) {
   8260 			/* I210 and I211 */
   8261 			sc->sc_phytype = WMPHY_210;
   8262 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8263 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8264 		} else if (sc->sc_type >= WM_T_82580) {
   8265 			/* 82580, I350 and I354 */
   8266 			sc->sc_phytype = WMPHY_82580;
   8267 			mii->mii_readreg = wm_gmii_82580_readreg;
   8268 			mii->mii_writereg = wm_gmii_82580_writereg;
   8269 		} else if (sc->sc_type >= WM_T_82544) {
   8270 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   8271 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8272 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8273 		} else {
   8274 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8275 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8276 		}
   8277 		break;
   8278 	}
   8279 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8280 		/* All PCH* use _hv_ */
   8281 		mii->mii_readreg = wm_gmii_hv_readreg;
   8282 		mii->mii_writereg = wm_gmii_hv_writereg;
   8283 	}
   8284 	mii->mii_statchg = wm_gmii_statchg;
   8285 
   8286 	wm_gmii_reset(sc);
   8287 
   8288 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8289 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8290 	    wm_gmii_mediastatus);
   8291 
   8292 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8293 	    || (sc->sc_type == WM_T_82580)
   8294 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8295 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8296 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8297 			/* Attach only one port */
   8298 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8299 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8300 		} else {
   8301 			int i, id;
   8302 			uint32_t ctrl_ext;
   8303 
   8304 			id = wm_get_phy_id_82575(sc);
   8305 			if (id != -1) {
   8306 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8307 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8308 			}
   8309 			if ((id == -1)
   8310 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8311 				/* Power on sgmii phy if it is disabled */
   8312 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8313 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8314 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8315 				CSR_WRITE_FLUSH(sc);
   8316 				delay(300*1000); /* XXX too long */
   8317 
   8318 				/* from 1 to 8 */
   8319 				for (i = 1; i < 8; i++)
   8320 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8321 					    0xffffffff, i, MII_OFFSET_ANY,
   8322 					    MIIF_DOPAUSE);
   8323 
   8324 				/* restore previous sfp cage power state */
   8325 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8326 			}
   8327 		}
   8328 	} else {
   8329 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8330 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8331 	}
   8332 
   8333 	/*
   8334 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8335 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8336 	 */
   8337 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8338 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8339 		wm_set_mdio_slow_mode_hv(sc);
   8340 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8341 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8342 	}
   8343 
   8344 	/*
   8345 	 * (For ICH8 variants)
   8346 	 * If PHY detection failed, use BM's r/w function and retry.
   8347 	 */
   8348 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8349 		/* if failed, retry with *_bm_* */
   8350 		mii->mii_readreg = wm_gmii_bm_readreg;
   8351 		mii->mii_writereg = wm_gmii_bm_writereg;
   8352 
   8353 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8354 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8355 	}
   8356 
   8357 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8358 		/* Any PHY wasn't find */
   8359 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8360 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8361 		sc->sc_phytype = WMPHY_NONE;
   8362 	} else {
   8363 		/*
   8364 		 * PHY Found!
   8365 		 * Check PHY type.
   8366 		 */
   8367 		uint32_t model;
   8368 		struct mii_softc *child;
   8369 
   8370 		child = LIST_FIRST(&mii->mii_phys);
   8371 		model = child->mii_mpd_model;
   8372 		if (model == MII_MODEL_yyINTEL_I82566)
   8373 			sc->sc_phytype = WMPHY_IGP_3;
   8374 
   8375 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8376 	}
   8377 }
   8378 
   8379 /*
   8380  * wm_gmii_mediachange:	[ifmedia interface function]
   8381  *
   8382  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8383  */
   8384 static int
   8385 wm_gmii_mediachange(struct ifnet *ifp)
   8386 {
   8387 	struct wm_softc *sc = ifp->if_softc;
   8388 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8389 	int rc;
   8390 
   8391 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8392 		device_xname(sc->sc_dev), __func__));
   8393 	if ((ifp->if_flags & IFF_UP) == 0)
   8394 		return 0;
   8395 
   8396 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8397 	sc->sc_ctrl |= CTRL_SLU;
   8398 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8399 	    || (sc->sc_type > WM_T_82543)) {
   8400 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8401 	} else {
   8402 		sc->sc_ctrl &= ~CTRL_ASDE;
   8403 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8404 		if (ife->ifm_media & IFM_FDX)
   8405 			sc->sc_ctrl |= CTRL_FD;
   8406 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8407 		case IFM_10_T:
   8408 			sc->sc_ctrl |= CTRL_SPEED_10;
   8409 			break;
   8410 		case IFM_100_TX:
   8411 			sc->sc_ctrl |= CTRL_SPEED_100;
   8412 			break;
   8413 		case IFM_1000_T:
   8414 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8415 			break;
   8416 		default:
   8417 			panic("wm_gmii_mediachange: bad media 0x%x",
   8418 			    ife->ifm_media);
   8419 		}
   8420 	}
   8421 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8422 	if (sc->sc_type <= WM_T_82543)
   8423 		wm_gmii_reset(sc);
   8424 
   8425 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8426 		return 0;
   8427 	return rc;
   8428 }
   8429 
   8430 /*
   8431  * wm_gmii_mediastatus:	[ifmedia interface function]
   8432  *
   8433  *	Get the current interface media status on a 1000BASE-T device.
   8434  */
   8435 static void
   8436 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8437 {
   8438 	struct wm_softc *sc = ifp->if_softc;
   8439 
   8440 	ether_mediastatus(ifp, ifmr);
   8441 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8442 	    | sc->sc_flowflags;
   8443 }
   8444 
   8445 #define	MDI_IO		CTRL_SWDPIN(2)
   8446 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8447 #define	MDI_CLK		CTRL_SWDPIN(3)
   8448 
   8449 static void
   8450 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8451 {
   8452 	uint32_t i, v;
   8453 
   8454 	v = CSR_READ(sc, WMREG_CTRL);
   8455 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8456 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8457 
   8458 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8459 		if (data & i)
   8460 			v |= MDI_IO;
   8461 		else
   8462 			v &= ~MDI_IO;
   8463 		CSR_WRITE(sc, WMREG_CTRL, v);
   8464 		CSR_WRITE_FLUSH(sc);
   8465 		delay(10);
   8466 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8467 		CSR_WRITE_FLUSH(sc);
   8468 		delay(10);
   8469 		CSR_WRITE(sc, WMREG_CTRL, v);
   8470 		CSR_WRITE_FLUSH(sc);
   8471 		delay(10);
   8472 	}
   8473 }
   8474 
   8475 static uint32_t
   8476 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8477 {
   8478 	uint32_t v, i, data = 0;
   8479 
   8480 	v = CSR_READ(sc, WMREG_CTRL);
   8481 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8482 	v |= CTRL_SWDPIO(3);
   8483 
   8484 	CSR_WRITE(sc, WMREG_CTRL, v);
   8485 	CSR_WRITE_FLUSH(sc);
   8486 	delay(10);
   8487 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8488 	CSR_WRITE_FLUSH(sc);
   8489 	delay(10);
   8490 	CSR_WRITE(sc, WMREG_CTRL, v);
   8491 	CSR_WRITE_FLUSH(sc);
   8492 	delay(10);
   8493 
   8494 	for (i = 0; i < 16; i++) {
   8495 		data <<= 1;
   8496 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8497 		CSR_WRITE_FLUSH(sc);
   8498 		delay(10);
   8499 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8500 			data |= 1;
   8501 		CSR_WRITE(sc, WMREG_CTRL, v);
   8502 		CSR_WRITE_FLUSH(sc);
   8503 		delay(10);
   8504 	}
   8505 
   8506 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8507 	CSR_WRITE_FLUSH(sc);
   8508 	delay(10);
   8509 	CSR_WRITE(sc, WMREG_CTRL, v);
   8510 	CSR_WRITE_FLUSH(sc);
   8511 	delay(10);
   8512 
   8513 	return data;
   8514 }
   8515 
   8516 #undef MDI_IO
   8517 #undef MDI_DIR
   8518 #undef MDI_CLK
   8519 
   8520 /*
   8521  * wm_gmii_i82543_readreg:	[mii interface function]
   8522  *
   8523  *	Read a PHY register on the GMII (i82543 version).
   8524  */
   8525 static int
   8526 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8527 {
   8528 	struct wm_softc *sc = device_private(self);
   8529 	int rv;
   8530 
   8531 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8532 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8533 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8534 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8535 
   8536 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8537 	    device_xname(sc->sc_dev), phy, reg, rv));
   8538 
   8539 	return rv;
   8540 }
   8541 
   8542 /*
   8543  * wm_gmii_i82543_writereg:	[mii interface function]
   8544  *
   8545  *	Write a PHY register on the GMII (i82543 version).
   8546  */
   8547 static void
   8548 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8549 {
   8550 	struct wm_softc *sc = device_private(self);
   8551 
   8552 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8553 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8554 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8555 	    (MII_COMMAND_START << 30), 32);
   8556 }
   8557 
   8558 /*
   8559  * wm_gmii_mdic_readreg:	[mii interface function]
   8560  *
   8561  *	Read a PHY register on the GMII.
   8562  */
   8563 static int
   8564 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8565 {
   8566 	struct wm_softc *sc = device_private(self);
   8567 	uint32_t mdic = 0;
   8568 	int i, rv;
   8569 
   8570 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8571 	    MDIC_REGADD(reg));
   8572 
   8573 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8574 		mdic = CSR_READ(sc, WMREG_MDIC);
   8575 		if (mdic & MDIC_READY)
   8576 			break;
   8577 		delay(50);
   8578 	}
   8579 
   8580 	if ((mdic & MDIC_READY) == 0) {
   8581 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8582 		    device_xname(sc->sc_dev), phy, reg);
   8583 		rv = 0;
   8584 	} else if (mdic & MDIC_E) {
   8585 #if 0 /* This is normal if no PHY is present. */
   8586 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8587 		    device_xname(sc->sc_dev), phy, reg);
   8588 #endif
   8589 		rv = 0;
   8590 	} else {
   8591 		rv = MDIC_DATA(mdic);
   8592 		if (rv == 0xffff)
   8593 			rv = 0;
   8594 	}
   8595 
   8596 	return rv;
   8597 }
   8598 
   8599 /*
   8600  * wm_gmii_mdic_writereg:	[mii interface function]
   8601  *
   8602  *	Write a PHY register on the GMII.
   8603  */
   8604 static void
   8605 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8606 {
   8607 	struct wm_softc *sc = device_private(self);
   8608 	uint32_t mdic = 0;
   8609 	int i;
   8610 
   8611 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8612 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8613 
   8614 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8615 		mdic = CSR_READ(sc, WMREG_MDIC);
   8616 		if (mdic & MDIC_READY)
   8617 			break;
   8618 		delay(50);
   8619 	}
   8620 
   8621 	if ((mdic & MDIC_READY) == 0)
   8622 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8623 		    device_xname(sc->sc_dev), phy, reg);
   8624 	else if (mdic & MDIC_E)
   8625 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8626 		    device_xname(sc->sc_dev), phy, reg);
   8627 }
   8628 
   8629 /*
   8630  * wm_gmii_i82544_readreg:	[mii interface function]
   8631  *
   8632  *	Read a PHY register on the GMII.
   8633  */
   8634 static int
   8635 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8636 {
   8637 	struct wm_softc *sc = device_private(self);
   8638 	int rv;
   8639 
   8640 	if (sc->phy.acquire(sc)) {
   8641 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8642 		    __func__);
   8643 		return 0;
   8644 	}
   8645 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8646 	sc->phy.release(sc);
   8647 
   8648 	return rv;
   8649 }
   8650 
   8651 /*
   8652  * wm_gmii_i82544_writereg:	[mii interface function]
   8653  *
   8654  *	Write a PHY register on the GMII.
   8655  */
   8656 static void
   8657 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8658 {
   8659 	struct wm_softc *sc = device_private(self);
   8660 
   8661 	if (sc->phy.acquire(sc)) {
   8662 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8663 		    __func__);
   8664 	}
   8665 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8666 	sc->phy.release(sc);
   8667 }
   8668 
   8669 /*
   8670  * wm_gmii_i80003_readreg:	[mii interface function]
   8671  *
   8672  *	Read a PHY register on the kumeran
   8673  * This could be handled by the PHY layer if we didn't have to lock the
   8674  * ressource ...
   8675  */
   8676 static int
   8677 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8678 {
   8679 	struct wm_softc *sc = device_private(self);
   8680 	int rv;
   8681 
   8682 	if (phy != 1) /* only one PHY on kumeran bus */
   8683 		return 0;
   8684 
   8685 	if (sc->phy.acquire(sc)) {
   8686 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8687 		    __func__);
   8688 		return 0;
   8689 	}
   8690 
   8691 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8692 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8693 		    reg >> GG82563_PAGE_SHIFT);
   8694 	} else {
   8695 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8696 		    reg >> GG82563_PAGE_SHIFT);
   8697 	}
   8698 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8699 	delay(200);
   8700 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8701 	delay(200);
   8702 	sc->phy.release(sc);
   8703 
   8704 	return rv;
   8705 }
   8706 
   8707 /*
   8708  * wm_gmii_i80003_writereg:	[mii interface function]
   8709  *
   8710  *	Write a PHY register on the kumeran.
   8711  * This could be handled by the PHY layer if we didn't have to lock the
   8712  * ressource ...
   8713  */
   8714 static void
   8715 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8716 {
   8717 	struct wm_softc *sc = device_private(self);
   8718 
   8719 	if (phy != 1) /* only one PHY on kumeran bus */
   8720 		return;
   8721 
   8722 	if (sc->phy.acquire(sc)) {
   8723 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8724 		    __func__);
   8725 		return;
   8726 	}
   8727 
   8728 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8729 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8730 		    reg >> GG82563_PAGE_SHIFT);
   8731 	} else {
   8732 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8733 		    reg >> GG82563_PAGE_SHIFT);
   8734 	}
   8735 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8736 	delay(200);
   8737 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8738 	delay(200);
   8739 
   8740 	sc->phy.release(sc);
   8741 }
   8742 
   8743 /*
   8744  * wm_gmii_bm_readreg:	[mii interface function]
   8745  *
   8746  *	Read a PHY register on the kumeran
   8747  * This could be handled by the PHY layer if we didn't have to lock the
   8748  * ressource ...
   8749  */
   8750 static int
   8751 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8752 {
   8753 	struct wm_softc *sc = device_private(self);
   8754 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8755 	uint16_t val;
   8756 	int rv;
   8757 
   8758 	if (sc->phy.acquire(sc)) {
   8759 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8760 		    __func__);
   8761 		return 0;
   8762 	}
   8763 
   8764 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8765 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8766 		    || (reg == 31)) ? 1 : phy;
   8767 	/* Page 800 works differently than the rest so it has its own func */
   8768 	if (page == BM_WUC_PAGE) {
   8769 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8770 		rv = val;
   8771 		goto release;
   8772 	}
   8773 
   8774 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8775 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8776 		    && (sc->sc_type != WM_T_82583))
   8777 			wm_gmii_mdic_writereg(self, phy,
   8778 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8779 		else
   8780 			wm_gmii_mdic_writereg(self, phy,
   8781 			    BME1000_PHY_PAGE_SELECT, page);
   8782 	}
   8783 
   8784 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8785 
   8786 release:
   8787 	sc->phy.release(sc);
   8788 	return rv;
   8789 }
   8790 
   8791 /*
   8792  * wm_gmii_bm_writereg:	[mii interface function]
   8793  *
   8794  *	Write a PHY register on the kumeran.
   8795  * This could be handled by the PHY layer if we didn't have to lock the
   8796  * ressource ...
   8797  */
   8798 static void
   8799 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8800 {
   8801 	struct wm_softc *sc = device_private(self);
   8802 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8803 
   8804 	if (sc->phy.acquire(sc)) {
   8805 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8806 		    __func__);
   8807 		return;
   8808 	}
   8809 
   8810 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8811 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8812 		    || (reg == 31)) ? 1 : phy;
   8813 	/* Page 800 works differently than the rest so it has its own func */
   8814 	if (page == BM_WUC_PAGE) {
   8815 		uint16_t tmp;
   8816 
   8817 		tmp = val;
   8818 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8819 		goto release;
   8820 	}
   8821 
   8822 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8823 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8824 		    && (sc->sc_type != WM_T_82583))
   8825 			wm_gmii_mdic_writereg(self, phy,
   8826 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8827 		else
   8828 			wm_gmii_mdic_writereg(self, phy,
   8829 			    BME1000_PHY_PAGE_SELECT, page);
   8830 	}
   8831 
   8832 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8833 
   8834 release:
   8835 	sc->phy.release(sc);
   8836 }
   8837 
   8838 static void
   8839 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8840 {
   8841 	struct wm_softc *sc = device_private(self);
   8842 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8843 	uint16_t wuce, reg;
   8844 
   8845 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8846 		device_xname(sc->sc_dev), __func__));
   8847 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8848 	if (sc->sc_type == WM_T_PCH) {
   8849 		/* XXX e1000 driver do nothing... why? */
   8850 	}
   8851 
   8852 	/*
   8853 	 * 1) Enable PHY wakeup register first.
   8854 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   8855 	 */
   8856 
   8857 	/* Set page 769 */
   8858 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8859 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8860 
   8861 	/* Read WUCE and save it */
   8862 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   8863 
   8864 	reg = wuce | BM_WUC_ENABLE_BIT;
   8865 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   8866 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   8867 
   8868 	/* Select page 800 */
   8869 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8870 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8871 
   8872 	/*
   8873 	 * 2) Access PHY wakeup register.
   8874 	 * See e1000_access_phy_wakeup_reg_bm.
   8875 	 */
   8876 
   8877 	/* Write page 800 */
   8878 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8879 
   8880 	if (rd)
   8881 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8882 	else
   8883 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8884 
   8885 	/*
   8886 	 * 3) Disable PHY wakeup register.
   8887 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   8888 	 */
   8889 	/* Set page 769 */
   8890 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8891 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8892 
   8893 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8894 }
   8895 
   8896 /*
   8897  * wm_gmii_hv_readreg:	[mii interface function]
   8898  *
   8899  *	Read a PHY register on the kumeran
   8900  * This could be handled by the PHY layer if we didn't have to lock the
   8901  * ressource ...
   8902  */
   8903 static int
   8904 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8905 {
   8906 	struct wm_softc *sc = device_private(self);
   8907 	int rv;
   8908 
   8909 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8910 		device_xname(sc->sc_dev), __func__));
   8911 	if (sc->phy.acquire(sc)) {
   8912 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8913 		    __func__);
   8914 		return 0;
   8915 	}
   8916 
   8917 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   8918 	sc->phy.release(sc);
   8919 	return rv;
   8920 }
   8921 
   8922 static int
   8923 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   8924 {
   8925 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8926 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8927 	uint16_t val;
   8928 	int rv;
   8929 
   8930 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   8931 
   8932 	/* Page 800 works differently than the rest so it has its own func */
   8933 	if (page == BM_WUC_PAGE) {
   8934 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8935 		return val;
   8936 	}
   8937 
   8938 	/*
   8939 	 * Lower than page 768 works differently than the rest so it has its
   8940 	 * own func
   8941 	 */
   8942 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8943 		printf("gmii_hv_readreg!!!\n");
   8944 		return 0;
   8945 	}
   8946 
   8947 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8948 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8949 		    page << BME1000_PAGE_SHIFT);
   8950 	}
   8951 
   8952 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   8953 	return rv;
   8954 }
   8955 
   8956 /*
   8957  * wm_gmii_hv_writereg:	[mii interface function]
   8958  *
   8959  *	Write a PHY register on the kumeran.
   8960  * This could be handled by the PHY layer if we didn't have to lock the
   8961  * ressource ...
   8962  */
   8963 static void
   8964 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8965 {
   8966 	struct wm_softc *sc = device_private(self);
   8967 
   8968 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8969 		device_xname(sc->sc_dev), __func__));
   8970 
   8971 	if (sc->phy.acquire(sc)) {
   8972 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8973 		    __func__);
   8974 		return;
   8975 	}
   8976 
   8977 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   8978 	sc->phy.release(sc);
   8979 }
   8980 
   8981 static void
   8982 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   8983 {
   8984 	struct wm_softc *sc = device_private(self);
   8985 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8986 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8987 
   8988 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   8989 
   8990 	/* Page 800 works differently than the rest so it has its own func */
   8991 	if (page == BM_WUC_PAGE) {
   8992 		uint16_t tmp;
   8993 
   8994 		tmp = val;
   8995 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8996 		return;
   8997 	}
   8998 
   8999 	/*
   9000 	 * Lower than page 768 works differently than the rest so it has its
   9001 	 * own func
   9002 	 */
   9003 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9004 		printf("gmii_hv_writereg!!!\n");
   9005 		return;
   9006 	}
   9007 
   9008 	{
   9009 		/*
   9010 		 * XXX Workaround MDIO accesses being disabled after entering
   9011 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9012 		 * register is set)
   9013 		 */
   9014 		if (sc->sc_phytype == WMPHY_82578) {
   9015 			struct mii_softc *child;
   9016 
   9017 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9018 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9019 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9020 			    && ((val & (1 << 11)) != 0)) {
   9021 				printf("XXX need workaround\n");
   9022 			}
   9023 		}
   9024 
   9025 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9026 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9027 			    page << BME1000_PAGE_SHIFT);
   9028 		}
   9029 	}
   9030 
   9031 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9032 }
   9033 
   9034 /*
   9035  * wm_gmii_82580_readreg:	[mii interface function]
   9036  *
   9037  *	Read a PHY register on the 82580 and I350.
   9038  * This could be handled by the PHY layer if we didn't have to lock the
   9039  * ressource ...
   9040  */
   9041 static int
   9042 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9043 {
   9044 	struct wm_softc *sc = device_private(self);
   9045 	int rv;
   9046 
   9047 	if (sc->phy.acquire(sc) != 0) {
   9048 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9049 		    __func__);
   9050 		return 0;
   9051 	}
   9052 
   9053 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9054 
   9055 	sc->phy.release(sc);
   9056 	return rv;
   9057 }
   9058 
   9059 /*
   9060  * wm_gmii_82580_writereg:	[mii interface function]
   9061  *
   9062  *	Write a PHY register on the 82580 and I350.
   9063  * This could be handled by the PHY layer if we didn't have to lock the
   9064  * ressource ...
   9065  */
   9066 static void
   9067 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9068 {
   9069 	struct wm_softc *sc = device_private(self);
   9070 
   9071 	if (sc->phy.acquire(sc) != 0) {
   9072 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9073 		    __func__);
   9074 		return;
   9075 	}
   9076 
   9077 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9078 
   9079 	sc->phy.release(sc);
   9080 }
   9081 
   9082 /*
   9083  * wm_gmii_gs40g_readreg:	[mii interface function]
   9084  *
   9085  *	Read a PHY register on the I2100 and I211.
   9086  * This could be handled by the PHY layer if we didn't have to lock the
   9087  * ressource ...
   9088  */
   9089 static int
   9090 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9091 {
   9092 	struct wm_softc *sc = device_private(self);
   9093 	int page, offset;
   9094 	int rv;
   9095 
   9096 	/* Acquire semaphore */
   9097 	if (sc->phy.acquire(sc)) {
   9098 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9099 		    __func__);
   9100 		return 0;
   9101 	}
   9102 
   9103 	/* Page select */
   9104 	page = reg >> GS40G_PAGE_SHIFT;
   9105 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9106 
   9107 	/* Read reg */
   9108 	offset = reg & GS40G_OFFSET_MASK;
   9109 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9110 
   9111 	sc->phy.release(sc);
   9112 	return rv;
   9113 }
   9114 
   9115 /*
   9116  * wm_gmii_gs40g_writereg:	[mii interface function]
   9117  *
   9118  *	Write a PHY register on the I210 and I211.
   9119  * This could be handled by the PHY layer if we didn't have to lock the
   9120  * ressource ...
   9121  */
   9122 static void
   9123 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9124 {
   9125 	struct wm_softc *sc = device_private(self);
   9126 	int page, offset;
   9127 
   9128 	/* Acquire semaphore */
   9129 	if (sc->phy.acquire(sc)) {
   9130 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9131 		    __func__);
   9132 		return;
   9133 	}
   9134 
   9135 	/* Page select */
   9136 	page = reg >> GS40G_PAGE_SHIFT;
   9137 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9138 
   9139 	/* Write reg */
   9140 	offset = reg & GS40G_OFFSET_MASK;
   9141 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9142 
   9143 	/* Release semaphore */
   9144 	sc->phy.release(sc);
   9145 }
   9146 
   9147 /*
   9148  * wm_gmii_statchg:	[mii interface function]
   9149  *
   9150  *	Callback from MII layer when media changes.
   9151  */
   9152 static void
   9153 wm_gmii_statchg(struct ifnet *ifp)
   9154 {
   9155 	struct wm_softc *sc = ifp->if_softc;
   9156 	struct mii_data *mii = &sc->sc_mii;
   9157 
   9158 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9159 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9160 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9161 
   9162 	/*
   9163 	 * Get flow control negotiation result.
   9164 	 */
   9165 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9166 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9167 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9168 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9169 	}
   9170 
   9171 	if (sc->sc_flowflags & IFM_FLOW) {
   9172 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9173 			sc->sc_ctrl |= CTRL_TFCE;
   9174 			sc->sc_fcrtl |= FCRTL_XONE;
   9175 		}
   9176 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9177 			sc->sc_ctrl |= CTRL_RFCE;
   9178 	}
   9179 
   9180 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9181 		DPRINTF(WM_DEBUG_LINK,
   9182 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9183 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9184 	} else {
   9185 		DPRINTF(WM_DEBUG_LINK,
   9186 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9187 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9188 	}
   9189 
   9190 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9191 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9192 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9193 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9194 	if (sc->sc_type == WM_T_80003) {
   9195 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9196 		case IFM_1000_T:
   9197 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9198 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9199 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9200 			break;
   9201 		default:
   9202 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9203 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9204 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9205 			break;
   9206 		}
   9207 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9208 	}
   9209 }
   9210 
   9211 /*
   9212  * wm_kmrn_readreg:
   9213  *
   9214  *	Read a kumeran register
   9215  */
   9216 static int
   9217 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9218 {
   9219 	int rv;
   9220 
   9221 	if (sc->sc_type == WM_T_80003)
   9222 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9223 	else
   9224 		rv = sc->phy.acquire(sc);
   9225 	if (rv != 0) {
   9226 		aprint_error_dev(sc->sc_dev,
   9227 		    "%s: failed to get semaphore\n", __func__);
   9228 		return 0;
   9229 	}
   9230 
   9231 	rv = wm_kmrn_readreg_locked(sc, reg);
   9232 
   9233 	if (sc->sc_type == WM_T_80003)
   9234 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9235 	else
   9236 		sc->phy.release(sc);
   9237 
   9238 	return rv;
   9239 }
   9240 
   9241 static int
   9242 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9243 {
   9244 	int rv;
   9245 
   9246 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9247 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9248 	    KUMCTRLSTA_REN);
   9249 	CSR_WRITE_FLUSH(sc);
   9250 	delay(2);
   9251 
   9252 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9253 
   9254 	return rv;
   9255 }
   9256 
   9257 /*
   9258  * wm_kmrn_writereg:
   9259  *
   9260  *	Write a kumeran register
   9261  */
   9262 static void
   9263 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9264 {
   9265 	int rv;
   9266 
   9267 	if (sc->sc_type == WM_T_80003)
   9268 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9269 	else
   9270 		rv = sc->phy.acquire(sc);
   9271 	if (rv != 0) {
   9272 		aprint_error_dev(sc->sc_dev,
   9273 		    "%s: failed to get semaphore\n", __func__);
   9274 		return;
   9275 	}
   9276 
   9277 	wm_kmrn_writereg_locked(sc, reg, val);
   9278 
   9279 	if (sc->sc_type == WM_T_80003)
   9280 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9281 	else
   9282 		sc->phy.release(sc);
   9283 }
   9284 
   9285 static void
   9286 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9287 {
   9288 
   9289 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9290 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9291 	    (val & KUMCTRLSTA_MASK));
   9292 }
   9293 
   9294 /* SGMII related */
   9295 
   9296 /*
   9297  * wm_sgmii_uses_mdio
   9298  *
   9299  * Check whether the transaction is to the internal PHY or the external
   9300  * MDIO interface. Return true if it's MDIO.
   9301  */
   9302 static bool
   9303 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9304 {
   9305 	uint32_t reg;
   9306 	bool ismdio = false;
   9307 
   9308 	switch (sc->sc_type) {
   9309 	case WM_T_82575:
   9310 	case WM_T_82576:
   9311 		reg = CSR_READ(sc, WMREG_MDIC);
   9312 		ismdio = ((reg & MDIC_DEST) != 0);
   9313 		break;
   9314 	case WM_T_82580:
   9315 	case WM_T_I350:
   9316 	case WM_T_I354:
   9317 	case WM_T_I210:
   9318 	case WM_T_I211:
   9319 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9320 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9321 		break;
   9322 	default:
   9323 		break;
   9324 	}
   9325 
   9326 	return ismdio;
   9327 }
   9328 
   9329 /*
   9330  * wm_sgmii_readreg:	[mii interface function]
   9331  *
   9332  *	Read a PHY register on the SGMII
   9333  * This could be handled by the PHY layer if we didn't have to lock the
   9334  * ressource ...
   9335  */
   9336 static int
   9337 wm_sgmii_readreg(device_t self, int phy, int reg)
   9338 {
   9339 	struct wm_softc *sc = device_private(self);
   9340 	uint32_t i2ccmd;
   9341 	int i, rv;
   9342 
   9343 	if (sc->phy.acquire(sc)) {
   9344 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9345 		    __func__);
   9346 		return 0;
   9347 	}
   9348 
   9349 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9350 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9351 	    | I2CCMD_OPCODE_READ;
   9352 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9353 
   9354 	/* Poll the ready bit */
   9355 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9356 		delay(50);
   9357 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9358 		if (i2ccmd & I2CCMD_READY)
   9359 			break;
   9360 	}
   9361 	if ((i2ccmd & I2CCMD_READY) == 0)
   9362 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9363 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9364 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9365 
   9366 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9367 
   9368 	sc->phy.release(sc);
   9369 	return rv;
   9370 }
   9371 
   9372 /*
   9373  * wm_sgmii_writereg:	[mii interface function]
   9374  *
   9375  *	Write a PHY register on the SGMII.
   9376  * This could be handled by the PHY layer if we didn't have to lock the
   9377  * ressource ...
   9378  */
   9379 static void
   9380 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9381 {
   9382 	struct wm_softc *sc = device_private(self);
   9383 	uint32_t i2ccmd;
   9384 	int i;
   9385 	int val_swapped;
   9386 
   9387 	if (sc->phy.acquire(sc) != 0) {
   9388 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9389 		    __func__);
   9390 		return;
   9391 	}
   9392 	/* Swap the data bytes for the I2C interface */
   9393 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9394 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9395 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9396 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9397 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9398 
   9399 	/* Poll the ready bit */
   9400 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9401 		delay(50);
   9402 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9403 		if (i2ccmd & I2CCMD_READY)
   9404 			break;
   9405 	}
   9406 	if ((i2ccmd & I2CCMD_READY) == 0)
   9407 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9408 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9409 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9410 
   9411 	sc->phy.release(sc);
   9412 }
   9413 
   9414 /* TBI related */
   9415 
   9416 /*
   9417  * wm_tbi_mediainit:
   9418  *
   9419  *	Initialize media for use on 1000BASE-X devices.
   9420  */
   9421 static void
   9422 wm_tbi_mediainit(struct wm_softc *sc)
   9423 {
   9424 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9425 	const char *sep = "";
   9426 
   9427 	if (sc->sc_type < WM_T_82543)
   9428 		sc->sc_tipg = TIPG_WM_DFLT;
   9429 	else
   9430 		sc->sc_tipg = TIPG_LG_DFLT;
   9431 
   9432 	sc->sc_tbi_serdes_anegticks = 5;
   9433 
   9434 	/* Initialize our media structures */
   9435 	sc->sc_mii.mii_ifp = ifp;
   9436 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9437 
   9438 	if ((sc->sc_type >= WM_T_82575)
   9439 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9440 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9441 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9442 	else
   9443 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9444 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9445 
   9446 	/*
   9447 	 * SWD Pins:
   9448 	 *
   9449 	 *	0 = Link LED (output)
   9450 	 *	1 = Loss Of Signal (input)
   9451 	 */
   9452 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9453 
   9454 	/* XXX Perhaps this is only for TBI */
   9455 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9456 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9457 
   9458 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9459 		sc->sc_ctrl &= ~CTRL_LRST;
   9460 
   9461 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9462 
   9463 #define	ADD(ss, mm, dd)							\
   9464 do {									\
   9465 	aprint_normal("%s%s", sep, ss);					\
   9466 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9467 	sep = ", ";							\
   9468 } while (/*CONSTCOND*/0)
   9469 
   9470 	aprint_normal_dev(sc->sc_dev, "");
   9471 
   9472 	/* Only 82545 is LX */
   9473 	if (sc->sc_type == WM_T_82545) {
   9474 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9475 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9476 	} else {
   9477 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9478 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9479 	}
   9480 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9481 	aprint_normal("\n");
   9482 
   9483 #undef ADD
   9484 
   9485 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9486 }
   9487 
   9488 /*
   9489  * wm_tbi_mediachange:	[ifmedia interface function]
   9490  *
   9491  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9492  */
   9493 static int
   9494 wm_tbi_mediachange(struct ifnet *ifp)
   9495 {
   9496 	struct wm_softc *sc = ifp->if_softc;
   9497 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9498 	uint32_t status;
   9499 	int i;
   9500 
   9501 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9502 		/* XXX need some work for >= 82571 and < 82575 */
   9503 		if (sc->sc_type < WM_T_82575)
   9504 			return 0;
   9505 	}
   9506 
   9507 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9508 	    || (sc->sc_type >= WM_T_82575))
   9509 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9510 
   9511 	sc->sc_ctrl &= ~CTRL_LRST;
   9512 	sc->sc_txcw = TXCW_ANE;
   9513 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9514 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9515 	else if (ife->ifm_media & IFM_FDX)
   9516 		sc->sc_txcw |= TXCW_FD;
   9517 	else
   9518 		sc->sc_txcw |= TXCW_HD;
   9519 
   9520 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9521 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9522 
   9523 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9524 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9525 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9526 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9527 	CSR_WRITE_FLUSH(sc);
   9528 	delay(1000);
   9529 
   9530 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9531 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9532 
   9533 	/*
   9534 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9535 	 * optics detect a signal, 0 if they don't.
   9536 	 */
   9537 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9538 		/* Have signal; wait for the link to come up. */
   9539 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9540 			delay(10000);
   9541 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9542 				break;
   9543 		}
   9544 
   9545 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9546 			    device_xname(sc->sc_dev),i));
   9547 
   9548 		status = CSR_READ(sc, WMREG_STATUS);
   9549 		DPRINTF(WM_DEBUG_LINK,
   9550 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9551 			device_xname(sc->sc_dev),status, STATUS_LU));
   9552 		if (status & STATUS_LU) {
   9553 			/* Link is up. */
   9554 			DPRINTF(WM_DEBUG_LINK,
   9555 			    ("%s: LINK: set media -> link up %s\n",
   9556 			    device_xname(sc->sc_dev),
   9557 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9558 
   9559 			/*
   9560 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9561 			 * so we should update sc->sc_ctrl
   9562 			 */
   9563 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9564 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9565 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9566 			if (status & STATUS_FD)
   9567 				sc->sc_tctl |=
   9568 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9569 			else
   9570 				sc->sc_tctl |=
   9571 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9572 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9573 				sc->sc_fcrtl |= FCRTL_XONE;
   9574 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9575 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9576 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9577 				      sc->sc_fcrtl);
   9578 			sc->sc_tbi_linkup = 1;
   9579 		} else {
   9580 			if (i == WM_LINKUP_TIMEOUT)
   9581 				wm_check_for_link(sc);
   9582 			/* Link is down. */
   9583 			DPRINTF(WM_DEBUG_LINK,
   9584 			    ("%s: LINK: set media -> link down\n",
   9585 			    device_xname(sc->sc_dev)));
   9586 			sc->sc_tbi_linkup = 0;
   9587 		}
   9588 	} else {
   9589 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9590 		    device_xname(sc->sc_dev)));
   9591 		sc->sc_tbi_linkup = 0;
   9592 	}
   9593 
   9594 	wm_tbi_serdes_set_linkled(sc);
   9595 
   9596 	return 0;
   9597 }
   9598 
   9599 /*
   9600  * wm_tbi_mediastatus:	[ifmedia interface function]
   9601  *
   9602  *	Get the current interface media status on a 1000BASE-X device.
   9603  */
   9604 static void
   9605 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9606 {
   9607 	struct wm_softc *sc = ifp->if_softc;
   9608 	uint32_t ctrl, status;
   9609 
   9610 	ifmr->ifm_status = IFM_AVALID;
   9611 	ifmr->ifm_active = IFM_ETHER;
   9612 
   9613 	status = CSR_READ(sc, WMREG_STATUS);
   9614 	if ((status & STATUS_LU) == 0) {
   9615 		ifmr->ifm_active |= IFM_NONE;
   9616 		return;
   9617 	}
   9618 
   9619 	ifmr->ifm_status |= IFM_ACTIVE;
   9620 	/* Only 82545 is LX */
   9621 	if (sc->sc_type == WM_T_82545)
   9622 		ifmr->ifm_active |= IFM_1000_LX;
   9623 	else
   9624 		ifmr->ifm_active |= IFM_1000_SX;
   9625 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9626 		ifmr->ifm_active |= IFM_FDX;
   9627 	else
   9628 		ifmr->ifm_active |= IFM_HDX;
   9629 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9630 	if (ctrl & CTRL_RFCE)
   9631 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9632 	if (ctrl & CTRL_TFCE)
   9633 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9634 }
   9635 
   9636 /* XXX TBI only */
   9637 static int
   9638 wm_check_for_link(struct wm_softc *sc)
   9639 {
   9640 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9641 	uint32_t rxcw;
   9642 	uint32_t ctrl;
   9643 	uint32_t status;
   9644 	uint32_t sig;
   9645 
   9646 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9647 		/* XXX need some work for >= 82571 */
   9648 		if (sc->sc_type >= WM_T_82571) {
   9649 			sc->sc_tbi_linkup = 1;
   9650 			return 0;
   9651 		}
   9652 	}
   9653 
   9654 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9655 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9656 	status = CSR_READ(sc, WMREG_STATUS);
   9657 
   9658 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9659 
   9660 	DPRINTF(WM_DEBUG_LINK,
   9661 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9662 		device_xname(sc->sc_dev), __func__,
   9663 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9664 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9665 
   9666 	/*
   9667 	 * SWDPIN   LU RXCW
   9668 	 *      0    0    0
   9669 	 *      0    0    1	(should not happen)
   9670 	 *      0    1    0	(should not happen)
   9671 	 *      0    1    1	(should not happen)
   9672 	 *      1    0    0	Disable autonego and force linkup
   9673 	 *      1    0    1	got /C/ but not linkup yet
   9674 	 *      1    1    0	(linkup)
   9675 	 *      1    1    1	If IFM_AUTO, back to autonego
   9676 	 *
   9677 	 */
   9678 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9679 	    && ((status & STATUS_LU) == 0)
   9680 	    && ((rxcw & RXCW_C) == 0)) {
   9681 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9682 			__func__));
   9683 		sc->sc_tbi_linkup = 0;
   9684 		/* Disable auto-negotiation in the TXCW register */
   9685 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9686 
   9687 		/*
   9688 		 * Force link-up and also force full-duplex.
   9689 		 *
   9690 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   9691 		 * so we should update sc->sc_ctrl
   9692 		 */
   9693 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9694 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9695 	} else if (((status & STATUS_LU) != 0)
   9696 	    && ((rxcw & RXCW_C) != 0)
   9697 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9698 		sc->sc_tbi_linkup = 1;
   9699 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9700 			__func__));
   9701 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9702 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9703 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9704 	    && ((rxcw & RXCW_C) != 0)) {
   9705 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9706 	} else {
   9707 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9708 			status));
   9709 	}
   9710 
   9711 	return 0;
   9712 }
   9713 
   9714 /*
   9715  * wm_tbi_tick:
   9716  *
   9717  *	Check the link on TBI devices.
   9718  *	This function acts as mii_tick().
   9719  */
   9720 static void
   9721 wm_tbi_tick(struct wm_softc *sc)
   9722 {
   9723 	struct mii_data *mii = &sc->sc_mii;
   9724 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9725 	uint32_t status;
   9726 
   9727 	KASSERT(WM_CORE_LOCKED(sc));
   9728 
   9729 	status = CSR_READ(sc, WMREG_STATUS);
   9730 
   9731 	/* XXX is this needed? */
   9732 	(void)CSR_READ(sc, WMREG_RXCW);
   9733 	(void)CSR_READ(sc, WMREG_CTRL);
   9734 
   9735 	/* set link status */
   9736 	if ((status & STATUS_LU) == 0) {
   9737 		DPRINTF(WM_DEBUG_LINK,
   9738 		    ("%s: LINK: checklink -> down\n",
   9739 			device_xname(sc->sc_dev)));
   9740 		sc->sc_tbi_linkup = 0;
   9741 	} else if (sc->sc_tbi_linkup == 0) {
   9742 		DPRINTF(WM_DEBUG_LINK,
   9743 		    ("%s: LINK: checklink -> up %s\n",
   9744 			device_xname(sc->sc_dev),
   9745 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9746 		sc->sc_tbi_linkup = 1;
   9747 		sc->sc_tbi_serdes_ticks = 0;
   9748 	}
   9749 
   9750 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9751 		goto setled;
   9752 
   9753 	if ((status & STATUS_LU) == 0) {
   9754 		sc->sc_tbi_linkup = 0;
   9755 		/* If the timer expired, retry autonegotiation */
   9756 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9757 		    && (++sc->sc_tbi_serdes_ticks
   9758 			>= sc->sc_tbi_serdes_anegticks)) {
   9759 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9760 			sc->sc_tbi_serdes_ticks = 0;
   9761 			/*
   9762 			 * Reset the link, and let autonegotiation do
   9763 			 * its thing
   9764 			 */
   9765 			sc->sc_ctrl |= CTRL_LRST;
   9766 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9767 			CSR_WRITE_FLUSH(sc);
   9768 			delay(1000);
   9769 			sc->sc_ctrl &= ~CTRL_LRST;
   9770 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9771 			CSR_WRITE_FLUSH(sc);
   9772 			delay(1000);
   9773 			CSR_WRITE(sc, WMREG_TXCW,
   9774 			    sc->sc_txcw & ~TXCW_ANE);
   9775 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9776 		}
   9777 	}
   9778 
   9779 setled:
   9780 	wm_tbi_serdes_set_linkled(sc);
   9781 }
   9782 
   9783 /* SERDES related */
   9784 static void
   9785 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9786 {
   9787 	uint32_t reg;
   9788 
   9789 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9790 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9791 		return;
   9792 
   9793 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9794 	reg |= PCS_CFG_PCS_EN;
   9795 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9796 
   9797 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9798 	reg &= ~CTRL_EXT_SWDPIN(3);
   9799 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9800 	CSR_WRITE_FLUSH(sc);
   9801 }
   9802 
   9803 static int
   9804 wm_serdes_mediachange(struct ifnet *ifp)
   9805 {
   9806 	struct wm_softc *sc = ifp->if_softc;
   9807 	bool pcs_autoneg = true; /* XXX */
   9808 	uint32_t ctrl_ext, pcs_lctl, reg;
   9809 
   9810 	/* XXX Currently, this function is not called on 8257[12] */
   9811 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9812 	    || (sc->sc_type >= WM_T_82575))
   9813 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9814 
   9815 	wm_serdes_power_up_link_82575(sc);
   9816 
   9817 	sc->sc_ctrl |= CTRL_SLU;
   9818 
   9819 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9820 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9821 
   9822 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9823 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9824 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9825 	case CTRL_EXT_LINK_MODE_SGMII:
   9826 		pcs_autoneg = true;
   9827 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9828 		break;
   9829 	case CTRL_EXT_LINK_MODE_1000KX:
   9830 		pcs_autoneg = false;
   9831 		/* FALLTHROUGH */
   9832 	default:
   9833 		if ((sc->sc_type == WM_T_82575)
   9834 		    || (sc->sc_type == WM_T_82576)) {
   9835 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9836 				pcs_autoneg = false;
   9837 		}
   9838 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9839 		    | CTRL_FRCFDX;
   9840 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9841 	}
   9842 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9843 
   9844 	if (pcs_autoneg) {
   9845 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9846 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9847 
   9848 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9849 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9850 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9851 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9852 	} else
   9853 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9854 
   9855 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9856 
   9857 
   9858 	return 0;
   9859 }
   9860 
   9861 static void
   9862 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9863 {
   9864 	struct wm_softc *sc = ifp->if_softc;
   9865 	struct mii_data *mii = &sc->sc_mii;
   9866 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9867 	uint32_t pcs_adv, pcs_lpab, reg;
   9868 
   9869 	ifmr->ifm_status = IFM_AVALID;
   9870 	ifmr->ifm_active = IFM_ETHER;
   9871 
   9872 	/* Check PCS */
   9873 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9874 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9875 		ifmr->ifm_active |= IFM_NONE;
   9876 		sc->sc_tbi_linkup = 0;
   9877 		goto setled;
   9878 	}
   9879 
   9880 	sc->sc_tbi_linkup = 1;
   9881 	ifmr->ifm_status |= IFM_ACTIVE;
   9882 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9883 	if ((reg & PCS_LSTS_FDX) != 0)
   9884 		ifmr->ifm_active |= IFM_FDX;
   9885 	else
   9886 		ifmr->ifm_active |= IFM_HDX;
   9887 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9888 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9889 		/* Check flow */
   9890 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9891 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9892 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9893 			goto setled;
   9894 		}
   9895 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9896 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9897 		DPRINTF(WM_DEBUG_LINK,
   9898 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9899 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9900 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9901 			mii->mii_media_active |= IFM_FLOW
   9902 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9903 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9904 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9905 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9906 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9907 			mii->mii_media_active |= IFM_FLOW
   9908 			    | IFM_ETH_TXPAUSE;
   9909 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9910 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9911 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9912 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9913 			mii->mii_media_active |= IFM_FLOW
   9914 			    | IFM_ETH_RXPAUSE;
   9915 		} else {
   9916 		}
   9917 	}
   9918 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9919 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9920 setled:
   9921 	wm_tbi_serdes_set_linkled(sc);
   9922 }
   9923 
   9924 /*
   9925  * wm_serdes_tick:
   9926  *
   9927  *	Check the link on serdes devices.
   9928  */
   9929 static void
   9930 wm_serdes_tick(struct wm_softc *sc)
   9931 {
   9932 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9933 	struct mii_data *mii = &sc->sc_mii;
   9934 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9935 	uint32_t reg;
   9936 
   9937 	KASSERT(WM_CORE_LOCKED(sc));
   9938 
   9939 	mii->mii_media_status = IFM_AVALID;
   9940 	mii->mii_media_active = IFM_ETHER;
   9941 
   9942 	/* Check PCS */
   9943 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9944 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9945 		mii->mii_media_status |= IFM_ACTIVE;
   9946 		sc->sc_tbi_linkup = 1;
   9947 		sc->sc_tbi_serdes_ticks = 0;
   9948 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9949 		if ((reg & PCS_LSTS_FDX) != 0)
   9950 			mii->mii_media_active |= IFM_FDX;
   9951 		else
   9952 			mii->mii_media_active |= IFM_HDX;
   9953 	} else {
   9954 		mii->mii_media_status |= IFM_NONE;
   9955 		sc->sc_tbi_linkup = 0;
   9956 		    /* If the timer expired, retry autonegotiation */
   9957 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9958 		    && (++sc->sc_tbi_serdes_ticks
   9959 			>= sc->sc_tbi_serdes_anegticks)) {
   9960 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9961 			sc->sc_tbi_serdes_ticks = 0;
   9962 			/* XXX */
   9963 			wm_serdes_mediachange(ifp);
   9964 		}
   9965 	}
   9966 
   9967 	wm_tbi_serdes_set_linkled(sc);
   9968 }
   9969 
   9970 /* SFP related */
   9971 
   9972 static int
   9973 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9974 {
   9975 	uint32_t i2ccmd;
   9976 	int i;
   9977 
   9978 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9979 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9980 
   9981 	/* Poll the ready bit */
   9982 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9983 		delay(50);
   9984 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9985 		if (i2ccmd & I2CCMD_READY)
   9986 			break;
   9987 	}
   9988 	if ((i2ccmd & I2CCMD_READY) == 0)
   9989 		return -1;
   9990 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9991 		return -1;
   9992 
   9993 	*data = i2ccmd & 0x00ff;
   9994 
   9995 	return 0;
   9996 }
   9997 
   9998 static uint32_t
   9999 wm_sfp_get_media_type(struct wm_softc *sc)
   10000 {
   10001 	uint32_t ctrl_ext;
   10002 	uint8_t val = 0;
   10003 	int timeout = 3;
   10004 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10005 	int rv = -1;
   10006 
   10007 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10008 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10009 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10010 	CSR_WRITE_FLUSH(sc);
   10011 
   10012 	/* Read SFP module data */
   10013 	while (timeout) {
   10014 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10015 		if (rv == 0)
   10016 			break;
   10017 		delay(100*1000); /* XXX too big */
   10018 		timeout--;
   10019 	}
   10020 	if (rv != 0)
   10021 		goto out;
   10022 	switch (val) {
   10023 	case SFF_SFP_ID_SFF:
   10024 		aprint_normal_dev(sc->sc_dev,
   10025 		    "Module/Connector soldered to board\n");
   10026 		break;
   10027 	case SFF_SFP_ID_SFP:
   10028 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10029 		break;
   10030 	case SFF_SFP_ID_UNKNOWN:
   10031 		goto out;
   10032 	default:
   10033 		break;
   10034 	}
   10035 
   10036 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10037 	if (rv != 0) {
   10038 		goto out;
   10039 	}
   10040 
   10041 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10042 		mediatype = WM_MEDIATYPE_SERDES;
   10043 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10044 		sc->sc_flags |= WM_F_SGMII;
   10045 		mediatype = WM_MEDIATYPE_COPPER;
   10046 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10047 		sc->sc_flags |= WM_F_SGMII;
   10048 		mediatype = WM_MEDIATYPE_SERDES;
   10049 	}
   10050 
   10051 out:
   10052 	/* Restore I2C interface setting */
   10053 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10054 
   10055 	return mediatype;
   10056 }
   10057 /*
   10058  * NVM related.
   10059  * Microwire, SPI (w/wo EERD) and Flash.
   10060  */
   10061 
   10062 /* Both spi and uwire */
   10063 
   10064 /*
   10065  * wm_eeprom_sendbits:
   10066  *
   10067  *	Send a series of bits to the EEPROM.
   10068  */
   10069 static void
   10070 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10071 {
   10072 	uint32_t reg;
   10073 	int x;
   10074 
   10075 	reg = CSR_READ(sc, WMREG_EECD);
   10076 
   10077 	for (x = nbits; x > 0; x--) {
   10078 		if (bits & (1U << (x - 1)))
   10079 			reg |= EECD_DI;
   10080 		else
   10081 			reg &= ~EECD_DI;
   10082 		CSR_WRITE(sc, WMREG_EECD, reg);
   10083 		CSR_WRITE_FLUSH(sc);
   10084 		delay(2);
   10085 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10086 		CSR_WRITE_FLUSH(sc);
   10087 		delay(2);
   10088 		CSR_WRITE(sc, WMREG_EECD, reg);
   10089 		CSR_WRITE_FLUSH(sc);
   10090 		delay(2);
   10091 	}
   10092 }
   10093 
   10094 /*
   10095  * wm_eeprom_recvbits:
   10096  *
   10097  *	Receive a series of bits from the EEPROM.
   10098  */
   10099 static void
   10100 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10101 {
   10102 	uint32_t reg, val;
   10103 	int x;
   10104 
   10105 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10106 
   10107 	val = 0;
   10108 	for (x = nbits; x > 0; x--) {
   10109 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10110 		CSR_WRITE_FLUSH(sc);
   10111 		delay(2);
   10112 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10113 			val |= (1U << (x - 1));
   10114 		CSR_WRITE(sc, WMREG_EECD, reg);
   10115 		CSR_WRITE_FLUSH(sc);
   10116 		delay(2);
   10117 	}
   10118 	*valp = val;
   10119 }
   10120 
   10121 /* Microwire */
   10122 
   10123 /*
   10124  * wm_nvm_read_uwire:
   10125  *
   10126  *	Read a word from the EEPROM using the MicroWire protocol.
   10127  */
   10128 static int
   10129 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10130 {
   10131 	uint32_t reg, val;
   10132 	int i;
   10133 
   10134 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10135 		device_xname(sc->sc_dev), __func__));
   10136 
   10137 	for (i = 0; i < wordcnt; i++) {
   10138 		/* Clear SK and DI. */
   10139 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10140 		CSR_WRITE(sc, WMREG_EECD, reg);
   10141 
   10142 		/*
   10143 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10144 		 * and Xen.
   10145 		 *
   10146 		 * We use this workaround only for 82540 because qemu's
   10147 		 * e1000 act as 82540.
   10148 		 */
   10149 		if (sc->sc_type == WM_T_82540) {
   10150 			reg |= EECD_SK;
   10151 			CSR_WRITE(sc, WMREG_EECD, reg);
   10152 			reg &= ~EECD_SK;
   10153 			CSR_WRITE(sc, WMREG_EECD, reg);
   10154 			CSR_WRITE_FLUSH(sc);
   10155 			delay(2);
   10156 		}
   10157 		/* XXX: end of workaround */
   10158 
   10159 		/* Set CHIP SELECT. */
   10160 		reg |= EECD_CS;
   10161 		CSR_WRITE(sc, WMREG_EECD, reg);
   10162 		CSR_WRITE_FLUSH(sc);
   10163 		delay(2);
   10164 
   10165 		/* Shift in the READ command. */
   10166 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10167 
   10168 		/* Shift in address. */
   10169 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10170 
   10171 		/* Shift out the data. */
   10172 		wm_eeprom_recvbits(sc, &val, 16);
   10173 		data[i] = val & 0xffff;
   10174 
   10175 		/* Clear CHIP SELECT. */
   10176 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10177 		CSR_WRITE(sc, WMREG_EECD, reg);
   10178 		CSR_WRITE_FLUSH(sc);
   10179 		delay(2);
   10180 	}
   10181 
   10182 	return 0;
   10183 }
   10184 
   10185 /* SPI */
   10186 
   10187 /*
   10188  * Set SPI and FLASH related information from the EECD register.
   10189  * For 82541 and 82547, the word size is taken from EEPROM.
   10190  */
   10191 static int
   10192 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10193 {
   10194 	int size;
   10195 	uint32_t reg;
   10196 	uint16_t data;
   10197 
   10198 	reg = CSR_READ(sc, WMREG_EECD);
   10199 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10200 
   10201 	/* Read the size of NVM from EECD by default */
   10202 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10203 	switch (sc->sc_type) {
   10204 	case WM_T_82541:
   10205 	case WM_T_82541_2:
   10206 	case WM_T_82547:
   10207 	case WM_T_82547_2:
   10208 		/* Set dummy value to access EEPROM */
   10209 		sc->sc_nvm_wordsize = 64;
   10210 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10211 		reg = data;
   10212 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10213 		if (size == 0)
   10214 			size = 6; /* 64 word size */
   10215 		else
   10216 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10217 		break;
   10218 	case WM_T_80003:
   10219 	case WM_T_82571:
   10220 	case WM_T_82572:
   10221 	case WM_T_82573: /* SPI case */
   10222 	case WM_T_82574: /* SPI case */
   10223 	case WM_T_82583: /* SPI case */
   10224 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10225 		if (size > 14)
   10226 			size = 14;
   10227 		break;
   10228 	case WM_T_82575:
   10229 	case WM_T_82576:
   10230 	case WM_T_82580:
   10231 	case WM_T_I350:
   10232 	case WM_T_I354:
   10233 	case WM_T_I210:
   10234 	case WM_T_I211:
   10235 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10236 		if (size > 15)
   10237 			size = 15;
   10238 		break;
   10239 	default:
   10240 		aprint_error_dev(sc->sc_dev,
   10241 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10242 		return -1;
   10243 		break;
   10244 	}
   10245 
   10246 	sc->sc_nvm_wordsize = 1 << size;
   10247 
   10248 	return 0;
   10249 }
   10250 
   10251 /*
   10252  * wm_nvm_ready_spi:
   10253  *
   10254  *	Wait for a SPI EEPROM to be ready for commands.
   10255  */
   10256 static int
   10257 wm_nvm_ready_spi(struct wm_softc *sc)
   10258 {
   10259 	uint32_t val;
   10260 	int usec;
   10261 
   10262 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10263 		device_xname(sc->sc_dev), __func__));
   10264 
   10265 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10266 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10267 		wm_eeprom_recvbits(sc, &val, 8);
   10268 		if ((val & SPI_SR_RDY) == 0)
   10269 			break;
   10270 	}
   10271 	if (usec >= SPI_MAX_RETRIES) {
   10272 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10273 		return 1;
   10274 	}
   10275 	return 0;
   10276 }
   10277 
   10278 /*
   10279  * wm_nvm_read_spi:
   10280  *
   10281  *	Read a work from the EEPROM using the SPI protocol.
   10282  */
   10283 static int
   10284 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10285 {
   10286 	uint32_t reg, val;
   10287 	int i;
   10288 	uint8_t opc;
   10289 
   10290 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10291 		device_xname(sc->sc_dev), __func__));
   10292 
   10293 	/* Clear SK and CS. */
   10294 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10295 	CSR_WRITE(sc, WMREG_EECD, reg);
   10296 	CSR_WRITE_FLUSH(sc);
   10297 	delay(2);
   10298 
   10299 	if (wm_nvm_ready_spi(sc))
   10300 		return 1;
   10301 
   10302 	/* Toggle CS to flush commands. */
   10303 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10304 	CSR_WRITE_FLUSH(sc);
   10305 	delay(2);
   10306 	CSR_WRITE(sc, WMREG_EECD, reg);
   10307 	CSR_WRITE_FLUSH(sc);
   10308 	delay(2);
   10309 
   10310 	opc = SPI_OPC_READ;
   10311 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10312 		opc |= SPI_OPC_A8;
   10313 
   10314 	wm_eeprom_sendbits(sc, opc, 8);
   10315 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10316 
   10317 	for (i = 0; i < wordcnt; i++) {
   10318 		wm_eeprom_recvbits(sc, &val, 16);
   10319 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10320 	}
   10321 
   10322 	/* Raise CS and clear SK. */
   10323 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10324 	CSR_WRITE(sc, WMREG_EECD, reg);
   10325 	CSR_WRITE_FLUSH(sc);
   10326 	delay(2);
   10327 
   10328 	return 0;
   10329 }
   10330 
   10331 /* Using with EERD */
   10332 
   10333 static int
   10334 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10335 {
   10336 	uint32_t attempts = 100000;
   10337 	uint32_t i, reg = 0;
   10338 	int32_t done = -1;
   10339 
   10340 	for (i = 0; i < attempts; i++) {
   10341 		reg = CSR_READ(sc, rw);
   10342 
   10343 		if (reg & EERD_DONE) {
   10344 			done = 0;
   10345 			break;
   10346 		}
   10347 		delay(5);
   10348 	}
   10349 
   10350 	return done;
   10351 }
   10352 
   10353 static int
   10354 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10355     uint16_t *data)
   10356 {
   10357 	int i, eerd = 0;
   10358 	int error = 0;
   10359 
   10360 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10361 		device_xname(sc->sc_dev), __func__));
   10362 
   10363 	for (i = 0; i < wordcnt; i++) {
   10364 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10365 
   10366 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10367 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10368 		if (error != 0)
   10369 			break;
   10370 
   10371 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10372 	}
   10373 
   10374 	return error;
   10375 }
   10376 
   10377 /* Flash */
   10378 
   10379 static int
   10380 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10381 {
   10382 	uint32_t eecd;
   10383 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10384 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10385 	uint8_t sig_byte = 0;
   10386 
   10387 	switch (sc->sc_type) {
   10388 	case WM_T_PCH_SPT:
   10389 		/*
   10390 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10391 		 * sector valid bits from the NVM.
   10392 		 */
   10393 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10394 		if ((*bank == 0) || (*bank == 1)) {
   10395 			aprint_error_dev(sc->sc_dev,
   10396 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10397 				*bank);
   10398 			return -1;
   10399 		} else {
   10400 			*bank = *bank - 2;
   10401 			return 0;
   10402 		}
   10403 	case WM_T_ICH8:
   10404 	case WM_T_ICH9:
   10405 		eecd = CSR_READ(sc, WMREG_EECD);
   10406 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10407 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10408 			return 0;
   10409 		}
   10410 		/* FALLTHROUGH */
   10411 	default:
   10412 		/* Default to 0 */
   10413 		*bank = 0;
   10414 
   10415 		/* Check bank 0 */
   10416 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10417 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10418 			*bank = 0;
   10419 			return 0;
   10420 		}
   10421 
   10422 		/* Check bank 1 */
   10423 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10424 		    &sig_byte);
   10425 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10426 			*bank = 1;
   10427 			return 0;
   10428 		}
   10429 	}
   10430 
   10431 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10432 		device_xname(sc->sc_dev)));
   10433 	return -1;
   10434 }
   10435 
   10436 /******************************************************************************
   10437  * This function does initial flash setup so that a new read/write/erase cycle
   10438  * can be started.
   10439  *
   10440  * sc - The pointer to the hw structure
   10441  ****************************************************************************/
   10442 static int32_t
   10443 wm_ich8_cycle_init(struct wm_softc *sc)
   10444 {
   10445 	uint16_t hsfsts;
   10446 	int32_t error = 1;
   10447 	int32_t i     = 0;
   10448 
   10449 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10450 
   10451 	/* May be check the Flash Des Valid bit in Hw status */
   10452 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10453 		return error;
   10454 	}
   10455 
   10456 	/* Clear FCERR in Hw status by writing 1 */
   10457 	/* Clear DAEL in Hw status by writing a 1 */
   10458 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10459 
   10460 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10461 
   10462 	/*
   10463 	 * Either we should have a hardware SPI cycle in progress bit to check
   10464 	 * against, in order to start a new cycle or FDONE bit should be
   10465 	 * changed in the hardware so that it is 1 after harware reset, which
   10466 	 * can then be used as an indication whether a cycle is in progress or
   10467 	 * has been completed .. we should also have some software semaphore
   10468 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   10469 	 * threads access to those bits can be sequentiallized or a way so that
   10470 	 * 2 threads dont start the cycle at the same time
   10471 	 */
   10472 
   10473 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10474 		/*
   10475 		 * There is no cycle running at present, so we can start a
   10476 		 * cycle
   10477 		 */
   10478 
   10479 		/* Begin by setting Flash Cycle Done. */
   10480 		hsfsts |= HSFSTS_DONE;
   10481 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10482 		error = 0;
   10483 	} else {
   10484 		/*
   10485 		 * otherwise poll for sometime so the current cycle has a
   10486 		 * chance to end before giving up.
   10487 		 */
   10488 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10489 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10490 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10491 				error = 0;
   10492 				break;
   10493 			}
   10494 			delay(1);
   10495 		}
   10496 		if (error == 0) {
   10497 			/*
   10498 			 * Successful in waiting for previous cycle to timeout,
   10499 			 * now set the Flash Cycle Done.
   10500 			 */
   10501 			hsfsts |= HSFSTS_DONE;
   10502 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10503 		}
   10504 	}
   10505 	return error;
   10506 }
   10507 
   10508 /******************************************************************************
   10509  * This function starts a flash cycle and waits for its completion
   10510  *
   10511  * sc - The pointer to the hw structure
   10512  ****************************************************************************/
   10513 static int32_t
   10514 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10515 {
   10516 	uint16_t hsflctl;
   10517 	uint16_t hsfsts;
   10518 	int32_t error = 1;
   10519 	uint32_t i = 0;
   10520 
   10521 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10522 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10523 	hsflctl |= HSFCTL_GO;
   10524 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10525 
   10526 	/* Wait till FDONE bit is set to 1 */
   10527 	do {
   10528 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10529 		if (hsfsts & HSFSTS_DONE)
   10530 			break;
   10531 		delay(1);
   10532 		i++;
   10533 	} while (i < timeout);
   10534 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   10535 		error = 0;
   10536 
   10537 	return error;
   10538 }
   10539 
   10540 /******************************************************************************
   10541  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10542  *
   10543  * sc - The pointer to the hw structure
   10544  * index - The index of the byte or word to read.
   10545  * size - Size of data to read, 1=byte 2=word, 4=dword
   10546  * data - Pointer to the word to store the value read.
   10547  *****************************************************************************/
   10548 static int32_t
   10549 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10550     uint32_t size, uint32_t *data)
   10551 {
   10552 	uint16_t hsfsts;
   10553 	uint16_t hsflctl;
   10554 	uint32_t flash_linear_address;
   10555 	uint32_t flash_data = 0;
   10556 	int32_t error = 1;
   10557 	int32_t count = 0;
   10558 
   10559 	if (size < 1  || size > 4 || data == 0x0 ||
   10560 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10561 		return error;
   10562 
   10563 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10564 	    sc->sc_ich8_flash_base;
   10565 
   10566 	do {
   10567 		delay(1);
   10568 		/* Steps */
   10569 		error = wm_ich8_cycle_init(sc);
   10570 		if (error)
   10571 			break;
   10572 
   10573 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10574 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10575 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10576 		    & HSFCTL_BCOUNT_MASK;
   10577 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10578 		if (sc->sc_type == WM_T_PCH_SPT) {
   10579 			/*
   10580 			 * In SPT, This register is in Lan memory space, not
   10581 			 * flash. Therefore, only 32 bit access is supported.
   10582 			 */
   10583 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10584 			    (uint32_t)hsflctl);
   10585 		} else
   10586 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10587 
   10588 		/*
   10589 		 * Write the last 24 bits of index into Flash Linear address
   10590 		 * field in Flash Address
   10591 		 */
   10592 		/* TODO: TBD maybe check the index against the size of flash */
   10593 
   10594 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10595 
   10596 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10597 
   10598 		/*
   10599 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   10600 		 * the whole sequence a few more times, else read in (shift in)
   10601 		 * the Flash Data0, the order is least significant byte first
   10602 		 * msb to lsb
   10603 		 */
   10604 		if (error == 0) {
   10605 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10606 			if (size == 1)
   10607 				*data = (uint8_t)(flash_data & 0x000000FF);
   10608 			else if (size == 2)
   10609 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10610 			else if (size == 4)
   10611 				*data = (uint32_t)flash_data;
   10612 			break;
   10613 		} else {
   10614 			/*
   10615 			 * If we've gotten here, then things are probably
   10616 			 * completely hosed, but if the error condition is
   10617 			 * detected, it won't hurt to give it another try...
   10618 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10619 			 */
   10620 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10621 			if (hsfsts & HSFSTS_ERR) {
   10622 				/* Repeat for some time before giving up. */
   10623 				continue;
   10624 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10625 				break;
   10626 		}
   10627 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10628 
   10629 	return error;
   10630 }
   10631 
   10632 /******************************************************************************
   10633  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10634  *
   10635  * sc - pointer to wm_hw structure
   10636  * index - The index of the byte to read.
   10637  * data - Pointer to a byte to store the value read.
   10638  *****************************************************************************/
   10639 static int32_t
   10640 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10641 {
   10642 	int32_t status;
   10643 	uint32_t word = 0;
   10644 
   10645 	status = wm_read_ich8_data(sc, index, 1, &word);
   10646 	if (status == 0)
   10647 		*data = (uint8_t)word;
   10648 	else
   10649 		*data = 0;
   10650 
   10651 	return status;
   10652 }
   10653 
   10654 /******************************************************************************
   10655  * Reads a word from the NVM using the ICH8 flash access registers.
   10656  *
   10657  * sc - pointer to wm_hw structure
   10658  * index - The starting byte index of the word to read.
   10659  * data - Pointer to a word to store the value read.
   10660  *****************************************************************************/
   10661 static int32_t
   10662 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10663 {
   10664 	int32_t status;
   10665 	uint32_t word = 0;
   10666 
   10667 	status = wm_read_ich8_data(sc, index, 2, &word);
   10668 	if (status == 0)
   10669 		*data = (uint16_t)word;
   10670 	else
   10671 		*data = 0;
   10672 
   10673 	return status;
   10674 }
   10675 
   10676 /******************************************************************************
   10677  * Reads a dword from the NVM using the ICH8 flash access registers.
   10678  *
   10679  * sc - pointer to wm_hw structure
   10680  * index - The starting byte index of the word to read.
   10681  * data - Pointer to a word to store the value read.
   10682  *****************************************************************************/
   10683 static int32_t
   10684 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10685 {
   10686 	int32_t status;
   10687 
   10688 	status = wm_read_ich8_data(sc, index, 4, data);
   10689 	return status;
   10690 }
   10691 
   10692 /******************************************************************************
   10693  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10694  * register.
   10695  *
   10696  * sc - Struct containing variables accessed by shared code
   10697  * offset - offset of word in the EEPROM to read
   10698  * data - word read from the EEPROM
   10699  * words - number of words to read
   10700  *****************************************************************************/
   10701 static int
   10702 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10703 {
   10704 	int32_t  error = 0;
   10705 	uint32_t flash_bank = 0;
   10706 	uint32_t act_offset = 0;
   10707 	uint32_t bank_offset = 0;
   10708 	uint16_t word = 0;
   10709 	uint16_t i = 0;
   10710 
   10711 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10712 		device_xname(sc->sc_dev), __func__));
   10713 
   10714 	/*
   10715 	 * We need to know which is the valid flash bank.  In the event
   10716 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10717 	 * managing flash_bank.  So it cannot be trusted and needs
   10718 	 * to be updated with each read.
   10719 	 */
   10720 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10721 	if (error) {
   10722 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10723 			device_xname(sc->sc_dev)));
   10724 		flash_bank = 0;
   10725 	}
   10726 
   10727 	/*
   10728 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10729 	 * size
   10730 	 */
   10731 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10732 
   10733 	error = wm_get_swfwhw_semaphore(sc);
   10734 	if (error) {
   10735 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10736 		    __func__);
   10737 		return error;
   10738 	}
   10739 
   10740 	for (i = 0; i < words; i++) {
   10741 		/* The NVM part needs a byte offset, hence * 2 */
   10742 		act_offset = bank_offset + ((offset + i) * 2);
   10743 		error = wm_read_ich8_word(sc, act_offset, &word);
   10744 		if (error) {
   10745 			aprint_error_dev(sc->sc_dev,
   10746 			    "%s: failed to read NVM\n", __func__);
   10747 			break;
   10748 		}
   10749 		data[i] = word;
   10750 	}
   10751 
   10752 	wm_put_swfwhw_semaphore(sc);
   10753 	return error;
   10754 }
   10755 
   10756 /******************************************************************************
   10757  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10758  * register.
   10759  *
   10760  * sc - Struct containing variables accessed by shared code
   10761  * offset - offset of word in the EEPROM to read
   10762  * data - word read from the EEPROM
   10763  * words - number of words to read
   10764  *****************************************************************************/
   10765 static int
   10766 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10767 {
   10768 	int32_t  error = 0;
   10769 	uint32_t flash_bank = 0;
   10770 	uint32_t act_offset = 0;
   10771 	uint32_t bank_offset = 0;
   10772 	uint32_t dword = 0;
   10773 	uint16_t i = 0;
   10774 
   10775 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10776 		device_xname(sc->sc_dev), __func__));
   10777 
   10778 	/*
   10779 	 * We need to know which is the valid flash bank.  In the event
   10780 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10781 	 * managing flash_bank.  So it cannot be trusted and needs
   10782 	 * to be updated with each read.
   10783 	 */
   10784 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10785 	if (error) {
   10786 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10787 			device_xname(sc->sc_dev)));
   10788 		flash_bank = 0;
   10789 	}
   10790 
   10791 	/*
   10792 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10793 	 * size
   10794 	 */
   10795 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10796 
   10797 	error = wm_get_swfwhw_semaphore(sc);
   10798 	if (error) {
   10799 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10800 		    __func__);
   10801 		return error;
   10802 	}
   10803 
   10804 	for (i = 0; i < words; i++) {
   10805 		/* The NVM part needs a byte offset, hence * 2 */
   10806 		act_offset = bank_offset + ((offset + i) * 2);
   10807 		/* but we must read dword aligned, so mask ... */
   10808 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10809 		if (error) {
   10810 			aprint_error_dev(sc->sc_dev,
   10811 			    "%s: failed to read NVM\n", __func__);
   10812 			break;
   10813 		}
   10814 		/* ... and pick out low or high word */
   10815 		if ((act_offset & 0x2) == 0)
   10816 			data[i] = (uint16_t)(dword & 0xFFFF);
   10817 		else
   10818 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10819 	}
   10820 
   10821 	wm_put_swfwhw_semaphore(sc);
   10822 	return error;
   10823 }
   10824 
   10825 /* iNVM */
   10826 
   10827 static int
   10828 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10829 {
   10830 	int32_t  rv = 0;
   10831 	uint32_t invm_dword;
   10832 	uint16_t i;
   10833 	uint8_t record_type, word_address;
   10834 
   10835 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10836 		device_xname(sc->sc_dev), __func__));
   10837 
   10838 	for (i = 0; i < INVM_SIZE; i++) {
   10839 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10840 		/* Get record type */
   10841 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10842 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10843 			break;
   10844 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10845 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10846 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10847 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10848 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10849 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10850 			if (word_address == address) {
   10851 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10852 				rv = 0;
   10853 				break;
   10854 			}
   10855 		}
   10856 	}
   10857 
   10858 	return rv;
   10859 }
   10860 
   10861 static int
   10862 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10863 {
   10864 	int rv = 0;
   10865 	int i;
   10866 
   10867 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10868 		device_xname(sc->sc_dev), __func__));
   10869 
   10870 	for (i = 0; i < words; i++) {
   10871 		switch (offset + i) {
   10872 		case NVM_OFF_MACADDR:
   10873 		case NVM_OFF_MACADDR1:
   10874 		case NVM_OFF_MACADDR2:
   10875 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10876 			if (rv != 0) {
   10877 				data[i] = 0xffff;
   10878 				rv = -1;
   10879 			}
   10880 			break;
   10881 		case NVM_OFF_CFG2:
   10882 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10883 			if (rv != 0) {
   10884 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10885 				rv = 0;
   10886 			}
   10887 			break;
   10888 		case NVM_OFF_CFG4:
   10889 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10890 			if (rv != 0) {
   10891 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10892 				rv = 0;
   10893 			}
   10894 			break;
   10895 		case NVM_OFF_LED_1_CFG:
   10896 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10897 			if (rv != 0) {
   10898 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10899 				rv = 0;
   10900 			}
   10901 			break;
   10902 		case NVM_OFF_LED_0_2_CFG:
   10903 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10904 			if (rv != 0) {
   10905 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10906 				rv = 0;
   10907 			}
   10908 			break;
   10909 		case NVM_OFF_ID_LED_SETTINGS:
   10910 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10911 			if (rv != 0) {
   10912 				*data = ID_LED_RESERVED_FFFF;
   10913 				rv = 0;
   10914 			}
   10915 			break;
   10916 		default:
   10917 			DPRINTF(WM_DEBUG_NVM,
   10918 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10919 			*data = NVM_RESERVED_WORD;
   10920 			break;
   10921 		}
   10922 	}
   10923 
   10924 	return rv;
   10925 }
   10926 
   10927 /* Lock, detecting NVM type, validate checksum, version and read */
   10928 
   10929 /*
   10930  * wm_nvm_acquire:
   10931  *
   10932  *	Perform the EEPROM handshake required on some chips.
   10933  */
   10934 static int
   10935 wm_nvm_acquire(struct wm_softc *sc)
   10936 {
   10937 	uint32_t reg;
   10938 	int x;
   10939 	int ret = 0;
   10940 
   10941 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10942 		device_xname(sc->sc_dev), __func__));
   10943 
   10944 	if (sc->sc_type >= WM_T_ICH8) {
   10945 		ret = wm_get_nvm_ich8lan(sc);
   10946 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10947 		ret = wm_get_swfwhw_semaphore(sc);
   10948 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10949 		/* This will also do wm_get_swsm_semaphore() if needed */
   10950 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10951 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10952 		ret = wm_get_swsm_semaphore(sc);
   10953 	}
   10954 
   10955 	if (ret) {
   10956 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10957 			__func__);
   10958 		return 1;
   10959 	}
   10960 
   10961 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10962 		reg = CSR_READ(sc, WMREG_EECD);
   10963 
   10964 		/* Request EEPROM access. */
   10965 		reg |= EECD_EE_REQ;
   10966 		CSR_WRITE(sc, WMREG_EECD, reg);
   10967 
   10968 		/* ..and wait for it to be granted. */
   10969 		for (x = 0; x < 1000; x++) {
   10970 			reg = CSR_READ(sc, WMREG_EECD);
   10971 			if (reg & EECD_EE_GNT)
   10972 				break;
   10973 			delay(5);
   10974 		}
   10975 		if ((reg & EECD_EE_GNT) == 0) {
   10976 			aprint_error_dev(sc->sc_dev,
   10977 			    "could not acquire EEPROM GNT\n");
   10978 			reg &= ~EECD_EE_REQ;
   10979 			CSR_WRITE(sc, WMREG_EECD, reg);
   10980 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10981 				wm_put_swfwhw_semaphore(sc);
   10982 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10983 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10984 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10985 				wm_put_swsm_semaphore(sc);
   10986 			return 1;
   10987 		}
   10988 	}
   10989 
   10990 	return 0;
   10991 }
   10992 
   10993 /*
   10994  * wm_nvm_release:
   10995  *
   10996  *	Release the EEPROM mutex.
   10997  */
   10998 static void
   10999 wm_nvm_release(struct wm_softc *sc)
   11000 {
   11001 	uint32_t reg;
   11002 
   11003 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11004 		device_xname(sc->sc_dev), __func__));
   11005 
   11006 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11007 		reg = CSR_READ(sc, WMREG_EECD);
   11008 		reg &= ~EECD_EE_REQ;
   11009 		CSR_WRITE(sc, WMREG_EECD, reg);
   11010 	}
   11011 
   11012 	if (sc->sc_type >= WM_T_ICH8) {
   11013 		wm_put_nvm_ich8lan(sc);
   11014 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11015 		wm_put_swfwhw_semaphore(sc);
   11016 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11017 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11018 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11019 		wm_put_swsm_semaphore(sc);
   11020 }
   11021 
   11022 static int
   11023 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11024 {
   11025 	uint32_t eecd = 0;
   11026 
   11027 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11028 	    || sc->sc_type == WM_T_82583) {
   11029 		eecd = CSR_READ(sc, WMREG_EECD);
   11030 
   11031 		/* Isolate bits 15 & 16 */
   11032 		eecd = ((eecd >> 15) & 0x03);
   11033 
   11034 		/* If both bits are set, device is Flash type */
   11035 		if (eecd == 0x03)
   11036 			return 0;
   11037 	}
   11038 	return 1;
   11039 }
   11040 
   11041 static int
   11042 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11043 {
   11044 	uint32_t eec;
   11045 
   11046 	eec = CSR_READ(sc, WMREG_EEC);
   11047 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11048 		return 1;
   11049 
   11050 	return 0;
   11051 }
   11052 
   11053 /*
   11054  * wm_nvm_validate_checksum
   11055  *
   11056  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11057  */
   11058 static int
   11059 wm_nvm_validate_checksum(struct wm_softc *sc)
   11060 {
   11061 	uint16_t checksum;
   11062 	uint16_t eeprom_data;
   11063 #ifdef WM_DEBUG
   11064 	uint16_t csum_wordaddr, valid_checksum;
   11065 #endif
   11066 	int i;
   11067 
   11068 	checksum = 0;
   11069 
   11070 	/* Don't check for I211 */
   11071 	if (sc->sc_type == WM_T_I211)
   11072 		return 0;
   11073 
   11074 #ifdef WM_DEBUG
   11075 	if (sc->sc_type == WM_T_PCH_LPT) {
   11076 		csum_wordaddr = NVM_OFF_COMPAT;
   11077 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11078 	} else {
   11079 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11080 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11081 	}
   11082 
   11083 	/* Dump EEPROM image for debug */
   11084 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11085 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11086 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11087 		/* XXX PCH_SPT? */
   11088 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11089 		if ((eeprom_data & valid_checksum) == 0) {
   11090 			DPRINTF(WM_DEBUG_NVM,
   11091 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11092 				device_xname(sc->sc_dev), eeprom_data,
   11093 				    valid_checksum));
   11094 		}
   11095 	}
   11096 
   11097 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11098 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11099 		for (i = 0; i < NVM_SIZE; i++) {
   11100 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11101 				printf("XXXX ");
   11102 			else
   11103 				printf("%04hx ", eeprom_data);
   11104 			if (i % 8 == 7)
   11105 				printf("\n");
   11106 		}
   11107 	}
   11108 
   11109 #endif /* WM_DEBUG */
   11110 
   11111 	for (i = 0; i < NVM_SIZE; i++) {
   11112 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11113 			return 1;
   11114 		checksum += eeprom_data;
   11115 	}
   11116 
   11117 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11118 #ifdef WM_DEBUG
   11119 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11120 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11121 #endif
   11122 	}
   11123 
   11124 	return 0;
   11125 }
   11126 
   11127 static void
   11128 wm_nvm_version_invm(struct wm_softc *sc)
   11129 {
   11130 	uint32_t dword;
   11131 
   11132 	/*
   11133 	 * Linux's code to decode version is very strange, so we don't
   11134 	 * obey that algorithm and just use word 61 as the document.
   11135 	 * Perhaps it's not perfect though...
   11136 	 *
   11137 	 * Example:
   11138 	 *
   11139 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11140 	 */
   11141 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11142 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11143 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11144 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11145 }
   11146 
   11147 static void
   11148 wm_nvm_version(struct wm_softc *sc)
   11149 {
   11150 	uint16_t major, minor, build, patch;
   11151 	uint16_t uid0, uid1;
   11152 	uint16_t nvm_data;
   11153 	uint16_t off;
   11154 	bool check_version = false;
   11155 	bool check_optionrom = false;
   11156 	bool have_build = false;
   11157 
   11158 	/*
   11159 	 * Version format:
   11160 	 *
   11161 	 * XYYZ
   11162 	 * X0YZ
   11163 	 * X0YY
   11164 	 *
   11165 	 * Example:
   11166 	 *
   11167 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11168 	 *	82571	0x50a6	5.10.6?
   11169 	 *	82572	0x506a	5.6.10?
   11170 	 *	82572EI	0x5069	5.6.9?
   11171 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11172 	 *		0x2013	2.1.3?
   11173 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
   11174 	 */
   11175 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11176 	switch (sc->sc_type) {
   11177 	case WM_T_82571:
   11178 	case WM_T_82572:
   11179 	case WM_T_82574:
   11180 	case WM_T_82583:
   11181 		check_version = true;
   11182 		check_optionrom = true;
   11183 		have_build = true;
   11184 		break;
   11185 	case WM_T_82575:
   11186 	case WM_T_82576:
   11187 	case WM_T_82580:
   11188 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11189 			check_version = true;
   11190 		break;
   11191 	case WM_T_I211:
   11192 		wm_nvm_version_invm(sc);
   11193 		goto printver;
   11194 	case WM_T_I210:
   11195 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11196 			wm_nvm_version_invm(sc);
   11197 			goto printver;
   11198 		}
   11199 		/* FALLTHROUGH */
   11200 	case WM_T_I350:
   11201 	case WM_T_I354:
   11202 		check_version = true;
   11203 		check_optionrom = true;
   11204 		break;
   11205 	default:
   11206 		return;
   11207 	}
   11208 	if (check_version) {
   11209 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11210 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11211 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11212 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11213 			build = nvm_data & NVM_BUILD_MASK;
   11214 			have_build = true;
   11215 		} else
   11216 			minor = nvm_data & 0x00ff;
   11217 
   11218 		/* Decimal */
   11219 		minor = (minor / 16) * 10 + (minor % 16);
   11220 		sc->sc_nvm_ver_major = major;
   11221 		sc->sc_nvm_ver_minor = minor;
   11222 
   11223 printver:
   11224 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11225 		    sc->sc_nvm_ver_minor);
   11226 		if (have_build) {
   11227 			sc->sc_nvm_ver_build = build;
   11228 			aprint_verbose(".%d", build);
   11229 		}
   11230 	}
   11231 	if (check_optionrom) {
   11232 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11233 		/* Option ROM Version */
   11234 		if ((off != 0x0000) && (off != 0xffff)) {
   11235 			off += NVM_COMBO_VER_OFF;
   11236 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11237 			wm_nvm_read(sc, off, 1, &uid0);
   11238 			if ((uid0 != 0) && (uid0 != 0xffff)
   11239 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11240 				/* 16bits */
   11241 				major = uid0 >> 8;
   11242 				build = (uid0 << 8) | (uid1 >> 8);
   11243 				patch = uid1 & 0x00ff;
   11244 				aprint_verbose(", option ROM Version %d.%d.%d",
   11245 				    major, build, patch);
   11246 			}
   11247 		}
   11248 	}
   11249 
   11250 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11251 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11252 }
   11253 
   11254 /*
   11255  * wm_nvm_read:
   11256  *
   11257  *	Read data from the serial EEPROM.
   11258  */
   11259 static int
   11260 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11261 {
   11262 	int rv;
   11263 
   11264 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11265 		device_xname(sc->sc_dev), __func__));
   11266 
   11267 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11268 		return 1;
   11269 
   11270 	if (wm_nvm_acquire(sc))
   11271 		return 1;
   11272 
   11273 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11274 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11275 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11276 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11277 	else if (sc->sc_type == WM_T_PCH_SPT)
   11278 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11279 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11280 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11281 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11282 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11283 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11284 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11285 	else
   11286 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11287 
   11288 	wm_nvm_release(sc);
   11289 	return rv;
   11290 }
   11291 
   11292 /*
   11293  * Hardware semaphores.
   11294  * Very complexed...
   11295  */
   11296 
   11297 static int
   11298 wm_get_null(struct wm_softc *sc)
   11299 {
   11300 
   11301 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11302 		device_xname(sc->sc_dev), __func__));
   11303 	return 0;
   11304 }
   11305 
   11306 static void
   11307 wm_put_null(struct wm_softc *sc)
   11308 {
   11309 
   11310 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11311 		device_xname(sc->sc_dev), __func__));
   11312 	return;
   11313 }
   11314 
   11315 /*
   11316  * Get hardware semaphore.
   11317  * Same as e1000_get_hw_semaphore_generic()
   11318  */
   11319 static int
   11320 wm_get_swsm_semaphore(struct wm_softc *sc)
   11321 {
   11322 	int32_t timeout;
   11323 	uint32_t swsm;
   11324 
   11325 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11326 		device_xname(sc->sc_dev), __func__));
   11327 	KASSERT(sc->sc_nvm_wordsize > 0);
   11328 
   11329 	/* Get the SW semaphore. */
   11330 	timeout = sc->sc_nvm_wordsize + 1;
   11331 	while (timeout) {
   11332 		swsm = CSR_READ(sc, WMREG_SWSM);
   11333 
   11334 		if ((swsm & SWSM_SMBI) == 0)
   11335 			break;
   11336 
   11337 		delay(50);
   11338 		timeout--;
   11339 	}
   11340 
   11341 	if (timeout == 0) {
   11342 		aprint_error_dev(sc->sc_dev,
   11343 		    "could not acquire SWSM SMBI\n");
   11344 		return 1;
   11345 	}
   11346 
   11347 	/* Get the FW semaphore. */
   11348 	timeout = sc->sc_nvm_wordsize + 1;
   11349 	while (timeout) {
   11350 		swsm = CSR_READ(sc, WMREG_SWSM);
   11351 		swsm |= SWSM_SWESMBI;
   11352 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11353 		/* If we managed to set the bit we got the semaphore. */
   11354 		swsm = CSR_READ(sc, WMREG_SWSM);
   11355 		if (swsm & SWSM_SWESMBI)
   11356 			break;
   11357 
   11358 		delay(50);
   11359 		timeout--;
   11360 	}
   11361 
   11362 	if (timeout == 0) {
   11363 		aprint_error_dev(sc->sc_dev,
   11364 		    "could not acquire SWSM SWESMBI\n");
   11365 		/* Release semaphores */
   11366 		wm_put_swsm_semaphore(sc);
   11367 		return 1;
   11368 	}
   11369 	return 0;
   11370 }
   11371 
   11372 /*
   11373  * Put hardware semaphore.
   11374  * Same as e1000_put_hw_semaphore_generic()
   11375  */
   11376 static void
   11377 wm_put_swsm_semaphore(struct wm_softc *sc)
   11378 {
   11379 	uint32_t swsm;
   11380 
   11381 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11382 		device_xname(sc->sc_dev), __func__));
   11383 
   11384 	swsm = CSR_READ(sc, WMREG_SWSM);
   11385 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11386 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11387 }
   11388 
   11389 /*
   11390  * Get SW/FW semaphore.
   11391  * Same as e1000_acquire_swfw_sync_82575().
   11392  */
   11393 static int
   11394 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11395 {
   11396 	uint32_t swfw_sync;
   11397 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11398 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11399 	int timeout = 200;
   11400 
   11401 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11402 		device_xname(sc->sc_dev), __func__));
   11403 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11404 
   11405 	for (timeout = 0; timeout < 200; timeout++) {
   11406 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11407 			if (wm_get_swsm_semaphore(sc)) {
   11408 				aprint_error_dev(sc->sc_dev,
   11409 				    "%s: failed to get semaphore\n",
   11410 				    __func__);
   11411 				return 1;
   11412 			}
   11413 		}
   11414 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11415 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11416 			swfw_sync |= swmask;
   11417 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11418 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11419 				wm_put_swsm_semaphore(sc);
   11420 			return 0;
   11421 		}
   11422 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11423 			wm_put_swsm_semaphore(sc);
   11424 		delay(5000);
   11425 	}
   11426 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11427 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11428 	return 1;
   11429 }
   11430 
   11431 static void
   11432 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11433 {
   11434 	uint32_t swfw_sync;
   11435 
   11436 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11437 		device_xname(sc->sc_dev), __func__));
   11438 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11439 
   11440 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11441 		while (wm_get_swsm_semaphore(sc) != 0)
   11442 			continue;
   11443 	}
   11444 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11445 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11446 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11447 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11448 		wm_put_swsm_semaphore(sc);
   11449 }
   11450 
   11451 static int
   11452 wm_get_phy_82575(struct wm_softc *sc)
   11453 {
   11454 
   11455 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11456 		device_xname(sc->sc_dev), __func__));
   11457 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11458 }
   11459 
   11460 static void
   11461 wm_put_phy_82575(struct wm_softc *sc)
   11462 {
   11463 
   11464 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11465 		device_xname(sc->sc_dev), __func__));
   11466 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11467 }
   11468 
   11469 static int
   11470 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11471 {
   11472 	uint32_t ext_ctrl;
   11473 	int timeout = 200;
   11474 
   11475 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11476 		device_xname(sc->sc_dev), __func__));
   11477 
   11478 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11479 	for (timeout = 0; timeout < 200; timeout++) {
   11480 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11481 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11482 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11483 
   11484 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11485 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11486 			return 0;
   11487 		delay(5000);
   11488 	}
   11489 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11490 	    device_xname(sc->sc_dev), ext_ctrl);
   11491 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11492 	return 1;
   11493 }
   11494 
   11495 static void
   11496 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11497 {
   11498 	uint32_t ext_ctrl;
   11499 
   11500 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11501 		device_xname(sc->sc_dev), __func__));
   11502 
   11503 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11504 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11505 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11506 
   11507 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11508 }
   11509 
   11510 static int
   11511 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11512 {
   11513 	uint32_t ext_ctrl;
   11514 	int timeout;
   11515 
   11516 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11517 		device_xname(sc->sc_dev), __func__));
   11518 	mutex_enter(sc->sc_ich_phymtx);
   11519 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11520 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11521 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11522 			break;
   11523 		delay(1000);
   11524 	}
   11525 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11526 		printf("%s: SW has already locked the resource\n",
   11527 		    device_xname(sc->sc_dev));
   11528 		goto out;
   11529 	}
   11530 
   11531 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11532 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11533 	for (timeout = 0; timeout < 1000; timeout++) {
   11534 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11535 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11536 			break;
   11537 		delay(1000);
   11538 	}
   11539 	if (timeout >= 1000) {
   11540 		printf("%s: failed to acquire semaphore\n",
   11541 		    device_xname(sc->sc_dev));
   11542 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11543 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11544 		goto out;
   11545 	}
   11546 	return 0;
   11547 
   11548 out:
   11549 	mutex_exit(sc->sc_ich_phymtx);
   11550 	return 1;
   11551 }
   11552 
   11553 static void
   11554 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11555 {
   11556 	uint32_t ext_ctrl;
   11557 
   11558 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11559 		device_xname(sc->sc_dev), __func__));
   11560 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11561 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11562 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11563 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11564 	} else {
   11565 		printf("%s: Semaphore unexpectedly released\n",
   11566 		    device_xname(sc->sc_dev));
   11567 	}
   11568 
   11569 	mutex_exit(sc->sc_ich_phymtx);
   11570 }
   11571 
   11572 static int
   11573 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11574 {
   11575 
   11576 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11577 		device_xname(sc->sc_dev), __func__));
   11578 	mutex_enter(sc->sc_ich_nvmmtx);
   11579 
   11580 	return 0;
   11581 }
   11582 
   11583 static void
   11584 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11585 {
   11586 
   11587 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11588 		device_xname(sc->sc_dev), __func__));
   11589 	mutex_exit(sc->sc_ich_nvmmtx);
   11590 }
   11591 
   11592 static int
   11593 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11594 {
   11595 	int i = 0;
   11596 	uint32_t reg;
   11597 
   11598 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11599 		device_xname(sc->sc_dev), __func__));
   11600 
   11601 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11602 	do {
   11603 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11604 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11605 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11606 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11607 			break;
   11608 		delay(2*1000);
   11609 		i++;
   11610 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11611 
   11612 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11613 		wm_put_hw_semaphore_82573(sc);
   11614 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11615 		    device_xname(sc->sc_dev));
   11616 		return -1;
   11617 	}
   11618 
   11619 	return 0;
   11620 }
   11621 
   11622 static void
   11623 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11624 {
   11625 	uint32_t reg;
   11626 
   11627 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11628 		device_xname(sc->sc_dev), __func__));
   11629 
   11630 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11631 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11632 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11633 }
   11634 
   11635 /*
   11636  * Management mode and power management related subroutines.
   11637  * BMC, AMT, suspend/resume and EEE.
   11638  */
   11639 
   11640 #ifdef WM_WOL
   11641 static int
   11642 wm_check_mng_mode(struct wm_softc *sc)
   11643 {
   11644 	int rv;
   11645 
   11646 	switch (sc->sc_type) {
   11647 	case WM_T_ICH8:
   11648 	case WM_T_ICH9:
   11649 	case WM_T_ICH10:
   11650 	case WM_T_PCH:
   11651 	case WM_T_PCH2:
   11652 	case WM_T_PCH_LPT:
   11653 	case WM_T_PCH_SPT:
   11654 		rv = wm_check_mng_mode_ich8lan(sc);
   11655 		break;
   11656 	case WM_T_82574:
   11657 	case WM_T_82583:
   11658 		rv = wm_check_mng_mode_82574(sc);
   11659 		break;
   11660 	case WM_T_82571:
   11661 	case WM_T_82572:
   11662 	case WM_T_82573:
   11663 	case WM_T_80003:
   11664 		rv = wm_check_mng_mode_generic(sc);
   11665 		break;
   11666 	default:
   11667 		/* noting to do */
   11668 		rv = 0;
   11669 		break;
   11670 	}
   11671 
   11672 	return rv;
   11673 }
   11674 
   11675 static int
   11676 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11677 {
   11678 	uint32_t fwsm;
   11679 
   11680 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11681 
   11682 	if (((fwsm & FWSM_FW_VALID) != 0)
   11683 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11684 		return 1;
   11685 
   11686 	return 0;
   11687 }
   11688 
   11689 static int
   11690 wm_check_mng_mode_82574(struct wm_softc *sc)
   11691 {
   11692 	uint16_t data;
   11693 
   11694 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11695 
   11696 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11697 		return 1;
   11698 
   11699 	return 0;
   11700 }
   11701 
   11702 static int
   11703 wm_check_mng_mode_generic(struct wm_softc *sc)
   11704 {
   11705 	uint32_t fwsm;
   11706 
   11707 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11708 
   11709 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11710 		return 1;
   11711 
   11712 	return 0;
   11713 }
   11714 #endif /* WM_WOL */
   11715 
   11716 static int
   11717 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11718 {
   11719 	uint32_t manc, fwsm, factps;
   11720 
   11721 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11722 		return 0;
   11723 
   11724 	manc = CSR_READ(sc, WMREG_MANC);
   11725 
   11726 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11727 		device_xname(sc->sc_dev), manc));
   11728 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11729 		return 0;
   11730 
   11731 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11732 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11733 		factps = CSR_READ(sc, WMREG_FACTPS);
   11734 		if (((factps & FACTPS_MNGCG) == 0)
   11735 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11736 			return 1;
   11737 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11738 		uint16_t data;
   11739 
   11740 		factps = CSR_READ(sc, WMREG_FACTPS);
   11741 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11742 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11743 			device_xname(sc->sc_dev), factps, data));
   11744 		if (((factps & FACTPS_MNGCG) == 0)
   11745 		    && ((data & NVM_CFG2_MNGM_MASK)
   11746 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11747 			return 1;
   11748 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11749 	    && ((manc & MANC_ASF_EN) == 0))
   11750 		return 1;
   11751 
   11752 	return 0;
   11753 }
   11754 
   11755 static bool
   11756 wm_phy_resetisblocked(struct wm_softc *sc)
   11757 {
   11758 	bool blocked = false;
   11759 	uint32_t reg;
   11760 	int i = 0;
   11761 
   11762 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11763 		device_xname(sc->sc_dev), __func__));
   11764 
   11765 	switch (sc->sc_type) {
   11766 	case WM_T_ICH8:
   11767 	case WM_T_ICH9:
   11768 	case WM_T_ICH10:
   11769 	case WM_T_PCH:
   11770 	case WM_T_PCH2:
   11771 	case WM_T_PCH_LPT:
   11772 	case WM_T_PCH_SPT:
   11773 		do {
   11774 			reg = CSR_READ(sc, WMREG_FWSM);
   11775 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11776 				blocked = true;
   11777 				delay(10*1000);
   11778 				continue;
   11779 			}
   11780 			blocked = false;
   11781 		} while (blocked && (i++ < 30));
   11782 		return blocked;
   11783 		break;
   11784 	case WM_T_82571:
   11785 	case WM_T_82572:
   11786 	case WM_T_82573:
   11787 	case WM_T_82574:
   11788 	case WM_T_82583:
   11789 	case WM_T_80003:
   11790 		reg = CSR_READ(sc, WMREG_MANC);
   11791 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11792 			return true;
   11793 		else
   11794 			return false;
   11795 		break;
   11796 	default:
   11797 		/* no problem */
   11798 		break;
   11799 	}
   11800 
   11801 	return false;
   11802 }
   11803 
   11804 static void
   11805 wm_get_hw_control(struct wm_softc *sc)
   11806 {
   11807 	uint32_t reg;
   11808 
   11809 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11810 		device_xname(sc->sc_dev), __func__));
   11811 
   11812 	switch (sc->sc_type) {
   11813 	case WM_T_82573:
   11814 		reg = CSR_READ(sc, WMREG_SWSM);
   11815 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11816 		break;
   11817 	case WM_T_82571:
   11818 	case WM_T_82572:
   11819 	case WM_T_82574:
   11820 	case WM_T_82583:
   11821 	case WM_T_80003:
   11822 	case WM_T_ICH8:
   11823 	case WM_T_ICH9:
   11824 	case WM_T_ICH10:
   11825 	case WM_T_PCH:
   11826 	case WM_T_PCH2:
   11827 	case WM_T_PCH_LPT:
   11828 	case WM_T_PCH_SPT:
   11829 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11830 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11831 		break;
   11832 	default:
   11833 		break;
   11834 	}
   11835 }
   11836 
   11837 static void
   11838 wm_release_hw_control(struct wm_softc *sc)
   11839 {
   11840 	uint32_t reg;
   11841 
   11842 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11843 		device_xname(sc->sc_dev), __func__));
   11844 
   11845 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11846 		return;
   11847 
   11848 	if (sc->sc_type == WM_T_82573) {
   11849 		reg = CSR_READ(sc, WMREG_SWSM);
   11850 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11851 	} else {
   11852 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11853 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11854 	}
   11855 }
   11856 
   11857 static void
   11858 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11859 {
   11860 	uint32_t reg;
   11861 
   11862 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11863 		device_xname(sc->sc_dev), __func__));
   11864 
   11865 	if (sc->sc_type < WM_T_PCH2)
   11866 		return;
   11867 
   11868 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11869 
   11870 	if (gate)
   11871 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11872 	else
   11873 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11874 
   11875 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11876 }
   11877 
   11878 static void
   11879 wm_smbustopci(struct wm_softc *sc)
   11880 {
   11881 	uint32_t fwsm, reg;
   11882 
   11883 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11884 		device_xname(sc->sc_dev), __func__));
   11885 
   11886 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11887 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11888 
   11889 	/* Acquire PHY semaphore */
   11890 	sc->phy.acquire(sc);
   11891 
   11892 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11893 	if (((fwsm & FWSM_FW_VALID) == 0)
   11894 	    && ((wm_phy_resetisblocked(sc) == false))) {
   11895 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11896 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11897 			reg |= CTRL_EXT_FORCE_SMBUS;
   11898 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11899 			CSR_WRITE_FLUSH(sc);
   11900 			delay(50*1000);
   11901 		}
   11902 
   11903 		/* Toggle LANPHYPC */
   11904 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11905 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11906 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11907 		CSR_WRITE_FLUSH(sc);
   11908 		delay(1000);
   11909 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11910 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11911 		CSR_WRITE_FLUSH(sc);
   11912 		delay(50*1000);
   11913 
   11914 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11915 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11916 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11917 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11918 		}
   11919 	}
   11920 
   11921 	/* Release semaphore */
   11922 	sc->phy.release(sc);
   11923 
   11924 	/*
   11925 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11926 	 */
   11927 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11928 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11929 }
   11930 
   11931 static void
   11932 wm_init_manageability(struct wm_softc *sc)
   11933 {
   11934 
   11935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11936 		device_xname(sc->sc_dev), __func__));
   11937 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11938 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11939 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11940 
   11941 		/* Disable hardware interception of ARP */
   11942 		manc &= ~MANC_ARP_EN;
   11943 
   11944 		/* Enable receiving management packets to the host */
   11945 		if (sc->sc_type >= WM_T_82571) {
   11946 			manc |= MANC_EN_MNG2HOST;
   11947 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   11948 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11949 		}
   11950 
   11951 		CSR_WRITE(sc, WMREG_MANC, manc);
   11952 	}
   11953 }
   11954 
   11955 static void
   11956 wm_release_manageability(struct wm_softc *sc)
   11957 {
   11958 
   11959 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11960 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11961 
   11962 		manc |= MANC_ARP_EN;
   11963 		if (sc->sc_type >= WM_T_82571)
   11964 			manc &= ~MANC_EN_MNG2HOST;
   11965 
   11966 		CSR_WRITE(sc, WMREG_MANC, manc);
   11967 	}
   11968 }
   11969 
   11970 static void
   11971 wm_get_wakeup(struct wm_softc *sc)
   11972 {
   11973 
   11974 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11975 	switch (sc->sc_type) {
   11976 	case WM_T_82573:
   11977 	case WM_T_82583:
   11978 		sc->sc_flags |= WM_F_HAS_AMT;
   11979 		/* FALLTHROUGH */
   11980 	case WM_T_80003:
   11981 	case WM_T_82541:
   11982 	case WM_T_82547:
   11983 	case WM_T_82571:
   11984 	case WM_T_82572:
   11985 	case WM_T_82574:
   11986 	case WM_T_82575:
   11987 	case WM_T_82576:
   11988 	case WM_T_82580:
   11989 	case WM_T_I350:
   11990 	case WM_T_I354:
   11991 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11992 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11993 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11994 		break;
   11995 	case WM_T_ICH8:
   11996 	case WM_T_ICH9:
   11997 	case WM_T_ICH10:
   11998 	case WM_T_PCH:
   11999 	case WM_T_PCH2:
   12000 	case WM_T_PCH_LPT:
   12001 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   12002 		sc->sc_flags |= WM_F_HAS_AMT;
   12003 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12004 		break;
   12005 	default:
   12006 		break;
   12007 	}
   12008 
   12009 	/* 1: HAS_MANAGE */
   12010 	if (wm_enable_mng_pass_thru(sc) != 0)
   12011 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12012 
   12013 #ifdef WM_DEBUG
   12014 	printf("\n");
   12015 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12016 		printf("HAS_AMT,");
   12017 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12018 		printf("ARC_SUBSYS_VALID,");
   12019 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12020 		printf("ASF_FIRMWARE_PRES,");
   12021 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12022 		printf("HAS_MANAGE,");
   12023 	printf("\n");
   12024 #endif
   12025 	/*
   12026 	 * Note that the WOL flags is set after the resetting of the eeprom
   12027 	 * stuff
   12028 	 */
   12029 }
   12030 
   12031 /* WOL in the newer chipset interfaces (pchlan) */
   12032 static void
   12033 wm_enable_phy_wakeup(struct wm_softc *sc)
   12034 {
   12035 #if 0
   12036 	uint16_t preg;
   12037 
   12038 	/* Copy MAC RARs to PHY RARs */
   12039 
   12040 	/* Copy MAC MTA to PHY MTA */
   12041 
   12042 	/* Configure PHY Rx Control register */
   12043 
   12044 	/* Enable PHY wakeup in MAC register */
   12045 
   12046 	/* Configure and enable PHY wakeup in PHY registers */
   12047 
   12048 	/* Activate PHY wakeup */
   12049 
   12050 	/* XXX */
   12051 #endif
   12052 }
   12053 
   12054 /* Power down workaround on D3 */
   12055 static void
   12056 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12057 {
   12058 	uint32_t reg;
   12059 	int i;
   12060 
   12061 	for (i = 0; i < 2; i++) {
   12062 		/* Disable link */
   12063 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12064 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12065 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12066 
   12067 		/*
   12068 		 * Call gig speed drop workaround on Gig disable before
   12069 		 * accessing any PHY registers
   12070 		 */
   12071 		if (sc->sc_type == WM_T_ICH8)
   12072 			wm_gig_downshift_workaround_ich8lan(sc);
   12073 
   12074 		/* Write VR power-down enable */
   12075 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12076 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12077 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12078 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12079 
   12080 		/* Read it back and test */
   12081 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12082 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12083 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12084 			break;
   12085 
   12086 		/* Issue PHY reset and repeat at most one more time */
   12087 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12088 	}
   12089 }
   12090 
   12091 static void
   12092 wm_enable_wakeup(struct wm_softc *sc)
   12093 {
   12094 	uint32_t reg, pmreg;
   12095 	pcireg_t pmode;
   12096 
   12097 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12098 		device_xname(sc->sc_dev), __func__));
   12099 
   12100 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12101 		&pmreg, NULL) == 0)
   12102 		return;
   12103 
   12104 	/* Advertise the wakeup capability */
   12105 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12106 	    | CTRL_SWDPIN(3));
   12107 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12108 
   12109 	/* ICH workaround */
   12110 	switch (sc->sc_type) {
   12111 	case WM_T_ICH8:
   12112 	case WM_T_ICH9:
   12113 	case WM_T_ICH10:
   12114 	case WM_T_PCH:
   12115 	case WM_T_PCH2:
   12116 	case WM_T_PCH_LPT:
   12117 	case WM_T_PCH_SPT:
   12118 		/* Disable gig during WOL */
   12119 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12120 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12121 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12122 		if (sc->sc_type == WM_T_PCH)
   12123 			wm_gmii_reset(sc);
   12124 
   12125 		/* Power down workaround */
   12126 		if (sc->sc_phytype == WMPHY_82577) {
   12127 			struct mii_softc *child;
   12128 
   12129 			/* Assume that the PHY is copper */
   12130 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12131 			if (child->mii_mpd_rev <= 2)
   12132 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12133 				    (768 << 5) | 25, 0x0444); /* magic num */
   12134 		}
   12135 		break;
   12136 	default:
   12137 		break;
   12138 	}
   12139 
   12140 	/* Keep the laser running on fiber adapters */
   12141 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12142 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12143 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12144 		reg |= CTRL_EXT_SWDPIN(3);
   12145 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12146 	}
   12147 
   12148 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12149 #if 0	/* for the multicast packet */
   12150 	reg |= WUFC_MC;
   12151 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12152 #endif
   12153 
   12154 	if (sc->sc_type >= WM_T_PCH)
   12155 		wm_enable_phy_wakeup(sc);
   12156 	else {
   12157 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12158 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12159 	}
   12160 
   12161 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12162 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12163 		|| (sc->sc_type == WM_T_PCH2))
   12164 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12165 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12166 
   12167 	/* Request PME */
   12168 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12169 #if 0
   12170 	/* Disable WOL */
   12171 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12172 #else
   12173 	/* For WOL */
   12174 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12175 #endif
   12176 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12177 }
   12178 
   12179 /* LPLU */
   12180 
   12181 static void
   12182 wm_lplu_d0_disable(struct wm_softc *sc)
   12183 {
   12184 	uint32_t reg;
   12185 
   12186 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12187 		device_xname(sc->sc_dev), __func__));
   12188 
   12189 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12190 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12191 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12192 }
   12193 
   12194 static void
   12195 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12196 {
   12197 	uint32_t reg;
   12198 
   12199 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12200 		device_xname(sc->sc_dev), __func__));
   12201 
   12202 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12203 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12204 	reg |= HV_OEM_BITS_ANEGNOW;
   12205 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12206 }
   12207 
   12208 /* EEE */
   12209 
   12210 static void
   12211 wm_set_eee_i350(struct wm_softc *sc)
   12212 {
   12213 	uint32_t ipcnfg, eeer;
   12214 
   12215 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12216 	eeer = CSR_READ(sc, WMREG_EEER);
   12217 
   12218 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12219 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12220 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12221 		    | EEER_LPI_FC);
   12222 	} else {
   12223 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12224 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12225 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12226 		    | EEER_LPI_FC);
   12227 	}
   12228 
   12229 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12230 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12231 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12232 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12233 }
   12234 
   12235 /*
   12236  * Workarounds (mainly PHY related).
   12237  * Basically, PHY's workarounds are in the PHY drivers.
   12238  */
   12239 
   12240 /* Work-around for 82566 Kumeran PCS lock loss */
   12241 static void
   12242 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12243 {
   12244 #if 0
   12245 	int miistatus, active, i;
   12246 	int reg;
   12247 
   12248 	miistatus = sc->sc_mii.mii_media_status;
   12249 
   12250 	/* If the link is not up, do nothing */
   12251 	if ((miistatus & IFM_ACTIVE) == 0)
   12252 		return;
   12253 
   12254 	active = sc->sc_mii.mii_media_active;
   12255 
   12256 	/* Nothing to do if the link is other than 1Gbps */
   12257 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12258 		return;
   12259 
   12260 	for (i = 0; i < 10; i++) {
   12261 		/* read twice */
   12262 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12263 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12264 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12265 			goto out;	/* GOOD! */
   12266 
   12267 		/* Reset the PHY */
   12268 		wm_gmii_reset(sc);
   12269 		delay(5*1000);
   12270 	}
   12271 
   12272 	/* Disable GigE link negotiation */
   12273 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12274 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12275 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12276 
   12277 	/*
   12278 	 * Call gig speed drop workaround on Gig disable before accessing
   12279 	 * any PHY registers.
   12280 	 */
   12281 	wm_gig_downshift_workaround_ich8lan(sc);
   12282 
   12283 out:
   12284 	return;
   12285 #endif
   12286 }
   12287 
   12288 /* WOL from S5 stops working */
   12289 static void
   12290 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12291 {
   12292 	uint16_t kmrn_reg;
   12293 
   12294 	/* Only for igp3 */
   12295 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12296 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12297 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12298 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12299 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12300 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12301 	}
   12302 }
   12303 
   12304 /*
   12305  * Workaround for pch's PHYs
   12306  * XXX should be moved to new PHY driver?
   12307  */
   12308 static void
   12309 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12310 {
   12311 
   12312 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12313 		device_xname(sc->sc_dev), __func__));
   12314 	KASSERT(sc->sc_type == WM_T_PCH);
   12315 
   12316 	if (sc->sc_phytype == WMPHY_82577)
   12317 		wm_set_mdio_slow_mode_hv(sc);
   12318 
   12319 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12320 
   12321 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12322 
   12323 	/* 82578 */
   12324 	if (sc->sc_phytype == WMPHY_82578) {
   12325 		struct mii_softc *child;
   12326 
   12327 		/*
   12328 		 * Return registers to default by doing a soft reset then
   12329 		 * writing 0x3140 to the control register
   12330 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12331 		 */
   12332 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12333 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12334 			PHY_RESET(child);
   12335 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12336 			    0x3140);
   12337 		}
   12338 	}
   12339 
   12340 	/* Select page 0 */
   12341 	sc->phy.acquire(sc);
   12342 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12343 	sc->phy.release(sc);
   12344 
   12345 	/*
   12346 	 * Configure the K1 Si workaround during phy reset assuming there is
   12347 	 * link so that it disables K1 if link is in 1Gbps.
   12348 	 */
   12349 	wm_k1_gig_workaround_hv(sc, 1);
   12350 }
   12351 
   12352 static void
   12353 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12354 {
   12355 
   12356 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12357 		device_xname(sc->sc_dev), __func__));
   12358 	KASSERT(sc->sc_type == WM_T_PCH2);
   12359 
   12360 	wm_set_mdio_slow_mode_hv(sc);
   12361 }
   12362 
   12363 static int
   12364 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12365 {
   12366 	int k1_enable = sc->sc_nvm_k1_enabled;
   12367 
   12368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12369 		device_xname(sc->sc_dev), __func__));
   12370 
   12371 	if (sc->phy.acquire(sc) != 0)
   12372 		return -1;
   12373 
   12374 	if (link) {
   12375 		k1_enable = 0;
   12376 
   12377 		/* Link stall fix for link up */
   12378 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12379 	} else {
   12380 		/* Link stall fix for link down */
   12381 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12382 	}
   12383 
   12384 	wm_configure_k1_ich8lan(sc, k1_enable);
   12385 	sc->phy.release(sc);
   12386 
   12387 	return 0;
   12388 }
   12389 
   12390 static void
   12391 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12392 {
   12393 	uint32_t reg;
   12394 
   12395 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12396 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12397 	    reg | HV_KMRN_MDIO_SLOW);
   12398 }
   12399 
   12400 static void
   12401 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12402 {
   12403 	uint32_t ctrl, ctrl_ext, tmp;
   12404 	uint16_t kmrn_reg;
   12405 
   12406 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12407 
   12408 	if (k1_enable)
   12409 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12410 	else
   12411 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12412 
   12413 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12414 
   12415 	delay(20);
   12416 
   12417 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12418 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12419 
   12420 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12421 	tmp |= CTRL_FRCSPD;
   12422 
   12423 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12424 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12425 	CSR_WRITE_FLUSH(sc);
   12426 	delay(20);
   12427 
   12428 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12429 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12430 	CSR_WRITE_FLUSH(sc);
   12431 	delay(20);
   12432 }
   12433 
   12434 /* special case - for 82575 - need to do manual init ... */
   12435 static void
   12436 wm_reset_init_script_82575(struct wm_softc *sc)
   12437 {
   12438 	/*
   12439 	 * remark: this is untested code - we have no board without EEPROM
   12440 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   12441 	 */
   12442 
   12443 	/* SerDes configuration via SERDESCTRL */
   12444 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12445 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12446 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12447 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12448 
   12449 	/* CCM configuration via CCMCTL register */
   12450 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12451 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12452 
   12453 	/* PCIe lanes configuration */
   12454 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12455 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12456 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12457 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12458 
   12459 	/* PCIe PLL Configuration */
   12460 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12461 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12462 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12463 }
   12464 
   12465 static void
   12466 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12467 {
   12468 	uint32_t reg;
   12469 	uint16_t nvmword;
   12470 	int rv;
   12471 
   12472 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12473 		return;
   12474 
   12475 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12476 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12477 	if (rv != 0) {
   12478 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12479 		    __func__);
   12480 		return;
   12481 	}
   12482 
   12483 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12484 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12485 		reg |= MDICNFG_DEST;
   12486 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12487 		reg |= MDICNFG_COM_MDIO;
   12488 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12489 }
   12490 
   12491 /*
   12492  * I210 Errata 25 and I211 Errata 10
   12493  * Slow System Clock.
   12494  */
   12495 static void
   12496 wm_pll_workaround_i210(struct wm_softc *sc)
   12497 {
   12498 	uint32_t mdicnfg, wuc;
   12499 	uint32_t reg;
   12500 	pcireg_t pcireg;
   12501 	uint32_t pmreg;
   12502 	uint16_t nvmword, tmp_nvmword;
   12503 	int phyval;
   12504 	bool wa_done = false;
   12505 	int i;
   12506 
   12507 	/* Save WUC and MDICNFG registers */
   12508 	wuc = CSR_READ(sc, WMREG_WUC);
   12509 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12510 
   12511 	reg = mdicnfg & ~MDICNFG_DEST;
   12512 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12513 
   12514 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12515 		nvmword = INVM_DEFAULT_AL;
   12516 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12517 
   12518 	/* Get Power Management cap offset */
   12519 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12520 		&pmreg, NULL) == 0)
   12521 		return;
   12522 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12523 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12524 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12525 
   12526 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12527 			break; /* OK */
   12528 		}
   12529 
   12530 		wa_done = true;
   12531 		/* Directly reset the internal PHY */
   12532 		reg = CSR_READ(sc, WMREG_CTRL);
   12533 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12534 
   12535 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12536 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12537 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12538 
   12539 		CSR_WRITE(sc, WMREG_WUC, 0);
   12540 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12541 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12542 
   12543 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12544 		    pmreg + PCI_PMCSR);
   12545 		pcireg |= PCI_PMCSR_STATE_D3;
   12546 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12547 		    pmreg + PCI_PMCSR, pcireg);
   12548 		delay(1000);
   12549 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12550 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12551 		    pmreg + PCI_PMCSR, pcireg);
   12552 
   12553 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12554 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12555 
   12556 		/* Restore WUC register */
   12557 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12558 	}
   12559 
   12560 	/* Restore MDICNFG setting */
   12561 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12562 	if (wa_done)
   12563 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12564 }
   12565