      1 /*	$NetBSD: if_wm.c,v 1.455 2016/12/02 01:48:44 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.455 2016/12/02 01:48:44 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
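        /*
         * Example usage (illustrative, not taken from the driver): the
         * second DPRINTF() argument is a fully parenthesized printf()
         * argument list, which is how a variadic list passes through a
         * two-argument macro:
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
         *	    device_xname(sc->sc_dev)));
         */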
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this driver supports.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
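        /*
         * A sketch of the arithmetic behind the limits above (not the
         * driver's allocation code): with one MSI-X vector per queue
         * pair plus one vector for link interrupts, a device using N
         * queue pairs needs
         *
         *	nintrs = N + 1;
         *
         * vectors, which is why WM_MAX_NINTR is WM_MAX_NQUEUEINTR + 1.
         */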
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
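        /*
         * Illustration of the power-of-two ring arithmetic above:
         * because WM_NTXDESC(txq) is a power of two, masking with
         * WM_NTXDESC_MASK(txq) is a cheap modulo, so the index wraps to
         * zero automatically.  For example, with 4096 descriptors:
         *
         *	idx = (4095 + 1) & (4096 - 1);	now idx == 0
         *
         * WM_NEXTTXS() applies the same trick to the Tx job ring, which
         * is why txq_num must also be a power of two.
         */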
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal-
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
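        /*
         * Worked example for the sizing comment above: a full-sized
         * (~9k) jumbo frame split into 2048-byte (MCLBYTES) buffers
         * needs ceil(9000 / 2048) = 5 descriptors, so 256 descriptors
         * hold about 256 / 5 = 51 in-flight jumbo packets, hence the
         * "room for 50" figure above.
         */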
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
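        /*
         * For illustration, WM_Q_EVCNT_DEFINE(txq, txdw) expands to:
         *
         *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
         *	struct evcnt txq_ev_txdw;
         *
         * Note the string literal is not token-pasted; it only fixes
         * the buffer size.  WM_Q_EVCNT_ATTACH() then snprintf()s
         * "txq00txdw", "txq01txdw", ... into that buffer, giving each
         * queue's counter a distinct name.
         */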
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs.  This intermediate queue hands packets off without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segments */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index of MSI-X tables */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
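        /*
         * Usage sketch implied by the declarations above (not a
         * verbatim copy of driver code): the per-chip acquire/release
         * hooks bracket every PHY register access:
         *
         *	if (sc->phy.acquire(sc) != 0)
         *		return;		failed to take the semaphore
         *	...access PHY registers...
         *	sc->phy.release(sc);
         *
         * reset_delay_us is the per-chip settle time after a PHY reset.
         */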
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
    434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookie.
    447 					 * legacy and msi use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
    452 	int sc_link_intr_idx;		/* index of MSI-X tables */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
    475 	/* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
    483 	/* This variable is used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
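        /*
         * Typical pattern (sketch): core state changes take the lock,
         *
         *	WM_CORE_LOCK(sc);
         *	...modify softc state...
         *	WM_CORE_UNLOCK(sc);
         *
         * and paths that require it can KASSERT(WM_CORE_LOCKED(sc)).
         * When sc_core_lock is NULL (non-MPSAFE configuration), the
         * lock/unlock macros become no-ops and WM_CORE_LOCKED() is true.
         */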
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
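        /*
         * The macros above implement the classic O(1) tail-append
         * trick: rxq_tailp always points at the location (rxq_head or
         * some mbuf's m_next field) through which the next fragment
         * must be stored.  Illustration:
         *
         *	WM_RXCHAIN_RESET(rxq);		head = NULL, tailp = &head
         *	WM_RXCHAIN_LINK(rxq, m1);	head = m1, tailp = &m1->m_next
         *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
         *
         * so multi-buffer (jumbo) packets are assembled without ever
         * walking the chain.
         */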
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
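        /*
         * CSR_WRITE_FLUSH() exists because PCI writes are posted: a
         * read of any register (STATUS here) forces preceding writes
         * out to the chip.  A typical use looks like this sketch
         * (CTRL_RST assumed to be the reset bit from if_wmreg.h):
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(10);
         *
         * so the delay is measured from when the write actually lands.
         */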
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + sc->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + sc->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + sc->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
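        /*
         * Worked example for the address split above (assuming a
         * 64-bit bus_addr_t): for a descriptor at bus address
         * 0x123456000,
         *
         *	WM_CDTXADDR_LO() yields 0x23456000	(low 32 bits)
         *	WM_CDTXADDR_HI() yields 0x00000001	(high 32 bits)
         *
         * On 32-bit platforms the HI half is always 0; sizeof() is a
         * compile-time constant, so the conditional costs nothing at
         * run time.
         */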
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    687 static void	wm_start(struct ifnet *);
    688 static void	wm_start_locked(struct ifnet *);
    689 static int	wm_transmit(struct ifnet *, struct mbuf *);
    690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    694 static void	wm_nq_start(struct ifnet *);
    695 static void	wm_nq_start_locked(struct ifnet *);
    696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    699 /* Interrupt */
    700 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    701 static void	wm_rxeof(struct wm_rxqueue *);
    702 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    703 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    704 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    705 static void	wm_linkintr(struct wm_softc *, uint32_t);
    706 static int	wm_intr_legacy(void *);
    707 static int	wm_txrxintr_msix(void *);
    708 static int	wm_linkintr_msix(void *);
    709 
    710 /*
    711  * Media related.
    712  * GMII, SGMII, TBI, SERDES and SFP.
    713  */
    714 /* Common */
    715 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    716 /* GMII related */
    717 static void	wm_gmii_reset(struct wm_softc *);
    718 static int	wm_get_phy_id_82575(struct wm_softc *);
    719 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    720 static int	wm_gmii_mediachange(struct ifnet *);
    721 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    722 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    723 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    724 static int	wm_gmii_i82543_readreg(device_t, int, int);
    725 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    726 static int	wm_gmii_mdic_readreg(device_t, int, int);
    727 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    728 static int	wm_gmii_i82544_readreg(device_t, int, int);
    729 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    730 static int	wm_gmii_i80003_readreg(device_t, int, int);
    731 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    732 static int	wm_gmii_bm_readreg(device_t, int, int);
    733 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    734 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    735 static int	wm_gmii_hv_readreg(device_t, int, int);
    736 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    737 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    738 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    739 static int	wm_gmii_82580_readreg(device_t, int, int);
    740 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    741 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    742 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    743 static void	wm_gmii_statchg(struct ifnet *);
    744 /*
    745  * Kumeran related (80003, ICH* and PCH*).
    746  * These functions are not for accessing MII registers; they access
    747  * Kumeran-specific registers.
    748  */
    749 static int	wm_kmrn_readreg(struct wm_softc *, int);
    750 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    751 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    752 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    753 /* SGMII */
    754 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    755 static int	wm_sgmii_readreg(device_t, int, int);
    756 static void	wm_sgmii_writereg(device_t, int, int, int);
    757 /* TBI related */
    758 static void	wm_tbi_mediainit(struct wm_softc *);
    759 static int	wm_tbi_mediachange(struct ifnet *);
    760 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    761 static int	wm_check_for_link(struct wm_softc *);
    762 static void	wm_tbi_tick(struct wm_softc *);
    763 /* SERDES related */
    764 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    765 static int	wm_serdes_mediachange(struct ifnet *);
    766 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    767 static void	wm_serdes_tick(struct wm_softc *);
    768 /* SFP related */
    769 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    770 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    771 
    772 /*
    773  * NVM related.
    774  * Microwire, SPI (w/wo EERD) and Flash.
    775  */
    776 /* Misc functions */
    777 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    778 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    779 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    780 /* Microwire */
    781 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    782 /* SPI */
    783 static int	wm_nvm_ready_spi(struct wm_softc *);
    784 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    785 /* Using with EERD */
    786 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    787 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    788 /* Flash */
    789 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    790     unsigned int *);
    791 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    792 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    793 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    794 	uint32_t *);
    795 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    796 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    797 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    798 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    799 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    800 /* iNVM */
    801 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    802 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    803 /* Lock, detecting NVM type, validate checksum and read */
    804 static int	wm_nvm_acquire(struct wm_softc *);
    805 static void	wm_nvm_release(struct wm_softc *);
    806 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    807 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    808 static int	wm_nvm_validate_checksum(struct wm_softc *);
    809 static void	wm_nvm_version_invm(struct wm_softc *);
    810 static void	wm_nvm_version(struct wm_softc *);
    811 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    812 
    813 /*
    814  * Hardware semaphores.
    815  * Very complex; which semaphore is required differs by chip generation.
    816  */
    817 static int	wm_get_null(struct wm_softc *);
    818 static void	wm_put_null(struct wm_softc *);
    819 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    820 static void	wm_put_swsm_semaphore(struct wm_softc *);
    821 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    822 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    823 static int	wm_get_phy_82575(struct wm_softc *);
    824 static void	wm_put_phy_82575(struct wm_softc *);
    825 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    826 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    827 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    828 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    829 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    830 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    831 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    832 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    833 
    834 /*
    835  * Management mode and power management related subroutines.
    836  * BMC, AMT, suspend/resume and EEE.
    837  */
    838 #if 0
    839 static int	wm_check_mng_mode(struct wm_softc *);
    840 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    841 static int	wm_check_mng_mode_82574(struct wm_softc *);
    842 static int	wm_check_mng_mode_generic(struct wm_softc *);
    843 #endif
    844 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    845 static bool	wm_phy_resetisblocked(struct wm_softc *);
    846 static void	wm_get_hw_control(struct wm_softc *);
    847 static void	wm_release_hw_control(struct wm_softc *);
    848 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    849 static void	wm_smbustopci(struct wm_softc *);
    850 static void	wm_init_manageability(struct wm_softc *);
    851 static void	wm_release_manageability(struct wm_softc *);
    852 static void	wm_get_wakeup(struct wm_softc *);
    853 static void	wm_ulp_disable(struct wm_softc *);
    854 static void	wm_enable_phy_wakeup(struct wm_softc *);
    855 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    856 static void	wm_enable_wakeup(struct wm_softc *);
    857 /* LPLU (Low Power Link Up) */
    858 static void	wm_lplu_d0_disable(struct wm_softc *);
    859 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    860 /* EEE */
    861 static void	wm_set_eee_i350(struct wm_softc *);
    862 
    863 /*
    864  * Workarounds (mainly PHY related).
    865  * As a rule, PHY workarounds live in the PHY drivers.
    866  */
    867 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    868 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    869 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    870 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    871 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    872 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    873 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    874 static void	wm_reset_init_script_82575(struct wm_softc *);
    875 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    876 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    877 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    878 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    879 static void	wm_pll_workaround_i210(struct wm_softc *);
    880 
    881 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    882     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    883 
    884 /*
    885  * Devices supported by this driver.
    886  */
    887 static const struct wm_product {
    888 	pci_vendor_id_t		wmp_vendor;
    889 	pci_product_id_t	wmp_product;
    890 	const char		*wmp_name;
    891 	wm_chip_type		wmp_type;
    892 	uint32_t		wmp_flags;
    893 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    894 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    895 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    896 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    897 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    898 } wm_products[] = {
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    900 	  "Intel i82542 1000BASE-X Ethernet",
    901 	  WM_T_82542_2_1,	WMP_F_FIBER },
    902 
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    904 	  "Intel i82543GC 1000BASE-X Ethernet",
    905 	  WM_T_82543,		WMP_F_FIBER },
    906 
    907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    908 	  "Intel i82543GC 1000BASE-T Ethernet",
    909 	  WM_T_82543,		WMP_F_COPPER },
    910 
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    912 	  "Intel i82544EI 1000BASE-T Ethernet",
    913 	  WM_T_82544,		WMP_F_COPPER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    916 	  "Intel i82544EI 1000BASE-X Ethernet",
    917 	  WM_T_82544,		WMP_F_FIBER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    920 	  "Intel i82544GC 1000BASE-T Ethernet",
    921 	  WM_T_82544,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    924 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    925 	  WM_T_82544,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    928 	  "Intel i82540EM 1000BASE-T Ethernet",
    929 	  WM_T_82540,		WMP_F_COPPER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    932 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    933 	  WM_T_82540,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    936 	  "Intel i82540EP 1000BASE-T Ethernet",
    937 	  WM_T_82540,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    940 	  "Intel i82540EP 1000BASE-T Ethernet",
    941 	  WM_T_82540,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    944 	  "Intel i82540EP 1000BASE-T Ethernet",
    945 	  WM_T_82540,		WMP_F_COPPER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    948 	  "Intel i82545EM 1000BASE-T Ethernet",
    949 	  WM_T_82545,		WMP_F_COPPER },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    952 	  "Intel i82545GM 1000BASE-T Ethernet",
    953 	  WM_T_82545_3,		WMP_F_COPPER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    956 	  "Intel i82545GM 1000BASE-X Ethernet",
    957 	  WM_T_82545_3,		WMP_F_FIBER },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    960 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    961 	  WM_T_82545_3,		WMP_F_SERDES },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    964 	  "Intel i82546EB 1000BASE-T Ethernet",
    965 	  WM_T_82546,		WMP_F_COPPER },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    968 	  "Intel i82546EB 1000BASE-T Ethernet",
    969 	  WM_T_82546,		WMP_F_COPPER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    972 	  "Intel i82545EM 1000BASE-X Ethernet",
    973 	  WM_T_82545,		WMP_F_FIBER },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    976 	  "Intel i82546EB 1000BASE-X Ethernet",
    977 	  WM_T_82546,		WMP_F_FIBER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    980 	  "Intel i82546GB 1000BASE-T Ethernet",
    981 	  WM_T_82546_3,		WMP_F_COPPER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    984 	  "Intel i82546GB 1000BASE-X Ethernet",
    985 	  WM_T_82546_3,		WMP_F_FIBER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    988 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    989 	  WM_T_82546_3,		WMP_F_SERDES },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    992 	  "i82546GB quad-port Gigabit Ethernet",
    993 	  WM_T_82546_3,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    996 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    997 	  WM_T_82546_3,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1000 	  "Intel PRO/1000MT (82546GB)",
   1001 	  WM_T_82546_3,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1004 	  "Intel i82541EI 1000BASE-T Ethernet",
   1005 	  WM_T_82541,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1008 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1009 	  WM_T_82541,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1012 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1013 	  WM_T_82541,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1016 	  "Intel i82541ER 1000BASE-T Ethernet",
   1017 	  WM_T_82541_2,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1020 	  "Intel i82541GI 1000BASE-T Ethernet",
   1021 	  WM_T_82541_2,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1024 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1025 	  WM_T_82541_2,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1028 	  "Intel i82541PI 1000BASE-T Ethernet",
   1029 	  WM_T_82541_2,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1032 	  "Intel i82547EI 1000BASE-T Ethernet",
   1033 	  WM_T_82547,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1036 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1037 	  WM_T_82547,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1040 	  "Intel i82547GI 1000BASE-T Ethernet",
   1041 	  WM_T_82547_2,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1044 	  "Intel PRO/1000 PT (82571EB)",
   1045 	  WM_T_82571,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1048 	  "Intel PRO/1000 PF (82571EB)",
   1049 	  WM_T_82571,		WMP_F_FIBER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1052 	  "Intel PRO/1000 PB (82571EB)",
   1053 	  WM_T_82571,		WMP_F_SERDES },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1056 	  "Intel PRO/1000 QT (82571EB)",
   1057 	  WM_T_82571,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1060 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1061 	  WM_T_82571,		WMP_F_COPPER, },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1064 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1065 	  WM_T_82571,		WMP_F_COPPER, },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1068 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1069 	  WM_T_82571,		WMP_F_SERDES, },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1072 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1073 	  WM_T_82571,		WMP_F_SERDES, },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1076 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1077 	  WM_T_82571,		WMP_F_FIBER, },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1080 	  "Intel i82572EI 1000baseT Ethernet",
   1081 	  WM_T_82572,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1084 	  "Intel i82572EI 1000baseX Ethernet",
   1085 	  WM_T_82572,		WMP_F_FIBER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1088 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1089 	  WM_T_82572,		WMP_F_SERDES },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1092 	  "Intel i82572EI 1000baseT Ethernet",
   1093 	  WM_T_82572,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1096 	  "Intel i82573E",
   1097 	  WM_T_82573,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1100 	  "Intel i82573E IAMT",
   1101 	  WM_T_82573,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1104 	  "Intel i82573L Gigabit Ethernet",
   1105 	  WM_T_82573,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1108 	  "Intel i82574L",
   1109 	  WM_T_82574,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1112 	  "Intel i82574L",
   1113 	  WM_T_82574,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1116 	  "Intel i82583V",
   1117 	  WM_T_82583,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1120 	  "i80003 dual 1000baseT Ethernet",
   1121 	  WM_T_80003,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1124 	  "i80003 dual 1000baseX Ethernet",
   1125 	  WM_T_80003,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1128 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1129 	  WM_T_80003,		WMP_F_SERDES },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1132 	  "Intel i80003 1000baseT Ethernet",
   1133 	  WM_T_80003,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1136 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1137 	  WM_T_80003,		WMP_F_SERDES },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1140 	  "Intel i82801H (M_AMT) LAN Controller",
   1141 	  WM_T_ICH8,		WMP_F_COPPER },
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1143 	  "Intel i82801H (AMT) LAN Controller",
   1144 	  WM_T_ICH8,		WMP_F_COPPER },
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1146 	  "Intel i82801H LAN Controller",
   1147 	  WM_T_ICH8,		WMP_F_COPPER },
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1149 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1150 	  WM_T_ICH8,		WMP_F_COPPER },
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1152 	  "Intel i82801H (M) LAN Controller",
   1153 	  WM_T_ICH8,		WMP_F_COPPER },
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1155 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1156 	  WM_T_ICH8,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1158 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1159 	  WM_T_ICH8,		WMP_F_COPPER },
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1161 	  "82567V-3 LAN Controller",
   1162 	  WM_T_ICH8,		WMP_F_COPPER },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1164 	  "82801I (AMT) LAN Controller",
   1165 	  WM_T_ICH9,		WMP_F_COPPER },
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1167 	  "82801I 10/100 LAN Controller",
   1168 	  WM_T_ICH9,		WMP_F_COPPER },
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1170 	  "82801I (G) 10/100 LAN Controller",
   1171 	  WM_T_ICH9,		WMP_F_COPPER },
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1173 	  "82801I (GT) 10/100 LAN Controller",
   1174 	  WM_T_ICH9,		WMP_F_COPPER },
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1176 	  "82801I (C) LAN Controller",
   1177 	  WM_T_ICH9,		WMP_F_COPPER },
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1179 	  "82801I mobile LAN Controller",
   1180 	  WM_T_ICH9,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1182 	  "82801I mobile (V) LAN Controller",
   1183 	  WM_T_ICH9,		WMP_F_COPPER },
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1185 	  "82801I mobile (AMT) LAN Controller",
   1186 	  WM_T_ICH9,		WMP_F_COPPER },
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1188 	  "82567LM-4 LAN Controller",
   1189 	  WM_T_ICH9,		WMP_F_COPPER },
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1191 	  "82567LM-2 LAN Controller",
   1192 	  WM_T_ICH10,		WMP_F_COPPER },
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1194 	  "82567LF-2 LAN Controller",
   1195 	  WM_T_ICH10,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1197 	  "82567LM-3 LAN Controller",
   1198 	  WM_T_ICH10,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1200 	  "82567LF-3 LAN Controller",
   1201 	  WM_T_ICH10,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1203 	  "82567V-2 LAN Controller",
   1204 	  WM_T_ICH10,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1206 	  "82567V-3? LAN Controller",
   1207 	  WM_T_ICH10,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1209 	  "HANKSVILLE LAN Controller",
   1210 	  WM_T_ICH10,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1212 	  "PCH LAN (82577LM) Controller",
   1213 	  WM_T_PCH,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1215 	  "PCH LAN (82577LC) Controller",
   1216 	  WM_T_PCH,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1218 	  "PCH LAN (82578DM) Controller",
   1219 	  WM_T_PCH,		WMP_F_COPPER },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1221 	  "PCH LAN (82578DC) Controller",
   1222 	  WM_T_PCH,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1224 	  "PCH2 LAN (82579LM) Controller",
   1225 	  WM_T_PCH2,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1227 	  "PCH2 LAN (82579V) Controller",
   1228 	  WM_T_PCH2,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1230 	  "82575EB dual-1000baseT Ethernet",
   1231 	  WM_T_82575,		WMP_F_COPPER },
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1233 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1234 	  WM_T_82575,		WMP_F_SERDES },
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1236 	  "82575GB quad-1000baseT Ethernet",
   1237 	  WM_T_82575,		WMP_F_COPPER },
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1239 	  "82575GB quad-1000baseT Ethernet (PM)",
   1240 	  WM_T_82575,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1242 	  "82576 1000BaseT Ethernet",
   1243 	  WM_T_82576,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1245 	  "82576 1000BaseX Ethernet",
   1246 	  WM_T_82576,		WMP_F_FIBER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1249 	  "82576 gigabit Ethernet (SERDES)",
   1250 	  WM_T_82576,		WMP_F_SERDES },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1253 	  "82576 quad-1000BaseT Ethernet",
   1254 	  WM_T_82576,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1257 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1258 	  WM_T_82576,		WMP_F_COPPER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1261 	  "82576 gigabit Ethernet",
   1262 	  WM_T_82576,		WMP_F_COPPER },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1265 	  "82576 gigabit Ethernet (SERDES)",
   1266 	  WM_T_82576,		WMP_F_SERDES },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1268 	  "82576 quad-gigabit Ethernet (SERDES)",
   1269 	  WM_T_82576,		WMP_F_SERDES },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1272 	  "82580 1000BaseT Ethernet",
   1273 	  WM_T_82580,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1275 	  "82580 1000BaseX Ethernet",
   1276 	  WM_T_82580,		WMP_F_FIBER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1279 	  "82580 1000BaseT Ethernet (SERDES)",
   1280 	  WM_T_82580,		WMP_F_SERDES },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1283 	  "82580 gigabit Ethernet (SGMII)",
   1284 	  WM_T_82580,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1286 	  "82580 dual-1000BaseT Ethernet",
   1287 	  WM_T_82580,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1290 	  "82580 quad-1000BaseX Ethernet",
   1291 	  WM_T_82580,		WMP_F_FIBER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1294 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1295 	  WM_T_82580,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1298 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1299 	  WM_T_82580,		WMP_F_SERDES },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1302 	  "DH89XXCC 1000BASE-KX Ethernet",
   1303 	  WM_T_82580,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1306 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1307 	  WM_T_82580,		WMP_F_SERDES },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1310 	  "I350 Gigabit Network Connection",
   1311 	  WM_T_I350,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1314 	  "I350 Gigabit Fiber Network Connection",
   1315 	  WM_T_I350,		WMP_F_FIBER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1318 	  "I350 Gigabit Backplane Connection",
   1319 	  WM_T_I350,		WMP_F_SERDES },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1322 	  "I350 Quad Port Gigabit Ethernet",
   1323 	  WM_T_I350,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1326 	  "I350 Gigabit Connection",
   1327 	  WM_T_I350,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1330 	  "I354 Gigabit Ethernet (KX)",
   1331 	  WM_T_I354,		WMP_F_SERDES },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1334 	  "I354 Gigabit Ethernet (SGMII)",
   1335 	  WM_T_I354,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1338 	  "I354 Gigabit Ethernet (2.5G)",
   1339 	  WM_T_I354,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1342 	  "I210-T1 Ethernet Server Adapter",
   1343 	  WM_T_I210,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1346 	  "I210 Ethernet (Copper OEM)",
   1347 	  WM_T_I210,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1350 	  "I210 Ethernet (Copper IT)",
   1351 	  WM_T_I210,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1354 	  "I210 Ethernet (FLASH less)",
   1355 	  WM_T_I210,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1358 	  "I210 Gigabit Ethernet (Fiber)",
   1359 	  WM_T_I210,		WMP_F_FIBER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1362 	  "I210 Gigabit Ethernet (SERDES)",
   1363 	  WM_T_I210,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1366 	  "I210 Gigabit Ethernet (FLASH less)",
   1367 	  WM_T_I210,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1370 	  "I210 Gigabit Ethernet (SGMII)",
   1371 	  WM_T_I210,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1374 	  "I211 Ethernet (COPPER)",
   1375 	  WM_T_I211,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1377 	  "I217 V Ethernet Connection",
   1378 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1380 	  "I217 LM Ethernet Connection",
   1381 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1383 	  "I218 V Ethernet Connection",
   1384 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1386 	  "I218 V Ethernet Connection",
   1387 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1389 	  "I218 V Ethernet Connection",
   1390 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1392 	  "I218 LM Ethernet Connection",
   1393 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1395 	  "I218 LM Ethernet Connection",
   1396 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1398 	  "I218 LM Ethernet Connection",
   1399 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1400 #if 0
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1402 	  "I219 V Ethernet Connection",
   1403 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1405 	  "I219 V Ethernet Connection",
   1406 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1408 	  "I219 V Ethernet Connection",
   1409 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1411 	  "I219 V Ethernet Connection",
   1412 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1414 	  "I219 LM Ethernet Connection",
   1415 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1417 	  "I219 LM Ethernet Connection",
   1418 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1420 	  "I219 LM Ethernet Connection",
   1421 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1423 	  "I219 LM Ethernet Connection",
   1424 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1426 	  "I219 LM Ethernet Connection",
   1427 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1428 #endif
   1429 	{ 0,			0,
   1430 	  NULL,
   1431 	  0,			0 },
   1432 };
   1433 
   1434 /*
   1435  * Register read/write functions.
   1436  * Other than CSR_{READ|WRITE}().
   1437  */
   1438 
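         /*
          * The I/O BAR used below is an indirect two-register window: a
          * write to offset 0 selects the target register, and offset 4 is
          * the data port used for the actual read or write.
          */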
   1439 #if 0 /* Not currently used */
   1440 static inline uint32_t
   1441 wm_io_read(struct wm_softc *sc, int reg)
   1442 {
   1443 
   1444 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1445 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1446 }
   1447 #endif
   1448 
   1449 static inline void
   1450 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1451 {
   1452 
   1453 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1455 }
   1456 
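         /*
          * Write an 8-bit value to one of the 82575's indirect 8-bit
          * controller registers: pack the data and the register offset into
          * a single SCTL-style control word, issue it, then poll in 5us
          * steps until the hardware sets the ready bit or
          * SCTL_CTL_POLL_TIMEOUT iterations elapse.
          */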
   1457 static inline void
   1458 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1459     uint32_t data)
   1460 {
   1461 	uint32_t regval;
   1462 	int i;
   1463 
   1464 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1465 
   1466 	CSR_WRITE(sc, reg, regval);
   1467 
   1468 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1469 		delay(5);
   1470 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1471 			break;
   1472 	}
   1473 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1474 		aprint_error("%s: WARNING:"
   1475 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1476 		    device_xname(sc->sc_dev), reg);
   1477 	}
   1478 }
   1479 
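         /*
          * Store a bus address into a descriptor's 64-bit address field as
          * two little-endian 32-bit halves; on platforms with a 32-bit
          * bus_addr_t the high word is simply zero.
          */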
   1480 static inline void
   1481 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1482 {
   1483 	wa->wa_low = htole32(v & 0xffffffffU);
   1484 	if (sizeof(bus_addr_t) == 8)
   1485 		wa->wa_high = htole32((uint64_t) v >> 32);
   1486 	else
   1487 		wa->wa_high = 0;
   1488 }
   1489 
   1490 /*
   1491  * Descriptor sync/init functions.
   1492  */
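         /*
          * For example, with WM_NTXDESC(txq) == 256, wm_cdtxsync(txq, 250,
          * 10, ops) first syncs descriptors 250..255 and then wraps around
          * to sync descriptors 0..3.
          */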
   1493 static inline void
   1494 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1495 {
   1496 	struct wm_softc *sc = txq->txq_sc;
   1497 
   1498 	/* If it will wrap around, sync to the end of the ring. */
   1499 	if ((start + num) > WM_NTXDESC(txq)) {
   1500 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1501 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1502 		    (WM_NTXDESC(txq) - start), ops);
   1503 		num -= (WM_NTXDESC(txq) - start);
   1504 		start = 0;
   1505 	}
   1506 
   1507 	/* Now sync whatever is left. */
   1508 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1509 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1510 }
   1511 
   1512 static inline void
   1513 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1514 {
   1515 	struct wm_softc *sc = rxq->rxq_sc;
   1516 
   1517 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1518 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1519 }
   1520 
   1521 static inline void
   1522 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1523 {
   1524 	struct wm_softc *sc = rxq->rxq_sc;
   1525 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1526 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1527 	struct mbuf *m = rxs->rxs_mbuf;
   1528 
   1529 	/*
   1530 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1531 	 * so that the payload after the Ethernet header is aligned
   1532 	 * to a 4-byte boundary.
    1533 	 *
   1534 	 * XXX BRAINDAMAGE ALERT!
   1535 	 * The stupid chip uses the same size for every buffer, which
   1536 	 * is set in the Receive Control register.  We are using the 2K
   1537 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1538 	 * reason, we can't "scoot" packets longer than the standard
   1539 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1540 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1541 	 * the upper layer copy the headers.
   1542 	 */
   1543 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1544 
   1545 	wm_set_dma_addr(&rxd->wrx_addr,
   1546 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1547 	rxd->wrx_len = 0;
   1548 	rxd->wrx_cksum = 0;
   1549 	rxd->wrx_status = 0;
   1550 	rxd->wrx_errors = 0;
   1551 	rxd->wrx_special = 0;
   1552 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1553 
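         	/*
         	 * Advancing the receive descriptor tail (RDT) hands this
         	 * newly initialized descriptor back to the hardware.
         	 */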
   1554 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1555 }
   1556 
   1557 /*
   1558  * Device driver interface functions and commonly used functions.
   1559  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1560  */
   1561 
   1562 /* Lookup supported device table */
   1563 static const struct wm_product *
   1564 wm_lookup(const struct pci_attach_args *pa)
   1565 {
   1566 	const struct wm_product *wmp;
   1567 
   1568 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1569 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1570 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1571 			return wmp;
   1572 	}
   1573 	return NULL;
   1574 }
   1575 
   1576 /* The match function (ca_match) */
   1577 static int
   1578 wm_match(device_t parent, cfdata_t cf, void *aux)
   1579 {
   1580 	struct pci_attach_args *pa = aux;
   1581 
   1582 	if (wm_lookup(pa) != NULL)
   1583 		return 1;
   1584 
   1585 	return 0;
   1586 }
   1587 
   1588 /* The attach function (ca_attach) */
   1589 static void
   1590 wm_attach(device_t parent, device_t self, void *aux)
   1591 {
   1592 	struct wm_softc *sc = device_private(self);
   1593 	struct pci_attach_args *pa = aux;
   1594 	prop_dictionary_t dict;
   1595 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1596 	pci_chipset_tag_t pc = pa->pa_pc;
   1597 	int counts[PCI_INTR_TYPE_SIZE];
   1598 	pci_intr_type_t max_type;
   1599 	const char *eetype, *xname;
   1600 	bus_space_tag_t memt;
   1601 	bus_space_handle_t memh;
   1602 	bus_size_t memsize;
   1603 	int memh_valid;
   1604 	int i, error;
   1605 	const struct wm_product *wmp;
   1606 	prop_data_t ea;
   1607 	prop_number_t pn;
   1608 	uint8_t enaddr[ETHER_ADDR_LEN];
   1609 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1610 	pcireg_t preg, memtype;
   1611 	uint16_t eeprom_data, apme_mask;
   1612 	bool force_clear_smbi;
   1613 	uint32_t link_mode;
   1614 	uint32_t reg;
   1615 
   1616 	sc->sc_dev = self;
   1617 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1618 	sc->sc_core_stopping = false;
   1619 
   1620 	wmp = wm_lookup(pa);
   1621 #ifdef DIAGNOSTIC
   1622 	if (wmp == NULL) {
   1623 		printf("\n");
   1624 		panic("wm_attach: impossible");
   1625 	}
   1626 #endif
   1627 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1628 
   1629 	sc->sc_pc = pa->pa_pc;
   1630 	sc->sc_pcitag = pa->pa_tag;
   1631 
   1632 	if (pci_dma64_available(pa))
   1633 		sc->sc_dmat = pa->pa_dmat64;
   1634 	else
   1635 		sc->sc_dmat = pa->pa_dmat;
   1636 
   1637 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1638 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1639 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1640 
   1641 	sc->sc_type = wmp->wmp_type;
   1642 
   1643 	/* Set default function pointers */
   1644 	sc->phy.acquire = wm_get_null;
   1645 	sc->phy.release = wm_put_null;
   1646 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1647 
   1648 	if (sc->sc_type < WM_T_82543) {
   1649 		if (sc->sc_rev < 2) {
   1650 			aprint_error_dev(sc->sc_dev,
   1651 			    "i82542 must be at least rev. 2\n");
   1652 			return;
   1653 		}
   1654 		if (sc->sc_rev < 3)
   1655 			sc->sc_type = WM_T_82542_2_0;
   1656 	}
   1657 
   1658 	/*
   1659 	 * Disable MSI for Errata:
   1660 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1661 	 *
   1662 	 *  82544: Errata 25
   1663 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1664 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1665 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1666 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1667 	 *
   1668 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1669 	 *
   1670 	 *  82571 & 82572: Errata 63
   1671 	 */
   1672 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1673 	    || (sc->sc_type == WM_T_82572))
   1674 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1675 
   1676 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1677 	    || (sc->sc_type == WM_T_82580)
   1678 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1679 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1680 		sc->sc_flags |= WM_F_NEWQUEUE;
   1681 
   1682 	/* Set device properties (mactype) */
   1683 	dict = device_properties(sc->sc_dev);
   1684 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1685 
   1686 	/*
    1687 	 * Map the device.  All devices support memory-mapped access,
   1688 	 * and it is really required for normal operation.
   1689 	 */
   1690 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1691 	switch (memtype) {
   1692 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1693 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1694 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1695 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1696 		break;
   1697 	default:
   1698 		memh_valid = 0;
   1699 		break;
   1700 	}
   1701 
   1702 	if (memh_valid) {
   1703 		sc->sc_st = memt;
   1704 		sc->sc_sh = memh;
   1705 		sc->sc_ss = memsize;
   1706 	} else {
   1707 		aprint_error_dev(sc->sc_dev,
   1708 		    "unable to map device registers\n");
   1709 		return;
   1710 	}
   1711 
   1712 	/*
   1713 	 * In addition, i82544 and later support I/O mapped indirect
   1714 	 * register access.  It is not desirable (nor supported in
   1715 	 * this driver) to use it for normal operation, though it is
   1716 	 * required to work around bugs in some chip versions.
   1717 	 */
   1718 	if (sc->sc_type >= WM_T_82544) {
   1719 		/* First we have to find the I/O BAR. */
   1720 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1721 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1722 			if (memtype == PCI_MAPREG_TYPE_IO)
   1723 				break;
   1724 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1725 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1726 				i += 4;	/* skip high bits, too */
   1727 		}
   1728 		if (i < PCI_MAPREG_END) {
   1729 			/*
    1730 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1731 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1732 			 * that is no problem, because the newer chips don't
    1733 			 * have this bug.
    1734 			 *
    1735 			 * The i8254x apparently doesn't respond when the
    1736 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1737 			 * been configured.
   1738 			 */
   1739 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1740 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1741 				aprint_error_dev(sc->sc_dev,
   1742 				    "WARNING: I/O BAR at zero.\n");
   1743 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1744 					0, &sc->sc_iot, &sc->sc_ioh,
   1745 					NULL, &sc->sc_ios) == 0) {
   1746 				sc->sc_flags |= WM_F_IOH_VALID;
   1747 			} else {
   1748 				aprint_error_dev(sc->sc_dev,
   1749 				    "WARNING: unable to map I/O space\n");
   1750 			}
   1751 		}
   1752 
   1753 	}
   1754 
   1755 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1756 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1757 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1758 	if (sc->sc_type < WM_T_82542_2_1)
   1759 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1760 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1761 
   1762 	/* power up chip */
   1763 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1764 	    NULL)) && error != EOPNOTSUPP) {
   1765 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1766 		return;
   1767 	}
   1768 
   1769 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1770 
   1771 	/* Allocation settings */
   1772 	max_type = PCI_INTR_TYPE_MSIX;
   1773 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1774 	counts[PCI_INTR_TYPE_MSI] = 1;
   1775 	counts[PCI_INTR_TYPE_INTX] = 1;
   1776 
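         	/*
         	 * Try MSI-X first (one vector per queue plus one for link
         	 * events), fall back to single-vector MSI, then to INTx.
         	 * Each failed setup releases its vectors and retries with
         	 * the next weaker type via the alloc_retry label.
         	 */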
   1777 alloc_retry:
   1778 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1779 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1780 		return;
   1781 	}
   1782 
   1783 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1784 		error = wm_setup_msix(sc);
   1785 		if (error) {
   1786 			pci_intr_release(pc, sc->sc_intrs,
   1787 			    counts[PCI_INTR_TYPE_MSIX]);
   1788 
   1789 			/* Setup for MSI: Disable MSI-X */
   1790 			max_type = PCI_INTR_TYPE_MSI;
   1791 			counts[PCI_INTR_TYPE_MSI] = 1;
   1792 			counts[PCI_INTR_TYPE_INTX] = 1;
   1793 			goto alloc_retry;
   1794 		}
    1795 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1796 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1797 		error = wm_setup_legacy(sc);
   1798 		if (error) {
   1799 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1800 			    counts[PCI_INTR_TYPE_MSI]);
   1801 
   1802 			/* The next try is for INTx: Disable MSI */
   1803 			max_type = PCI_INTR_TYPE_INTX;
   1804 			counts[PCI_INTR_TYPE_INTX] = 1;
   1805 			goto alloc_retry;
   1806 		}
   1807 	} else {
   1808 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1809 		error = wm_setup_legacy(sc);
   1810 		if (error) {
   1811 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1812 			    counts[PCI_INTR_TYPE_INTX]);
   1813 			return;
   1814 		}
   1815 	}
   1816 
   1817 	/*
   1818 	 * Check the function ID (unit number of the chip).
   1819 	 */
   1820 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1821 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1822 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1823 	    || (sc->sc_type == WM_T_82580)
   1824 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1825 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1826 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1827 	else
   1828 		sc->sc_funcid = 0;
   1829 
   1830 	/*
   1831 	 * Determine a few things about the bus we're connected to.
   1832 	 */
   1833 	if (sc->sc_type < WM_T_82543) {
   1834 		/* We don't really know the bus characteristics here. */
   1835 		sc->sc_bus_speed = 33;
   1836 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1837 		/*
    1838 		 * CSA (Communication Streaming Architecture) is about as
    1839 		 * fast as a 32-bit 66MHz PCI bus.
   1840 		 */
   1841 		sc->sc_flags |= WM_F_CSA;
   1842 		sc->sc_bus_speed = 66;
   1843 		aprint_verbose_dev(sc->sc_dev,
   1844 		    "Communication Streaming Architecture\n");
   1845 		if (sc->sc_type == WM_T_82547) {
   1846 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1847 			callout_setfunc(&sc->sc_txfifo_ch,
   1848 					wm_82547_txfifo_stall, sc);
   1849 			aprint_verbose_dev(sc->sc_dev,
   1850 			    "using 82547 Tx FIFO stall work-around\n");
   1851 		}
   1852 	} else if (sc->sc_type >= WM_T_82571) {
   1853 		sc->sc_flags |= WM_F_PCIE;
   1854 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1855 		    && (sc->sc_type != WM_T_ICH10)
   1856 		    && (sc->sc_type != WM_T_PCH)
   1857 		    && (sc->sc_type != WM_T_PCH2)
   1858 		    && (sc->sc_type != WM_T_PCH_LPT)
   1859 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1860 			/* ICH* and PCH* have no PCIe capability registers */
   1861 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1862 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1863 				NULL) == 0)
   1864 				aprint_error_dev(sc->sc_dev,
   1865 				    "unable to find PCIe capability\n");
   1866 		}
   1867 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1868 	} else {
   1869 		reg = CSR_READ(sc, WMREG_STATUS);
   1870 		if (reg & STATUS_BUS64)
   1871 			sc->sc_flags |= WM_F_BUS64;
   1872 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1873 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1874 
   1875 			sc->sc_flags |= WM_F_PCIX;
   1876 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1877 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1878 				aprint_error_dev(sc->sc_dev,
   1879 				    "unable to find PCIX capability\n");
   1880 			else if (sc->sc_type != WM_T_82545_3 &&
   1881 				 sc->sc_type != WM_T_82546_3) {
   1882 				/*
   1883 				 * Work around a problem caused by the BIOS
   1884 				 * setting the max memory read byte count
   1885 				 * incorrectly.
   1886 				 */
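         				/*
         				 * Both fields encode the byte count
         				 * as 512 << n, so if the command
         				 * register requests more than the
         				 * status register advertises, clamp
         				 * it to the maximum.
         				 */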
   1887 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1888 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1889 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1890 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1891 
   1892 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1893 				    PCIX_CMD_BYTECNT_SHIFT;
   1894 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1895 				    PCIX_STATUS_MAXB_SHIFT;
   1896 				if (bytecnt > maxb) {
   1897 					aprint_verbose_dev(sc->sc_dev,
   1898 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1899 					    512 << bytecnt, 512 << maxb);
   1900 					pcix_cmd = (pcix_cmd &
   1901 					    ~PCIX_CMD_BYTECNT_MASK) |
   1902 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1903 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1904 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1905 					    pcix_cmd);
   1906 				}
   1907 			}
   1908 		}
   1909 		/*
   1910 		 * The quad port adapter is special; it has a PCIX-PCIX
   1911 		 * bridge on the board, and can run the secondary bus at
   1912 		 * a higher speed.
   1913 		 */
   1914 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1915 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1916 								      : 66;
   1917 		} else if (sc->sc_flags & WM_F_PCIX) {
   1918 			switch (reg & STATUS_PCIXSPD_MASK) {
   1919 			case STATUS_PCIXSPD_50_66:
   1920 				sc->sc_bus_speed = 66;
   1921 				break;
   1922 			case STATUS_PCIXSPD_66_100:
   1923 				sc->sc_bus_speed = 100;
   1924 				break;
   1925 			case STATUS_PCIXSPD_100_133:
   1926 				sc->sc_bus_speed = 133;
   1927 				break;
   1928 			default:
   1929 				aprint_error_dev(sc->sc_dev,
   1930 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1931 				    reg & STATUS_PCIXSPD_MASK);
   1932 				sc->sc_bus_speed = 66;
   1933 				break;
   1934 			}
   1935 		} else
   1936 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1937 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1938 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1939 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1940 	}
   1941 
   1942 	/* clear interesting stat counters */
   1943 	CSR_READ(sc, WMREG_COLC);
   1944 	CSR_READ(sc, WMREG_RXERRC);
   1945 
   1946 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1947 	    || (sc->sc_type >= WM_T_ICH8))
   1948 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1949 	if (sc->sc_type >= WM_T_ICH8)
   1950 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1951 
    1952 	/* Set up PHY/NVM locking and NVM parameters */
   1953 	switch (sc->sc_type) {
   1954 	case WM_T_82542_2_0:
   1955 	case WM_T_82542_2_1:
   1956 	case WM_T_82543:
   1957 	case WM_T_82544:
   1958 		/* Microwire */
   1959 		sc->sc_nvm_wordsize = 64;
   1960 		sc->sc_nvm_addrbits = 6;
   1961 		break;
   1962 	case WM_T_82540:
   1963 	case WM_T_82545:
   1964 	case WM_T_82545_3:
   1965 	case WM_T_82546:
   1966 	case WM_T_82546_3:
   1967 		/* Microwire */
   1968 		reg = CSR_READ(sc, WMREG_EECD);
   1969 		if (reg & EECD_EE_SIZE) {
   1970 			sc->sc_nvm_wordsize = 256;
   1971 			sc->sc_nvm_addrbits = 8;
   1972 		} else {
   1973 			sc->sc_nvm_wordsize = 64;
   1974 			sc->sc_nvm_addrbits = 6;
   1975 		}
   1976 		sc->sc_flags |= WM_F_LOCK_EECD;
   1977 		break;
   1978 	case WM_T_82541:
   1979 	case WM_T_82541_2:
   1980 	case WM_T_82547:
   1981 	case WM_T_82547_2:
   1982 		sc->sc_flags |= WM_F_LOCK_EECD;
   1983 		reg = CSR_READ(sc, WMREG_EECD);
   1984 		if (reg & EECD_EE_TYPE) {
   1985 			/* SPI */
   1986 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1987 			wm_nvm_set_addrbits_size_eecd(sc);
   1988 		} else {
   1989 			/* Microwire */
   1990 			if ((reg & EECD_EE_ABITS) != 0) {
   1991 				sc->sc_nvm_wordsize = 256;
   1992 				sc->sc_nvm_addrbits = 8;
   1993 			} else {
   1994 				sc->sc_nvm_wordsize = 64;
   1995 				sc->sc_nvm_addrbits = 6;
   1996 			}
   1997 		}
   1998 		break;
   1999 	case WM_T_82571:
   2000 	case WM_T_82572:
   2001 		/* SPI */
   2002 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2003 		wm_nvm_set_addrbits_size_eecd(sc);
   2004 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2005 		sc->phy.acquire = wm_get_swsm_semaphore;
   2006 		sc->phy.release = wm_put_swsm_semaphore;
   2007 		break;
   2008 	case WM_T_82573:
   2009 	case WM_T_82574:
   2010 	case WM_T_82583:
   2011 		if (sc->sc_type == WM_T_82573) {
   2012 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2013 			sc->phy.acquire = wm_get_swsm_semaphore;
   2014 			sc->phy.release = wm_put_swsm_semaphore;
   2015 		} else {
   2016 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2017 			/* Both PHY and NVM use the same semaphore. */
   2018 			sc->phy.acquire
   2019 			    = wm_get_swfwhw_semaphore;
   2020 			sc->phy.release
   2021 			    = wm_put_swfwhw_semaphore;
   2022 		}
   2023 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2024 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2025 			sc->sc_nvm_wordsize = 2048;
   2026 		} else {
   2027 			/* SPI */
   2028 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2029 			wm_nvm_set_addrbits_size_eecd(sc);
   2030 		}
   2031 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2032 		break;
   2033 	case WM_T_82575:
   2034 	case WM_T_82576:
   2035 	case WM_T_82580:
   2036 	case WM_T_I350:
   2037 	case WM_T_I354:
   2038 	case WM_T_80003:
   2039 		/* SPI */
   2040 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2041 		wm_nvm_set_addrbits_size_eecd(sc);
   2042 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2043 		    | WM_F_LOCK_SWSM;
   2044 		sc->phy.acquire = wm_get_phy_82575;
   2045 		sc->phy.release = wm_put_phy_82575;
   2046 		break;
   2047 	case WM_T_ICH8:
   2048 	case WM_T_ICH9:
   2049 	case WM_T_ICH10:
   2050 	case WM_T_PCH:
   2051 	case WM_T_PCH2:
   2052 	case WM_T_PCH_LPT:
   2053 		/* FLASH */
   2054 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2055 		sc->sc_nvm_wordsize = 2048;
   2056 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2057 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2058 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2059 			aprint_error_dev(sc->sc_dev,
   2060 			    "can't map FLASH registers\n");
   2061 			goto out;
   2062 		}
   2063 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2064 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2065 		    ICH_FLASH_SECTOR_SIZE;
   2066 		sc->sc_ich8_flash_bank_size =
   2067 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2068 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2069 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2070 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2071 		sc->sc_flashreg_offset = 0;
   2072 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2073 		sc->phy.release = wm_put_swflag_ich8lan;
   2074 		break;
   2075 	case WM_T_PCH_SPT:
   2076 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2077 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2078 		sc->sc_flasht = sc->sc_st;
   2079 		sc->sc_flashh = sc->sc_sh;
   2080 		sc->sc_ich8_flash_base = 0;
   2081 		sc->sc_nvm_wordsize =
   2082 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2083 			* NVM_SIZE_MULTIPLIER;
    2084 		/* That is the size in bytes; we want it in words */
   2085 		sc->sc_nvm_wordsize /= 2;
   2086 		/* assume 2 banks */
   2087 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2088 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2089 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2090 		sc->phy.release = wm_put_swflag_ich8lan;
   2091 		break;
   2092 	case WM_T_I210:
   2093 	case WM_T_I211:
   2094 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2095 			wm_nvm_set_addrbits_size_eecd(sc);
   2096 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2097 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2098 		} else {
   2099 			sc->sc_nvm_wordsize = INVM_SIZE;
   2100 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2101 		}
   2102 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2103 		sc->phy.acquire = wm_get_phy_82575;
   2104 		sc->phy.release = wm_put_phy_82575;
   2105 		break;
   2106 	default:
   2107 		break;
   2108 	}
   2109 
   2110 	/* Reset the chip to a known state. */
   2111 	wm_reset(sc);
   2112 
   2113 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2114 	switch (sc->sc_type) {
   2115 	case WM_T_82571:
   2116 	case WM_T_82572:
   2117 		reg = CSR_READ(sc, WMREG_SWSM2);
   2118 		if ((reg & SWSM2_LOCK) == 0) {
   2119 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2120 			force_clear_smbi = true;
   2121 		} else
   2122 			force_clear_smbi = false;
   2123 		break;
   2124 	case WM_T_82573:
   2125 	case WM_T_82574:
   2126 	case WM_T_82583:
   2127 		force_clear_smbi = true;
   2128 		break;
   2129 	default:
   2130 		force_clear_smbi = false;
   2131 		break;
   2132 	}
   2133 	if (force_clear_smbi) {
   2134 		reg = CSR_READ(sc, WMREG_SWSM);
   2135 		if ((reg & SWSM_SMBI) != 0)
   2136 			aprint_error_dev(sc->sc_dev,
   2137 			    "Please update the Bootagent\n");
   2138 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2139 	}
   2140 
   2141 	/*
    2142 	 * Defer printing the EEPROM type until after verifying the checksum.
   2143 	 * This allows the EEPROM type to be printed correctly in the case
   2144 	 * that no EEPROM is attached.
   2145 	 */
   2146 	/*
   2147 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2148 	 * this for later, so we can fail future reads from the EEPROM.
   2149 	 */
   2150 	if (wm_nvm_validate_checksum(sc)) {
   2151 		/*
    2152 		 * Validate again, because some PCI-e parts fail the
    2153 		 * first check due to the link being in a sleep state.
   2154 		 */
   2155 		if (wm_nvm_validate_checksum(sc))
   2156 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2157 	}
   2158 
   2159 	/* Set device properties (macflags) */
   2160 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2161 
   2162 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2163 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2164 	else {
   2165 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2166 		    sc->sc_nvm_wordsize);
   2167 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2168 			aprint_verbose("iNVM");
   2169 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2170 			aprint_verbose("FLASH(HW)");
   2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2172 			aprint_verbose("FLASH");
   2173 		else {
   2174 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2175 				eetype = "SPI";
   2176 			else
   2177 				eetype = "MicroWire";
   2178 			aprint_verbose("(%d address bits) %s EEPROM",
   2179 			    sc->sc_nvm_addrbits, eetype);
   2180 		}
   2181 	}
   2182 	wm_nvm_version(sc);
   2183 	aprint_verbose("\n");
   2184 
   2185 	/* Check for I21[01] PLL workaround */
   2186 	if (sc->sc_type == WM_T_I210)
   2187 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2188 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2189 		/* NVM image release 3.25 has a workaround */
   2190 		if ((sc->sc_nvm_ver_major < 3)
   2191 		    || ((sc->sc_nvm_ver_major == 3)
   2192 			&& (sc->sc_nvm_ver_minor < 25))) {
   2193 			aprint_verbose_dev(sc->sc_dev,
   2194 			    "ROM image version %d.%d is older than 3.25\n",
   2195 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2196 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2197 		}
   2198 	}
   2199 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2200 		wm_pll_workaround_i210(sc);
   2201 
   2202 	wm_get_wakeup(sc);
   2203 
   2204 	/* Non-AMT based hardware can now take control from firmware */
   2205 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2206 		wm_get_hw_control(sc);
   2207 
   2208 	/*
    2209 	 * Read the Ethernet address from the EEPROM, unless it was
    2210 	 * found first in the device properties.
   2211 	 */
   2212 	ea = prop_dictionary_get(dict, "mac-address");
   2213 	if (ea != NULL) {
   2214 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2215 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2216 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2217 	} else {
   2218 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2219 			aprint_error_dev(sc->sc_dev,
   2220 			    "unable to read Ethernet address\n");
   2221 			goto out;
   2222 		}
   2223 	}
   2224 
   2225 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2226 	    ether_sprintf(enaddr));
   2227 
   2228 	/*
   2229 	 * Read the config info from the EEPROM, and set up various
   2230 	 * bits in the control registers based on their contents.
   2231 	 */
   2232 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2233 	if (pn != NULL) {
   2234 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2235 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2236 	} else {
   2237 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2238 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2239 			goto out;
   2240 		}
   2241 	}
   2242 
   2243 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2244 	if (pn != NULL) {
   2245 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2246 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2247 	} else {
   2248 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2249 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2250 			goto out;
   2251 		}
   2252 	}
   2253 
   2254 	/* check for WM_F_WOL */
   2255 	switch (sc->sc_type) {
   2256 	case WM_T_82542_2_0:
   2257 	case WM_T_82542_2_1:
   2258 	case WM_T_82543:
   2259 		/* dummy? */
   2260 		eeprom_data = 0;
   2261 		apme_mask = NVM_CFG3_APME;
   2262 		break;
   2263 	case WM_T_82544:
   2264 		apme_mask = NVM_CFG2_82544_APM_EN;
   2265 		eeprom_data = cfg2;
   2266 		break;
   2267 	case WM_T_82546:
   2268 	case WM_T_82546_3:
   2269 	case WM_T_82571:
   2270 	case WM_T_82572:
   2271 	case WM_T_82573:
   2272 	case WM_T_82574:
   2273 	case WM_T_82583:
   2274 	case WM_T_80003:
   2275 	default:
   2276 		apme_mask = NVM_CFG3_APME;
   2277 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2278 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2279 		break;
   2280 	case WM_T_82575:
   2281 	case WM_T_82576:
   2282 	case WM_T_82580:
   2283 	case WM_T_I350:
   2284 	case WM_T_I354: /* XXX ok? */
   2285 	case WM_T_ICH8:
   2286 	case WM_T_ICH9:
   2287 	case WM_T_ICH10:
   2288 	case WM_T_PCH:
   2289 	case WM_T_PCH2:
   2290 	case WM_T_PCH_LPT:
   2291 	case WM_T_PCH_SPT:
   2292 		/* XXX The funcid should be checked on some devices */
   2293 		apme_mask = WUC_APME;
   2294 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2295 		break;
   2296 	}
   2297 
    2298 	/* Check for the WM_F_WOL flag after reading the EEPROM */
   2299 	if ((eeprom_data & apme_mask) != 0)
   2300 		sc->sc_flags |= WM_F_WOL;
   2301 #ifdef WM_DEBUG
   2302 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2303 		printf("WOL\n");
   2304 #endif
   2305 
   2306 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2307 		/* Check NVM for autonegotiation */
   2308 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2309 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2310 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2311 		}
   2312 	}
   2313 
   2314 	/*
    2315 	 * XXX need special handling for some multiple-port cards
    2316 	 * to disable a particular port.
   2317 	 */
   2318 
   2319 	if (sc->sc_type >= WM_T_82544) {
   2320 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2321 		if (pn != NULL) {
   2322 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2323 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2324 		} else {
   2325 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2326 				aprint_error_dev(sc->sc_dev,
   2327 				    "unable to read SWDPIN\n");
   2328 				goto out;
   2329 			}
   2330 		}
   2331 	}
   2332 
   2333 	if (cfg1 & NVM_CFG1_ILOS)
   2334 		sc->sc_ctrl |= CTRL_ILOS;
   2335 
   2336 	/*
   2337 	 * XXX
    2338 	 * This code isn't correct because pins 2 and 3 are located
    2339 	 * in different positions on newer chips. Check all the datasheets.
    2340 	 *
    2341 	 * Until this problem is resolved, only do this for chips < 82580.
   2342 	 */
   2343 	if (sc->sc_type <= WM_T_82580) {
   2344 		if (sc->sc_type >= WM_T_82544) {
   2345 			sc->sc_ctrl |=
   2346 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2347 			    CTRL_SWDPIO_SHIFT;
   2348 			sc->sc_ctrl |=
   2349 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2350 			    CTRL_SWDPINS_SHIFT;
   2351 		} else {
   2352 			sc->sc_ctrl |=
   2353 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2354 			    CTRL_SWDPIO_SHIFT;
   2355 		}
   2356 	}
   2357 
   2358 	/* XXX For other than 82580? */
   2359 	if (sc->sc_type == WM_T_82580) {
   2360 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2361 		if (nvmword & __BIT(13))
   2362 			sc->sc_ctrl |= CTRL_ILOS;
   2363 	}
   2364 
   2365 #if 0
   2366 	if (sc->sc_type >= WM_T_82544) {
   2367 		if (cfg1 & NVM_CFG1_IPS0)
   2368 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2369 		if (cfg1 & NVM_CFG1_IPS1)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2371 		sc->sc_ctrl_ext |=
   2372 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2373 		    CTRL_EXT_SWDPIO_SHIFT;
   2374 		sc->sc_ctrl_ext |=
   2375 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2376 		    CTRL_EXT_SWDPINS_SHIFT;
   2377 	} else {
   2378 		sc->sc_ctrl_ext |=
   2379 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2380 		    CTRL_EXT_SWDPIO_SHIFT;
   2381 	}
   2382 #endif
   2383 
   2384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2385 #if 0
   2386 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2387 #endif
   2388 
   2389 	if (sc->sc_type == WM_T_PCH) {
   2390 		uint16_t val;
   2391 
   2392 		/* Save the NVM K1 bit setting */
   2393 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2394 
   2395 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2396 			sc->sc_nvm_k1_enabled = 1;
   2397 		else
   2398 			sc->sc_nvm_k1_enabled = 0;
   2399 	}
   2400 
   2401 	/*
    2402 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2403 	 * media structures accordingly.
   2404 	 */
   2405 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2406 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2407 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2408 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2409 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2410 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2411 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2412 	} else if (sc->sc_type < WM_T_82543 ||
   2413 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2414 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2415 			aprint_error_dev(sc->sc_dev,
   2416 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2417 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2418 		}
   2419 		wm_tbi_mediainit(sc);
   2420 	} else {
   2421 		switch (sc->sc_type) {
   2422 		case WM_T_82575:
   2423 		case WM_T_82576:
   2424 		case WM_T_82580:
   2425 		case WM_T_I350:
   2426 		case WM_T_I354:
   2427 		case WM_T_I210:
   2428 		case WM_T_I211:
   2429 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2430 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
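         			/*
         			 * The CTRL_EXT link-mode field selects among
         			 * 1000KX, SGMII, PCIe SERDES and GMII.  For
         			 * SGMII over I2C and for SERDES, the SFP
         			 * module is probed below to refine the media
         			 * type.
         			 */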
   2431 			switch (link_mode) {
   2432 			case CTRL_EXT_LINK_MODE_1000KX:
   2433 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2434 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2435 				break;
   2436 			case CTRL_EXT_LINK_MODE_SGMII:
   2437 				if (wm_sgmii_uses_mdio(sc)) {
   2438 					aprint_verbose_dev(sc->sc_dev,
   2439 					    "SGMII(MDIO)\n");
   2440 					sc->sc_flags |= WM_F_SGMII;
   2441 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2442 					break;
   2443 				}
   2444 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2445 				/*FALLTHROUGH*/
   2446 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2447 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2448 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2449 					if (link_mode
   2450 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2451 						sc->sc_mediatype
   2452 						    = WM_MEDIATYPE_COPPER;
   2453 						sc->sc_flags |= WM_F_SGMII;
   2454 					} else {
   2455 						sc->sc_mediatype
   2456 						    = WM_MEDIATYPE_SERDES;
   2457 						aprint_verbose_dev(sc->sc_dev,
   2458 						    "SERDES\n");
   2459 					}
   2460 					break;
   2461 				}
   2462 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2463 					aprint_verbose_dev(sc->sc_dev,
   2464 					    "SERDES\n");
   2465 
   2466 				/* Change current link mode setting */
   2467 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2468 				switch (sc->sc_mediatype) {
   2469 				case WM_MEDIATYPE_COPPER:
   2470 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2471 					break;
   2472 				case WM_MEDIATYPE_SERDES:
   2473 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2474 					break;
   2475 				default:
   2476 					break;
   2477 				}
   2478 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2479 				break;
   2480 			case CTRL_EXT_LINK_MODE_GMII:
   2481 			default:
   2482 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2483 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2484 				break;
   2485 			}
   2486 
   2487 			reg &= ~CTRL_EXT_I2C_ENA;
   2488 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2489 				reg |= CTRL_EXT_I2C_ENA;
   2490 			else
   2491 				reg &= ~CTRL_EXT_I2C_ENA;
   2492 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2493 
   2494 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2495 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2496 			else
   2497 				wm_tbi_mediainit(sc);
   2498 			break;
   2499 		default:
   2500 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2501 				aprint_error_dev(sc->sc_dev,
   2502 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2503 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2504 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2505 		}
   2506 	}
   2507 
   2508 	ifp = &sc->sc_ethercom.ec_if;
   2509 	xname = device_xname(sc->sc_dev);
   2510 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2511 	ifp->if_softc = sc;
   2512 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2513 	ifp->if_extflags = IFEF_START_MPSAFE;
   2514 	ifp->if_ioctl = wm_ioctl;
   2515 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2516 		ifp->if_start = wm_nq_start;
   2517 		if (sc->sc_nqueues > 1)
   2518 			ifp->if_transmit = wm_nq_transmit;
   2519 	} else {
   2520 		ifp->if_start = wm_start;
   2521 		if (sc->sc_nqueues > 1)
   2522 			ifp->if_transmit = wm_transmit;
   2523 	}
   2524 	ifp->if_watchdog = wm_watchdog;
   2525 	ifp->if_init = wm_init;
   2526 	ifp->if_stop = wm_stop;
   2527 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2528 	IFQ_SET_READY(&ifp->if_snd);
   2529 
   2530 	/* Check for jumbo frame */
   2531 	switch (sc->sc_type) {
   2532 	case WM_T_82573:
   2533 		/* XXX limited to 9234 if ASPM is disabled */
   2534 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2535 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2536 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2537 		break;
   2538 	case WM_T_82571:
   2539 	case WM_T_82572:
   2540 	case WM_T_82574:
   2541 	case WM_T_82575:
   2542 	case WM_T_82576:
   2543 	case WM_T_82580:
   2544 	case WM_T_I350:
   2545 	case WM_T_I354: /* XXXX ok? */
   2546 	case WM_T_I210:
   2547 	case WM_T_I211:
   2548 	case WM_T_80003:
   2549 	case WM_T_ICH9:
   2550 	case WM_T_ICH10:
   2551 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2552 	case WM_T_PCH_LPT:
   2553 	case WM_T_PCH_SPT:
   2554 		/* XXX limited to 9234 */
   2555 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2556 		break;
   2557 	case WM_T_PCH:
   2558 		/* XXX limited to 4096 */
   2559 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2560 		break;
   2561 	case WM_T_82542_2_0:
   2562 	case WM_T_82542_2_1:
   2563 	case WM_T_82583:
   2564 	case WM_T_ICH8:
   2565 		/* No support for jumbo frame */
   2566 		break;
   2567 	default:
   2568 		/* ETHER_MAX_LEN_JUMBO */
   2569 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2570 		break;
   2571 	}
   2572 
   2573 	/* If we're a i82543 or greater, we can support VLANs. */
    2574 	/* If we're an i82543 or greater, we can support VLANs. */
   2575 		sc->sc_ethercom.ec_capabilities |=
   2576 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2577 
   2578 	/*
    2579 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2580 	 * on i82543 and later.
   2581 	 */
   2582 	if (sc->sc_type >= WM_T_82543) {
   2583 		ifp->if_capabilities |=
   2584 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2585 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2586 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2587 		    IFCAP_CSUM_TCPv6_Tx |
   2588 		    IFCAP_CSUM_UDPv6_Tx;
   2589 	}
   2590 
   2591 	/*
    2592 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2593 	 *
   2594 	 *	82541GI (8086:1076) ... no
   2595 	 *	82572EI (8086:10b9) ... yes
   2596 	 */
   2597 	if (sc->sc_type >= WM_T_82571) {
   2598 		ifp->if_capabilities |=
   2599 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2600 	}
   2601 
   2602 	/*
    2603 	 * If we're an i82544 or greater (except i82547), we can do
   2604 	 * TCP segmentation offload.
   2605 	 */
   2606 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2607 		ifp->if_capabilities |= IFCAP_TSOv4;
   2608 	}
   2609 
   2610 	if (sc->sc_type >= WM_T_82571) {
   2611 		ifp->if_capabilities |= IFCAP_TSOv6;
   2612 	}
   2613 
   2614 #ifdef WM_MPSAFE
   2615 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2616 #else
   2617 	sc->sc_core_lock = NULL;
   2618 #endif
   2619 
   2620 	/* Attach the interface. */
   2621 	if_initialize(ifp);
   2622 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2623 	ether_ifattach(ifp, enaddr);
   2624 	if_register(ifp);
   2625 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2626 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2627 			  RND_FLAG_DEFAULT);
   2628 
   2629 #ifdef WM_EVENT_COUNTERS
   2630 	/* Attach event counters. */
   2631 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2632 	    NULL, xname, "linkintr");
   2633 
   2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2635 	    NULL, xname, "tx_xoff");
   2636 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2637 	    NULL, xname, "tx_xon");
   2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2639 	    NULL, xname, "rx_xoff");
   2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2641 	    NULL, xname, "rx_xon");
   2642 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2643 	    NULL, xname, "rx_macctl");
   2644 #endif /* WM_EVENT_COUNTERS */
   2645 
   2646 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2647 		pmf_class_network_register(self, ifp);
   2648 	else
   2649 		aprint_error_dev(self, "couldn't establish power handler\n");
   2650 
   2651 	sc->sc_flags |= WM_F_ATTACHED;
   2652  out:
   2653 	return;
   2654 }
   2655 
   2656 /* The detach function (ca_detach) */
   2657 static int
   2658 wm_detach(device_t self, int flags __unused)
   2659 {
   2660 	struct wm_softc *sc = device_private(self);
   2661 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2662 	int i;
   2663 
   2664 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2665 		return 0;
   2666 
   2667 	/* Stop the interface. Callouts are stopped in it. */
   2668 	wm_stop(ifp, 1);
   2669 
   2670 	pmf_device_deregister(self);
   2671 
   2672 	/* Tell the firmware about the release */
   2673 	WM_CORE_LOCK(sc);
   2674 	wm_release_manageability(sc);
   2675 	wm_release_hw_control(sc);
   2676 	wm_enable_wakeup(sc);
   2677 	WM_CORE_UNLOCK(sc);
   2678 
   2679 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2680 
   2681 	/* Delete all remaining media. */
   2682 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2683 
   2684 	ether_ifdetach(ifp);
   2685 	if_detach(ifp);
   2686 	if_percpuq_destroy(sc->sc_ipq);
   2687 
   2688 	/* Unload RX dmamaps and free mbufs */
   2689 	for (i = 0; i < sc->sc_nqueues; i++) {
   2690 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2691 		mutex_enter(rxq->rxq_lock);
   2692 		wm_rxdrain(rxq);
   2693 		mutex_exit(rxq->rxq_lock);
   2694 	}
   2695 	/* Must unlock here */
   2696 
   2697 	/* Disestablish the interrupt handler */
   2698 	for (i = 0; i < sc->sc_nintrs; i++) {
   2699 		if (sc->sc_ihs[i] != NULL) {
   2700 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2701 			sc->sc_ihs[i] = NULL;
   2702 		}
   2703 	}
   2704 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2705 
   2706 	wm_free_txrx_queues(sc);
   2707 
   2708 	/* Unmap the registers */
   2709 	if (sc->sc_ss) {
   2710 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2711 		sc->sc_ss = 0;
   2712 	}
   2713 	if (sc->sc_ios) {
   2714 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2715 		sc->sc_ios = 0;
   2716 	}
   2717 	if (sc->sc_flashs) {
   2718 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2719 		sc->sc_flashs = 0;
   2720 	}
   2721 
   2722 	if (sc->sc_core_lock)
   2723 		mutex_obj_free(sc->sc_core_lock);
   2724 	if (sc->sc_ich_phymtx)
   2725 		mutex_obj_free(sc->sc_ich_phymtx);
   2726 	if (sc->sc_ich_nvmmtx)
   2727 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2728 
   2729 	return 0;
   2730 }
   2731 
   2732 static bool
   2733 wm_suspend(device_t self, const pmf_qual_t *qual)
   2734 {
   2735 	struct wm_softc *sc = device_private(self);
   2736 
   2737 	wm_release_manageability(sc);
   2738 	wm_release_hw_control(sc);
   2739 	wm_enable_wakeup(sc);
   2740 
   2741 	return true;
   2742 }
   2743 
   2744 static bool
   2745 wm_resume(device_t self, const pmf_qual_t *qual)
   2746 {
   2747 	struct wm_softc *sc = device_private(self);
   2748 
   2749 	wm_init_manageability(sc);
   2750 
   2751 	return true;
   2752 }
   2753 
   2754 /*
   2755  * wm_watchdog:		[ifnet interface function]
   2756  *
   2757  *	Watchdog timer handler.
   2758  */
   2759 static void
   2760 wm_watchdog(struct ifnet *ifp)
   2761 {
   2762 	int qid;
   2763 	struct wm_softc *sc = ifp->if_softc;
   2764 
   2765 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2766 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2767 
   2768 		wm_watchdog_txq(ifp, txq);
   2769 	}
   2770 
   2771 	/* Reset the interface. */
   2772 	(void) wm_init(ifp);
   2773 
   2774 	/*
    2775 	 * Some upper-layer processing (e.g. ALTQ) still calls
    2776 	 * ifp->if_start() directly.
   2777 	 */
   2778 	/* Try to get more packets going. */
   2779 	ifp->if_start(ifp);
   2780 }
   2781 
   2782 static void
   2783 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2784 {
   2785 	struct wm_softc *sc = ifp->if_softc;
   2786 
   2787 	/*
   2788 	 * Since we're using delayed interrupts, sweep up
   2789 	 * before we report an error.
   2790 	 */
   2791 	mutex_enter(txq->txq_lock);
   2792 	wm_txeof(sc, txq);
   2793 	mutex_exit(txq->txq_lock);
   2794 
   2795 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2796 #ifdef WM_DEBUG
   2797 		int i, j;
   2798 		struct wm_txsoft *txs;
   2799 #endif
   2800 		log(LOG_ERR,
   2801 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2802 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2803 		    txq->txq_next);
   2804 		ifp->if_oerrors++;
   2805 #ifdef WM_DEBUG
   2806 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2807 		    i = WM_NEXTTXS(txq, i)) {
   2808 		    txs = &txq->txq_soft[i];
   2809 		    printf("txs %d tx %d -> %d\n",
   2810 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2811 		    for (j = txs->txs_firstdesc; ;
   2812 			j = WM_NEXTTX(txq, j)) {
   2813 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2814 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2815 			printf("\t %#08x%08x\n",
   2816 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2817 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2818 			if (j == txs->txs_lastdesc)
   2819 				break;
   2820 			}
   2821 		}
   2822 #endif
   2823 	}
   2824 }
   2825 
   2826 /*
   2827  * wm_tick:
   2828  *
   2829  *	One second timer, used to check link status, sweep up
   2830  *	completed transmit jobs, etc.
   2831  */
   2832 static void
   2833 wm_tick(void *arg)
   2834 {
   2835 	struct wm_softc *sc = arg;
   2836 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2837 #ifndef WM_MPSAFE
   2838 	int s = splnet();
   2839 #endif
   2840 
   2841 	WM_CORE_LOCK(sc);
   2842 
   2843 	if (sc->sc_core_stopping)
   2844 		goto out;
   2845 
   2846 	if (sc->sc_type >= WM_T_82542_2_1) {
   2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2850 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2851 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2852 	}
   2853 
   2854 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2855 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2856 	    + CSR_READ(sc, WMREG_CRCERRS)
   2857 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2858 	    + CSR_READ(sc, WMREG_SYMERRC)
   2859 	    + CSR_READ(sc, WMREG_RXERRC)
   2860 	    + CSR_READ(sc, WMREG_SEC)
   2861 	    + CSR_READ(sc, WMREG_CEXTERR)
   2862 	    + CSR_READ(sc, WMREG_RLEC);
   2863 	/*
   2864 	 * WMREG_RNBC is incremented when no receive buffers are available
   2865 	 * in host memory. It is not the number of dropped packets, because
   2866 	 * the ethernet controller can still receive packets in that case
   2867 	 * as long as there is space in the PHY's FIFO.
   2868 	 *
   2869 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
   2870 	 * of if_iqdrops.
   2871 	 */
   2872 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
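        	/*
        	 * A minimal sketch of such a dedicated counter (the sc_ev_rnbc
        	 * member is hypothetical, not part of this driver):
        	 *
        	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
        	 *	    NULL, device_xname(sc->sc_dev), "rnbc");
        	 *
        	 * and then, each second in this function:
        	 *
        	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
        	 */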
   2873 
   2874 	if (sc->sc_flags & WM_F_HAS_MII)
   2875 		mii_tick(&sc->sc_mii);
   2876 	else if ((sc->sc_type >= WM_T_82575)
   2877 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2878 		wm_serdes_tick(sc);
   2879 	else
   2880 		wm_tbi_tick(sc);
   2881 
   2882 out:
   2883 	WM_CORE_UNLOCK(sc);
   2884 #ifndef WM_MPSAFE
   2885 	splx(s);
   2886 #endif
   2887 
   2888 	if (!sc->sc_core_stopping)
   2889 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2890 }
   2891 
   2892 static int
   2893 wm_ifflags_cb(struct ethercom *ec)
   2894 {
   2895 	struct ifnet *ifp = &ec->ec_if;
   2896 	struct wm_softc *sc = ifp->if_softc;
   2897 	int rc = 0;
   2898 
   2899 	WM_CORE_LOCK(sc);
   2900 
   2901 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2902 	sc->sc_if_flags = ifp->if_flags;
   2903 
   2904 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2905 		rc = ENETRESET;
   2906 		goto out;
   2907 	}
   2908 
   2909 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2910 		wm_set_filter(sc);
   2911 
   2912 	wm_set_vlan(sc);
   2913 
   2914 out:
   2915 	WM_CORE_UNLOCK(sc);
   2916 
   2917 	return rc;
   2918 }
   2919 
   2920 /*
   2921  * wm_ioctl:		[ifnet interface function]
   2922  *
   2923  *	Handle control requests from the operator.
   2924  */
   2925 static int
   2926 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2927 {
   2928 	struct wm_softc *sc = ifp->if_softc;
   2929 	struct ifreq *ifr = (struct ifreq *) data;
   2930 	struct ifaddr *ifa = (struct ifaddr *)data;
   2931 	struct sockaddr_dl *sdl;
   2932 	int s, error;
   2933 
   2934 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2935 		device_xname(sc->sc_dev), __func__));
   2936 
   2937 #ifndef WM_MPSAFE
   2938 	s = splnet();
   2939 #endif
   2940 	switch (cmd) {
   2941 	case SIOCSIFMEDIA:
   2942 	case SIOCGIFMEDIA:
   2943 		WM_CORE_LOCK(sc);
   2944 		/* Flow control requires full-duplex mode. */
   2945 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2946 		    (ifr->ifr_media & IFM_FDX) == 0)
   2947 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2948 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2949 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2950 				/* We can do both TXPAUSE and RXPAUSE. */
   2951 				ifr->ifr_media |=
   2952 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2953 			}
   2954 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2955 		}
   2956 		WM_CORE_UNLOCK(sc);
   2957 #ifdef WM_MPSAFE
   2958 		s = splnet();
   2959 #endif
   2960 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2961 #ifdef WM_MPSAFE
   2962 		splx(s);
   2963 #endif
   2964 		break;
   2965 	case SIOCINITIFADDR:
   2966 		WM_CORE_LOCK(sc);
   2967 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2968 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2969 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2970 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2971 			/* unicast address is first multicast entry */
   2972 			wm_set_filter(sc);
   2973 			error = 0;
   2974 			WM_CORE_UNLOCK(sc);
   2975 			break;
   2976 		}
   2977 		WM_CORE_UNLOCK(sc);
   2978 		/*FALLTHROUGH*/
   2979 	default:
   2980 #ifdef WM_MPSAFE
   2981 		s = splnet();
   2982 #endif
   2983 		/* It may call wm_start, so unlock here */
   2984 		error = ether_ioctl(ifp, cmd, data);
   2985 #ifdef WM_MPSAFE
   2986 		splx(s);
   2987 #endif
   2988 		if (error != ENETRESET)
   2989 			break;
   2990 
   2991 		error = 0;
   2992 
   2993 		if (cmd == SIOCSIFCAP) {
   2994 			error = (*ifp->if_init)(ifp);
   2995 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2996 			;
   2997 		else if (ifp->if_flags & IFF_RUNNING) {
   2998 			/*
   2999 			 * Multicast list has changed; set the hardware filter
   3000 			 * accordingly.
   3001 			 */
   3002 			WM_CORE_LOCK(sc);
   3003 			wm_set_filter(sc);
   3004 			WM_CORE_UNLOCK(sc);
   3005 		}
   3006 		break;
   3007 	}
   3008 
   3009 #ifndef WM_MPSAFE
   3010 	splx(s);
   3011 #endif
   3012 	return error;
   3013 }
   3014 
   3015 /* MAC address related */
   3016 
   3017 /*
   3018  * Get the offset of the MAC address and return it.
   3019  * If an error occurs, offset 0 is used.
   3020  */
   3021 static uint16_t
   3022 wm_check_alt_mac_addr(struct wm_softc *sc)
   3023 {
   3024 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3025 	uint16_t offset = NVM_OFF_MACADDR;
   3026 
   3027 	/* Try to read alternative MAC address pointer */
   3028 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3029 		return 0;
   3030 
   3031 	/* Check whether the pointer is valid. */
   3032 	if ((offset == 0x0000) || (offset == 0xffff))
   3033 		return 0;
   3034 
   3035 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3036 	/*
   3037 	 * Check whether the alternative MAC address is valid.
   3038 	 * Some cards have a non-0xffff pointer but don't actually
   3039 	 * use an alternative MAC address.
   3040 	 *
   3041 	 * To validate it, check that the broadcast bit is not set.
   3042 	 */
   3043 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3044 		if (((myea[0] & 0xff) & 0x01) == 0)
   3045 			return offset; /* Found */
   3046 
   3047 	/* Not found */
   3048 	return 0;
   3049 }
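        /*
         * Illustrative example: if the pointer word at NVM_OFF_ALT_MAC_ADDR_PTR
         * reads 0x0040, the alternative MAC address of this function starts at
         * word 0x0040 + NVM_OFF_MACADDR_82571(sc->sc_funcid), and it is only
         * accepted if bit 0 of its first byte (the broadcast/multicast bit)
         * is clear.
         */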
   3050 
   3051 static int
   3052 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3053 {
   3054 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3055 	uint16_t offset = NVM_OFF_MACADDR;
   3056 	int do_invert = 0;
   3057 
   3058 	switch (sc->sc_type) {
   3059 	case WM_T_82580:
   3060 	case WM_T_I350:
   3061 	case WM_T_I354:
   3062 		/* EEPROM Top Level Partitioning */
   3063 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3064 		break;
   3065 	case WM_T_82571:
   3066 	case WM_T_82575:
   3067 	case WM_T_82576:
   3068 	case WM_T_80003:
   3069 	case WM_T_I210:
   3070 	case WM_T_I211:
   3071 		offset = wm_check_alt_mac_addr(sc);
   3072 		if (offset == 0)
   3073 			if ((sc->sc_funcid & 0x01) == 1)
   3074 				do_invert = 1;
   3075 		break;
   3076 	default:
   3077 		if ((sc->sc_funcid & 0x01) == 1)
   3078 			do_invert = 1;
   3079 		break;
   3080 	}
   3081 
   3082 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3083 		goto bad;
   3084 
   3085 	enaddr[0] = myea[0] & 0xff;
   3086 	enaddr[1] = myea[0] >> 8;
   3087 	enaddr[2] = myea[1] & 0xff;
   3088 	enaddr[3] = myea[1] >> 8;
   3089 	enaddr[4] = myea[2] & 0xff;
   3090 	enaddr[5] = myea[2] >> 8;
   3091 
   3092 	/*
   3093 	 * Toggle the LSB of the MAC address on the second port
   3094 	 * of some dual port cards.
   3095 	 */
   3096 	if (do_invert != 0)
   3097 		enaddr[5] ^= 1;
   3098 
   3099 	return 0;
   3100 
   3101  bad:
   3102 	return -1;
   3103 }
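        /*
         * Illustrative example: the MAC address is stored in the NVM as three
         * little-endian 16-bit words, so myea[] = { 0x2211, 0x4433, 0x6655 }
         * unpacks to 11:22:33:44:55:66 above.  With do_invert set (the second
         * port of some dual port cards), the last byte becomes 0x67.
         */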
   3104 
   3105 /*
   3106  * wm_set_ral:
   3107  *
   3108  *	Set an entry in the receive address list.
   3109  */
   3110 static void
   3111 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3112 {
   3113 	uint32_t ral_lo, ral_hi;
   3114 
   3115 	if (enaddr != NULL) {
   3116 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3117 		    (enaddr[3] << 24);
   3118 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3119 		ral_hi |= RAL_AV;
   3120 	} else {
   3121 		ral_lo = 0;
   3122 		ral_hi = 0;
   3123 	}
   3124 
   3125 	if (sc->sc_type >= WM_T_82544) {
   3126 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3127 		    ral_lo);
   3128 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3129 		    ral_hi);
   3130 	} else {
   3131 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3132 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3133 	}
   3134 }
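        /*
         * Illustrative example: for enaddr 00:11:22:33:44:55 the address is
         * packed little-endian into the register pair, giving
         * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV (the
         * "address valid" bit).
         */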
   3135 
   3136 /*
   3137  * wm_mchash:
   3138  *
   3139  *	Compute the hash of the multicast address for the 4096-bit
   3140  *	multicast filter.
   3141  */
   3142 static uint32_t
   3143 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3144 {
   3145 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3146 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3147 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3148 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3149 	uint32_t hash;
   3150 
   3151 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3152 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3153 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3154 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3155 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3156 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3157 		return (hash & 0x3ff);
   3158 	}
   3159 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3160 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3161 
   3162 	return (hash & 0xfff);
   3163 }
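        /*
         * Worked example (non-ICH/PCH, sc_mchash_type == 0, so lo_shift and
         * hi_shift are both 4): with enaddr[4] = 0x12 and enaddr[5] = 0x34,
         * hash = (0x12 >> 4) | (0x34 << 4) = 0x341.  wm_set_filter() then
         * uses hash >> 5 = 0x1a as the MTA register index and
         * hash & 0x1f = 0x01 as the bit within that register.
         */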
   3164 
   3165 /*
   3166  * wm_set_filter:
   3167  *
   3168  *	Set up the receive filter.
   3169  */
   3170 static void
   3171 wm_set_filter(struct wm_softc *sc)
   3172 {
   3173 	struct ethercom *ec = &sc->sc_ethercom;
   3174 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3175 	struct ether_multi *enm;
   3176 	struct ether_multistep step;
   3177 	bus_addr_t mta_reg;
   3178 	uint32_t hash, reg, bit;
   3179 	int i, size, ralmax;
   3180 
   3181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3182 		device_xname(sc->sc_dev), __func__));
   3183 
   3184 	if (sc->sc_type >= WM_T_82544)
   3185 		mta_reg = WMREG_CORDOVA_MTA;
   3186 	else
   3187 		mta_reg = WMREG_MTA;
   3188 
   3189 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3190 
   3191 	if (ifp->if_flags & IFF_BROADCAST)
   3192 		sc->sc_rctl |= RCTL_BAM;
   3193 	if (ifp->if_flags & IFF_PROMISC) {
   3194 		sc->sc_rctl |= RCTL_UPE;
   3195 		goto allmulti;
   3196 	}
   3197 
   3198 	/*
   3199 	 * Set the station address in the first RAL slot, and
   3200 	 * clear the remaining slots.
   3201 	 */
   3202 	if (sc->sc_type == WM_T_ICH8)
   3203 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3204 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3205 	    || (sc->sc_type == WM_T_PCH))
   3206 		size = WM_RAL_TABSIZE_ICH8;
   3207 	else if (sc->sc_type == WM_T_PCH2)
   3208 		size = WM_RAL_TABSIZE_PCH2;
   3209 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3210 		size = WM_RAL_TABSIZE_PCH_LPT;
   3211 	else if (sc->sc_type == WM_T_82575)
   3212 		size = WM_RAL_TABSIZE_82575;
   3213 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3214 		size = WM_RAL_TABSIZE_82576;
   3215 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3216 		size = WM_RAL_TABSIZE_I350;
   3217 	else
   3218 		size = WM_RAL_TABSIZE;
   3219 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3220 
   3221 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3222 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3223 		switch (i) {
   3224 		case 0:
   3225 			/* We can use all entries */
   3226 			ralmax = size;
   3227 			break;
   3228 		case 1:
   3229 			/* Only RAR[0] */
   3230 			ralmax = 1;
   3231 			break;
   3232 		default:
   3233 			/* available SHRA + RAR[0] */
   3234 			ralmax = i + 1;
   3235 		}
   3236 	} else
   3237 		ralmax = size;
   3238 	for (i = 1; i < size; i++) {
   3239 		if (i < ralmax)
   3240 			wm_set_ral(sc, NULL, i);
   3241 	}
   3242 
   3243 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3244 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3245 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3246 	    || (sc->sc_type == WM_T_PCH_SPT))
   3247 		size = WM_ICH8_MC_TABSIZE;
   3248 	else
   3249 		size = WM_MC_TABSIZE;
   3250 	/* Clear out the multicast table. */
   3251 	for (i = 0; i < size; i++)
   3252 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3253 
   3254 	ETHER_FIRST_MULTI(step, ec, enm);
   3255 	while (enm != NULL) {
   3256 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3257 			/*
   3258 			 * We must listen to a range of multicast addresses.
   3259 			 * For now, just accept all multicasts, rather than
   3260 			 * trying to set only those filter bits needed to match
   3261 			 * the range.  (At this time, the only use of address
   3262 			 * ranges is for IP multicast routing, for which the
   3263 			 * range is big enough to require all bits set.)
   3264 			 */
   3265 			goto allmulti;
   3266 		}
   3267 
   3268 		hash = wm_mchash(sc, enm->enm_addrlo);
   3269 
   3270 		reg = (hash >> 5);
   3271 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3272 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3273 		    || (sc->sc_type == WM_T_PCH2)
   3274 		    || (sc->sc_type == WM_T_PCH_LPT)
   3275 		    || (sc->sc_type == WM_T_PCH_SPT))
   3276 			reg &= 0x1f;
   3277 		else
   3278 			reg &= 0x7f;
   3279 		bit = hash & 0x1f;
   3280 
   3281 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3282 		hash |= 1U << bit;
   3283 
   3284 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3285 			/*
   3286 			 * 82544 Errata 9: Certain registers cannot be written
   3287 			 * with particular alignments in PCI-X bus operation
   3288 			 * (FCAH, MTA and VFTA).
   3289 			 */
   3290 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3291 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3292 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3293 		} else
   3294 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3295 
   3296 		ETHER_NEXT_MULTI(step, enm);
   3297 	}
   3298 
   3299 	ifp->if_flags &= ~IFF_ALLMULTI;
   3300 	goto setit;
   3301 
   3302  allmulti:
   3303 	ifp->if_flags |= IFF_ALLMULTI;
   3304 	sc->sc_rctl |= RCTL_MPE;
   3305 
   3306  setit:
   3307 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3308 }
   3309 
   3310 /* Reset and init related */
   3311 
   3312 static void
   3313 wm_set_vlan(struct wm_softc *sc)
   3314 {
   3315 
   3316 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3317 		device_xname(sc->sc_dev), __func__));
   3318 
   3319 	/* Deal with VLAN enables. */
   3320 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3321 		sc->sc_ctrl |= CTRL_VME;
   3322 	else
   3323 		sc->sc_ctrl &= ~CTRL_VME;
   3324 
   3325 	/* Write the control registers. */
   3326 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3327 }
   3328 
   3329 static void
   3330 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3331 {
   3332 	uint32_t gcr;
   3333 	pcireg_t ctrl2;
   3334 
   3335 	gcr = CSR_READ(sc, WMREG_GCR);
   3336 
   3337 	/* Only take action if timeout value is defaulted to 0 */
   3338 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3339 		goto out;
   3340 
   3341 	if ((gcr & GCR_CAP_VER2) == 0) {
   3342 		gcr |= GCR_CMPL_TMOUT_10MS;
   3343 		goto out;
   3344 	}
   3345 
   3346 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3347 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3348 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3349 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3350 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3351 
   3352 out:
   3353 	/* Disable completion timeout resend */
   3354 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3355 
   3356 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3357 }
   3358 
   3359 void
   3360 wm_get_auto_rd_done(struct wm_softc *sc)
   3361 {
   3362 	int i;
   3363 
   3364 	/* wait for eeprom to reload */
   3365 	switch (sc->sc_type) {
   3366 	case WM_T_82571:
   3367 	case WM_T_82572:
   3368 	case WM_T_82573:
   3369 	case WM_T_82574:
   3370 	case WM_T_82583:
   3371 	case WM_T_82575:
   3372 	case WM_T_82576:
   3373 	case WM_T_82580:
   3374 	case WM_T_I350:
   3375 	case WM_T_I354:
   3376 	case WM_T_I210:
   3377 	case WM_T_I211:
   3378 	case WM_T_80003:
   3379 	case WM_T_ICH8:
   3380 	case WM_T_ICH9:
   3381 		for (i = 0; i < 10; i++) {
   3382 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3383 				break;
   3384 			delay(1000);
   3385 		}
   3386 		if (i == 10) {
   3387 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3388 			    "complete\n", device_xname(sc->sc_dev));
   3389 		}
   3390 		break;
   3391 	default:
   3392 		break;
   3393 	}
   3394 }
   3395 
   3396 void
   3397 wm_lan_init_done(struct wm_softc *sc)
   3398 {
   3399 	uint32_t reg = 0;
   3400 	int i;
   3401 
   3402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3403 		device_xname(sc->sc_dev), __func__));
   3404 
   3405 	/* Wait for eeprom to reload */
   3406 	switch (sc->sc_type) {
   3407 	case WM_T_ICH10:
   3408 	case WM_T_PCH:
   3409 	case WM_T_PCH2:
   3410 	case WM_T_PCH_LPT:
   3411 	case WM_T_PCH_SPT:
   3412 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3413 			reg = CSR_READ(sc, WMREG_STATUS);
   3414 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3415 				break;
   3416 			delay(100);
   3417 		}
   3418 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3419 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3420 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3421 		}
   3422 		break;
   3423 	default:
   3424 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3425 		    __func__);
   3426 		break;
   3427 	}
   3428 
   3429 	reg &= ~STATUS_LAN_INIT_DONE;
   3430 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3431 }
   3432 
   3433 void
   3434 wm_get_cfg_done(struct wm_softc *sc)
   3435 {
   3436 	int mask;
   3437 	uint32_t reg;
   3438 	int i;
   3439 
   3440 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3441 		device_xname(sc->sc_dev), __func__));
   3442 
   3443 	/* Wait for eeprom to reload */
   3444 	switch (sc->sc_type) {
   3445 	case WM_T_82542_2_0:
   3446 	case WM_T_82542_2_1:
   3447 		/* null */
   3448 		break;
   3449 	case WM_T_82543:
   3450 	case WM_T_82544:
   3451 	case WM_T_82540:
   3452 	case WM_T_82545:
   3453 	case WM_T_82545_3:
   3454 	case WM_T_82546:
   3455 	case WM_T_82546_3:
   3456 	case WM_T_82541:
   3457 	case WM_T_82541_2:
   3458 	case WM_T_82547:
   3459 	case WM_T_82547_2:
   3460 	case WM_T_82573:
   3461 	case WM_T_82574:
   3462 	case WM_T_82583:
   3463 		/* generic */
   3464 		delay(10*1000);
   3465 		break;
   3466 	case WM_T_80003:
   3467 	case WM_T_82571:
   3468 	case WM_T_82572:
   3469 	case WM_T_82575:
   3470 	case WM_T_82576:
   3471 	case WM_T_82580:
   3472 	case WM_T_I350:
   3473 	case WM_T_I354:
   3474 	case WM_T_I210:
   3475 	case WM_T_I211:
   3476 		if (sc->sc_type == WM_T_82571) {
   3477 			/* Only 82571 shares port 0 */
   3478 			mask = EEMNGCTL_CFGDONE_0;
   3479 		} else
   3480 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3481 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3482 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3483 				break;
   3484 			delay(1000);
   3485 		}
   3486 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3487 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3488 				device_xname(sc->sc_dev), __func__));
   3489 		}
   3490 		break;
   3491 	case WM_T_ICH8:
   3492 	case WM_T_ICH9:
   3493 	case WM_T_ICH10:
   3494 	case WM_T_PCH:
   3495 	case WM_T_PCH2:
   3496 	case WM_T_PCH_LPT:
   3497 	case WM_T_PCH_SPT:
   3498 		delay(10*1000);
   3499 		if (sc->sc_type >= WM_T_ICH10)
   3500 			wm_lan_init_done(sc);
   3501 		else
   3502 			wm_get_auto_rd_done(sc);
   3503 
   3504 		reg = CSR_READ(sc, WMREG_STATUS);
   3505 		if ((reg & STATUS_PHYRA) != 0)
   3506 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3507 		break;
   3508 	default:
   3509 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3510 		    __func__);
   3511 		break;
   3512 	}
   3513 }
   3514 
   3515 /* Init hardware bits */
   3516 void
   3517 wm_initialize_hardware_bits(struct wm_softc *sc)
   3518 {
   3519 	uint32_t tarc0, tarc1, reg;
   3520 
   3521 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3522 		device_xname(sc->sc_dev), __func__));
   3523 
   3524 	/* For 82571 variants, 80003 and ICHs */
   3525 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3526 	    || (sc->sc_type >= WM_T_80003)) {
   3527 
   3528 		/* Transmit Descriptor Control 0 */
   3529 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3530 		reg |= TXDCTL_COUNT_DESC;
   3531 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3532 
   3533 		/* Transmit Descriptor Control 1 */
   3534 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3535 		reg |= TXDCTL_COUNT_DESC;
   3536 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3537 
   3538 		/* TARC0 */
   3539 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3540 		switch (sc->sc_type) {
   3541 		case WM_T_82571:
   3542 		case WM_T_82572:
   3543 		case WM_T_82573:
   3544 		case WM_T_82574:
   3545 		case WM_T_82583:
   3546 		case WM_T_80003:
   3547 			/* Clear bits 30..27 */
   3548 			tarc0 &= ~__BITS(30, 27);
   3549 			break;
   3550 		default:
   3551 			break;
   3552 		}
   3553 
   3554 		switch (sc->sc_type) {
   3555 		case WM_T_82571:
   3556 		case WM_T_82572:
   3557 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3558 
   3559 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3560 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3561 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3562 			/* 8257[12] Errata No.7 */
   3563 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3564 
   3565 			/* TARC1 bit 28 */
   3566 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3567 				tarc1 &= ~__BIT(28);
   3568 			else
   3569 				tarc1 |= __BIT(28);
   3570 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3571 
   3572 			/*
   3573 			 * 8257[12] Errata No.13
   3574 			 * Disable Dynamic Clock Gating.
   3575 			 */
   3576 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3577 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3578 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3579 			break;
   3580 		case WM_T_82573:
   3581 		case WM_T_82574:
   3582 		case WM_T_82583:
   3583 			if ((sc->sc_type == WM_T_82574)
   3584 			    || (sc->sc_type == WM_T_82583))
   3585 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3586 
   3587 			/* Extended Device Control */
   3588 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3589 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3590 			reg |= __BIT(22);	/* Set bit 22 */
   3591 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3592 
   3593 			/* Device Control */
   3594 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3595 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3596 
   3597 			/* PCIe Control Register */
   3598 			/*
   3599 			 * 82573 Errata (unknown).
   3600 			 *
   3601 			 * 82574 Errata 25 and 82583 Errata 12
   3602 			 * "Dropped Rx Packets":
   3603 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   3604 			 */
   3605 			reg = CSR_READ(sc, WMREG_GCR);
   3606 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3607 			CSR_WRITE(sc, WMREG_GCR, reg);
   3608 
   3609 			if ((sc->sc_type == WM_T_82574)
   3610 			    || (sc->sc_type == WM_T_82583)) {
   3611 				/*
   3612 				 * Document says this bit must be set for
   3613 				 * proper operation.
   3614 				 */
   3615 				reg = CSR_READ(sc, WMREG_GCR);
   3616 				reg |= __BIT(22);
   3617 				CSR_WRITE(sc, WMREG_GCR, reg);
   3618 
   3619 				/*
   3620 				 * Apply a workaround for a hardware erratum
   3621 				 * documented in the errata sheets. It fixes
   3622 				 * an issue where error-prone or unreliable
   3623 				 * PCIe completions occur, particularly with
   3624 				 * ASPM enabled. Without the fix, the issue
   3625 				 * can cause Tx timeouts.
   3626 				 */
   3627 				reg = CSR_READ(sc, WMREG_GCR2);
   3628 				reg |= __BIT(0);
   3629 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3630 			}
   3631 			break;
   3632 		case WM_T_80003:
   3633 			/* TARC0 */
   3634 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3635 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3636 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3637 
   3638 			/* TARC1 bit 28 */
   3639 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3640 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3641 				tarc1 &= ~__BIT(28);
   3642 			else
   3643 				tarc1 |= __BIT(28);
   3644 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3645 			break;
   3646 		case WM_T_ICH8:
   3647 		case WM_T_ICH9:
   3648 		case WM_T_ICH10:
   3649 		case WM_T_PCH:
   3650 		case WM_T_PCH2:
   3651 		case WM_T_PCH_LPT:
   3652 		case WM_T_PCH_SPT:
   3653 			/* TARC0 */
   3654 			if ((sc->sc_type == WM_T_ICH8)
   3655 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3656 				/* Set TARC0 bits 29 and 28 */
   3657 				tarc0 |= __BITS(29, 28);
   3658 			}
   3659 			/* Set TARC0 bits 23,24,26,27 */
   3660 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3661 
   3662 			/* CTRL_EXT */
   3663 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3664 			reg |= __BIT(22);	/* Set bit 22 */
   3665 			/*
   3666 			 * Enable PHY low-power state when MAC is at D3
   3667 			 * w/o WoL
   3668 			 */
   3669 			if (sc->sc_type >= WM_T_PCH)
   3670 				reg |= CTRL_EXT_PHYPDEN;
   3671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3672 
   3673 			/* TARC1 */
   3674 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3675 			/* bit 28 */
   3676 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3677 				tarc1 &= ~__BIT(28);
   3678 			else
   3679 				tarc1 |= __BIT(28);
   3680 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3681 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3682 
   3683 			/* Device Status */
   3684 			if (sc->sc_type == WM_T_ICH8) {
   3685 				reg = CSR_READ(sc, WMREG_STATUS);
   3686 				reg &= ~__BIT(31);
   3687 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3688 
   3689 			}
   3690 
   3691 			/* IOSFPC */
   3692 			if (sc->sc_type == WM_T_PCH_SPT) {
   3693 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3694 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3695 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3696 			}
   3697 			/*
   3698 			 * To work around a descriptor data corruption issue
   3699 			 * during NFS v2 UDP traffic, just disable the NFS
   3700 			 * filtering capability.
   3701 			 */
   3702 			reg = CSR_READ(sc, WMREG_RFCTL);
   3703 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3704 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3705 			break;
   3706 		default:
   3707 			break;
   3708 		}
   3709 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3710 
   3711 		/*
   3712 		 * 8257[12] Errata No.52 and some others.
   3713 		 * Avoid RSS Hash Value bug.
   3714 		 */
   3715 		switch (sc->sc_type) {
   3716 		case WM_T_82571:
   3717 		case WM_T_82572:
   3718 		case WM_T_82573:
   3719 		case WM_T_80003:
   3720 		case WM_T_ICH8:
   3721 			reg = CSR_READ(sc, WMREG_RFCTL);
   3722 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3723 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3724 			break;
   3725 		default:
   3726 			break;
   3727 		}
   3728 	}
   3729 }
   3730 
   3731 static uint32_t
   3732 wm_rxpbs_adjust_82580(uint32_t val)
   3733 {
   3734 	uint32_t rv = 0;
   3735 
   3736 	if (val < __arraycount(wm_82580_rxpbs_table))
   3737 		rv = wm_82580_rxpbs_table[val];
   3738 
   3739 	return rv;
   3740 }
   3741 
   3742 /*
   3743  * wm_reset_phy:
   3744  *
   3745  *	generic PHY reset function.
   3746  *	Same as e1000_phy_hw_reset_generic()
   3747  */
   3748 static void
   3749 wm_reset_phy(struct wm_softc *sc)
   3750 {
   3751 	uint32_t reg;
   3752 
   3753 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3754 		device_xname(sc->sc_dev), __func__));
   3755 	if (wm_phy_resetisblocked(sc))
   3756 		return;
   3757 
   3758 	sc->phy.acquire(sc);
   3759 
   3760 	reg = CSR_READ(sc, WMREG_CTRL);
   3761 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3762 	CSR_WRITE_FLUSH(sc);
   3763 
   3764 	delay(sc->phy.reset_delay_us);
   3765 
   3766 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3767 	CSR_WRITE_FLUSH(sc);
   3768 
   3769 	delay(150);
   3770 
   3771 	sc->phy.release(sc);
   3772 
   3773 	wm_get_cfg_done(sc);
   3774 }
   3775 
   3776 static void
   3777 wm_flush_desc_rings(struct wm_softc *sc)
   3778 {
   3779 	pcireg_t preg;
   3780 	uint32_t reg;
   3781 	int nexttx;
   3782 
   3783 	/* First, disable MULR fix in FEXTNVM11 */
   3784 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3785 	reg |= FEXTNVM11_DIS_MULRFIX;
   3786 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3787 
   3788 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3789 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3790 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3791 		struct wm_txqueue *txq;
   3792 		wiseman_txdesc_t *txd;
   3793 
   3794 		/* TX */
   3795 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3796 		    device_xname(sc->sc_dev), preg, reg);
   3797 		reg = CSR_READ(sc, WMREG_TCTL);
   3798 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3799 
   3800 		txq = &sc->sc_queue[0].wmq_txq;
   3801 		nexttx = txq->txq_next;
   3802 		txd = &txq->txq_descs[nexttx];
   3803 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3804 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
   3805 		txd->wtx_fields.wtxu_status = 0;
   3806 		txd->wtx_fields.wtxu_options = 0;
   3807 		txd->wtx_fields.wtxu_vlan = 0;
   3808 
   3809 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3810 			BUS_SPACE_BARRIER_WRITE);
   3811 
   3812 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3813 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3814 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3815 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3816 		delay(250);
   3817 	}
   3818 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3819 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3820 		uint32_t rctl;
   3821 
   3822 		/* RX */
   3823 		printf("%s: Need RX flush (reg = %08x)\n",
   3824 		    device_xname(sc->sc_dev), preg);
   3825 		rctl = CSR_READ(sc, WMREG_RCTL);
   3826 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3827 		CSR_WRITE_FLUSH(sc);
   3828 		delay(150);
   3829 
   3830 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3831 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3832 		reg &= 0xffffc000;
   3833 		/*
   3834 		 * update thresholds: prefetch threshold to 31, host threshold
   3835 		 * to 1 and make sure the granularity is "descriptors" and not
   3836 		 * "cache lines"
   3837 		 */
   3838 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3839 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3840 
   3841 		/*
   3842 		 * momentarily enable the RX ring for the changes to take
   3843 		 * effect
   3844 		 */
   3845 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3846 		CSR_WRITE_FLUSH(sc);
   3847 		delay(150);
   3848 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3849 	}
   3850 }
   3851 
   3852 /*
   3853  * wm_reset:
   3854  *
   3855  *	Reset the i82542 chip.
   3856  */
   3857 static void
   3858 wm_reset(struct wm_softc *sc)
   3859 {
   3860 	int phy_reset = 0;
   3861 	int i, error = 0;
   3862 	uint32_t reg;
   3863 
   3864 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3865 		device_xname(sc->sc_dev), __func__));
   3866 	KASSERT(sc->sc_type != 0);
   3867 
   3868 	/*
   3869 	 * Allocate on-chip memory according to the MTU size.
   3870 	 * The Packet Buffer Allocation register must be written
   3871 	 * before the chip is reset.
   3872 	 */
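        	/*
        	 * Illustrative example: on an 82547 with an MTU <= 8192, sc_pba
        	 * is set to PBA_30K below, so receive gets 30KB of the 40KB
        	 * packet buffer and the Tx FIFO gets the remaining 10KB
        	 * ((PBA_40K - sc_pba) << PBA_BYTE_SHIFT bytes).
        	 */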
   3873 	switch (sc->sc_type) {
   3874 	case WM_T_82547:
   3875 	case WM_T_82547_2:
   3876 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3877 		    PBA_22K : PBA_30K;
   3878 		for (i = 0; i < sc->sc_nqueues; i++) {
   3879 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3880 			txq->txq_fifo_head = 0;
   3881 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3882 			txq->txq_fifo_size =
   3883 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3884 			txq->txq_fifo_stall = 0;
   3885 		}
   3886 		break;
   3887 	case WM_T_82571:
   3888 	case WM_T_82572:
   3889 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3890 	case WM_T_80003:
   3891 		sc->sc_pba = PBA_32K;
   3892 		break;
   3893 	case WM_T_82573:
   3894 		sc->sc_pba = PBA_12K;
   3895 		break;
   3896 	case WM_T_82574:
   3897 	case WM_T_82583:
   3898 		sc->sc_pba = PBA_20K;
   3899 		break;
   3900 	case WM_T_82576:
   3901 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3902 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3903 		break;
   3904 	case WM_T_82580:
   3905 	case WM_T_I350:
   3906 	case WM_T_I354:
   3907 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3908 		break;
   3909 	case WM_T_I210:
   3910 	case WM_T_I211:
   3911 		sc->sc_pba = PBA_34K;
   3912 		break;
   3913 	case WM_T_ICH8:
   3914 		/* Workaround for a bit corruption issue in FIFO memory */
   3915 		sc->sc_pba = PBA_8K;
   3916 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3917 		break;
   3918 	case WM_T_ICH9:
   3919 	case WM_T_ICH10:
   3920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3921 		    PBA_14K : PBA_10K;
   3922 		break;
   3923 	case WM_T_PCH:
   3924 	case WM_T_PCH2:
   3925 	case WM_T_PCH_LPT:
   3926 	case WM_T_PCH_SPT:
   3927 		sc->sc_pba = PBA_26K;
   3928 		break;
   3929 	default:
   3930 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3931 		    PBA_40K : PBA_48K;
   3932 		break;
   3933 	}
   3934 	/*
   3935 	 * Only old or non-multiqueue devices have the PBA register.
   3936 	 * XXX Need special handling for 82575.
   3937 	 */
   3938 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3939 	    || (sc->sc_type == WM_T_82575))
   3940 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3941 
   3942 	/* Prevent the PCI-E bus from sticking */
   3943 	if (sc->sc_flags & WM_F_PCIE) {
   3944 		int timeout = 800;
   3945 
   3946 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3947 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3948 
   3949 		while (timeout--) {
   3950 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3951 			    == 0)
   3952 				break;
   3953 			delay(100);
   3954 		}
   3955 	}
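        	/*
        	 * The loop above polls up to 800 times with a 100us delay, i.e.
        	 * it waits at most 80ms for the GIO master to become idle.
        	 */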
   3956 
   3957 	/* Set the completion timeout for interface */
   3958 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3959 	    || (sc->sc_type == WM_T_82580)
   3960 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3961 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3962 		wm_set_pcie_completion_timeout(sc);
   3963 
   3964 	/* Clear interrupt */
   3965 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3966 	if (sc->sc_nintrs > 1) {
   3967 		if (sc->sc_type != WM_T_82574) {
   3968 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3969 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3970 		} else {
   3971 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3972 		}
   3973 	}
   3974 
   3975 	/* Stop the transmit and receive processes. */
   3976 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3977 	sc->sc_rctl &= ~RCTL_EN;
   3978 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3979 	CSR_WRITE_FLUSH(sc);
   3980 
   3981 	/* XXX set_tbi_sbp_82543() */
   3982 
   3983 	delay(10*1000);
   3984 
   3985 	/* Must acquire the MDIO ownership before MAC reset */
   3986 	switch (sc->sc_type) {
   3987 	case WM_T_82573:
   3988 	case WM_T_82574:
   3989 	case WM_T_82583:
   3990 		error = wm_get_hw_semaphore_82573(sc);
   3991 		break;
   3992 	default:
   3993 		break;
   3994 	}
   3995 
   3996 	/*
   3997 	 * 82541 Errata 29? & 82547 Errata 28?
   3998 	 * See also the description of the PHY_RST bit in the CTRL register
   3999 	 * in 8254x_GBe_SDM.pdf.
   4000 	 */
   4001 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4002 		CSR_WRITE(sc, WMREG_CTRL,
   4003 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4004 		CSR_WRITE_FLUSH(sc);
   4005 		delay(5000);
   4006 	}
   4007 
   4008 	switch (sc->sc_type) {
   4009 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4010 	case WM_T_82541:
   4011 	case WM_T_82541_2:
   4012 	case WM_T_82547:
   4013 	case WM_T_82547_2:
   4014 		/*
   4015 		 * On some chipsets, a reset through a memory-mapped write
   4016 		 * cycle can cause the chip to reset before completing the
   4017 		 * write cycle.  This causes a major headache that can be
   4018 		 * avoided by issuing the reset via indirect register writes
   4019 		 * through I/O space.
   4020 		 *
   4021 		 * So, if we successfully mapped the I/O BAR at attach time,
   4022 		 * use that.  Otherwise, try our luck with a memory-mapped
   4023 		 * reset.
   4024 		 */
   4025 		if (sc->sc_flags & WM_F_IOH_VALID)
   4026 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4027 		else
   4028 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4029 		break;
   4030 	case WM_T_82545_3:
   4031 	case WM_T_82546_3:
   4032 		/* Use the shadow control register on these chips. */
   4033 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4034 		break;
   4035 	case WM_T_80003:
   4036 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4037 		sc->phy.acquire(sc);
   4038 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4039 		sc->phy.release(sc);
   4040 		break;
   4041 	case WM_T_ICH8:
   4042 	case WM_T_ICH9:
   4043 	case WM_T_ICH10:
   4044 	case WM_T_PCH:
   4045 	case WM_T_PCH2:
   4046 	case WM_T_PCH_LPT:
   4047 	case WM_T_PCH_SPT:
   4048 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4049 		if (wm_phy_resetisblocked(sc) == false) {
   4050 			/*
   4051 			 * Gate automatic PHY configuration by hardware on
   4052 			 * non-managed 82579
   4053 			 */
   4054 			if ((sc->sc_type == WM_T_PCH2)
   4055 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4056 				== 0))
   4057 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4058 
   4059 			reg |= CTRL_PHY_RESET;
   4060 			phy_reset = 1;
   4061 		} else
   4062 			printf("XXX reset is blocked!!!\n");
   4063 		sc->phy.acquire(sc);
   4064 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4065 		/* Don't insert a completion barrier when reset */
   4066 		delay(20*1000);
   4067 		mutex_exit(sc->sc_ich_phymtx);
   4068 		break;
   4069 	case WM_T_82580:
   4070 	case WM_T_I350:
   4071 	case WM_T_I354:
   4072 	case WM_T_I210:
   4073 	case WM_T_I211:
   4074 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4075 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4076 			CSR_WRITE_FLUSH(sc);
   4077 		delay(5000);
   4078 		break;
   4079 	case WM_T_82542_2_0:
   4080 	case WM_T_82542_2_1:
   4081 	case WM_T_82543:
   4082 	case WM_T_82540:
   4083 	case WM_T_82545:
   4084 	case WM_T_82546:
   4085 	case WM_T_82571:
   4086 	case WM_T_82572:
   4087 	case WM_T_82573:
   4088 	case WM_T_82574:
   4089 	case WM_T_82575:
   4090 	case WM_T_82576:
   4091 	case WM_T_82583:
   4092 	default:
   4093 		/* Everything else can safely use the documented method. */
   4094 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4095 		break;
   4096 	}
   4097 
   4098 	/* Must release the MDIO ownership after MAC reset */
   4099 	switch (sc->sc_type) {
   4100 	case WM_T_82573:
   4101 	case WM_T_82574:
   4102 	case WM_T_82583:
   4103 		if (error == 0)
   4104 			wm_put_hw_semaphore_82573(sc);
   4105 		break;
   4106 	default:
   4107 		break;
   4108 	}
   4109 
   4110 	if (phy_reset != 0)
   4111 		wm_get_cfg_done(sc);
   4112 
   4113 	/* reload EEPROM */
   4114 	switch (sc->sc_type) {
   4115 	case WM_T_82542_2_0:
   4116 	case WM_T_82542_2_1:
   4117 	case WM_T_82543:
   4118 	case WM_T_82544:
   4119 		delay(10);
   4120 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4121 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4122 		CSR_WRITE_FLUSH(sc);
   4123 		delay(2000);
   4124 		break;
   4125 	case WM_T_82540:
   4126 	case WM_T_82545:
   4127 	case WM_T_82545_3:
   4128 	case WM_T_82546:
   4129 	case WM_T_82546_3:
   4130 		delay(5*1000);
   4131 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4132 		break;
   4133 	case WM_T_82541:
   4134 	case WM_T_82541_2:
   4135 	case WM_T_82547:
   4136 	case WM_T_82547_2:
   4137 		delay(20000);
   4138 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4139 		break;
   4140 	case WM_T_82571:
   4141 	case WM_T_82572:
   4142 	case WM_T_82573:
   4143 	case WM_T_82574:
   4144 	case WM_T_82583:
   4145 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4146 			delay(10);
   4147 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4149 			CSR_WRITE_FLUSH(sc);
   4150 		}
   4151 		/* check EECD_EE_AUTORD */
   4152 		wm_get_auto_rd_done(sc);
   4153 		/*
   4154 		 * PHY configuration from the NVM starts just after
   4155 		 * EECD_AUTO_RD is set.
   4156 		 */
   4157 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4158 		    || (sc->sc_type == WM_T_82583))
   4159 			delay(25*1000);
   4160 		break;
   4161 	case WM_T_82575:
   4162 	case WM_T_82576:
   4163 	case WM_T_82580:
   4164 	case WM_T_I350:
   4165 	case WM_T_I354:
   4166 	case WM_T_I210:
   4167 	case WM_T_I211:
   4168 	case WM_T_80003:
   4169 		/* check EECD_EE_AUTORD */
   4170 		wm_get_auto_rd_done(sc);
   4171 		break;
   4172 	case WM_T_ICH8:
   4173 	case WM_T_ICH9:
   4174 	case WM_T_ICH10:
   4175 	case WM_T_PCH:
   4176 	case WM_T_PCH2:
   4177 	case WM_T_PCH_LPT:
   4178 	case WM_T_PCH_SPT:
   4179 		break;
   4180 	default:
   4181 		panic("%s: unknown type\n", __func__);
   4182 	}
   4183 
   4184 	/* Check whether EEPROM is present or not */
   4185 	switch (sc->sc_type) {
   4186 	case WM_T_82575:
   4187 	case WM_T_82576:
   4188 	case WM_T_82580:
   4189 	case WM_T_I350:
   4190 	case WM_T_I354:
   4191 	case WM_T_ICH8:
   4192 	case WM_T_ICH9:
   4193 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4194 			/* Not found */
   4195 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4196 			if (sc->sc_type == WM_T_82575)
   4197 				wm_reset_init_script_82575(sc);
   4198 		}
   4199 		break;
   4200 	default:
   4201 		break;
   4202 	}
   4203 
   4204 	if ((sc->sc_type == WM_T_82580)
   4205 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4206 		/* clear global device reset status bit */
   4207 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4208 	}
   4209 
   4210 	/* Clear any pending interrupt events. */
   4211 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4212 	reg = CSR_READ(sc, WMREG_ICR);
   4213 	if (sc->sc_nintrs > 1) {
   4214 		if (sc->sc_type != WM_T_82574) {
   4215 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4216 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4217 		} else
   4218 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4219 	}
   4220 
   4221 	/* reload sc_ctrl */
   4222 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4223 
   4224 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4225 		wm_set_eee_i350(sc);
   4226 
   4227 	/* Clear the host wakeup bit after lcd reset */
   4228 	if (sc->sc_type >= WM_T_PCH) {
   4229 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4230 		    BM_PORT_GEN_CFG);
   4231 		reg &= ~BM_WUC_HOST_WU_BIT;
   4232 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4233 		    BM_PORT_GEN_CFG, reg);
   4234 	}
   4235 
   4236 	/*
   4237 	 * For PCH, this write will make sure that any noise will be detected
   4238 	 * as a CRC error and be dropped rather than show up as a bad packet
   4239 	 * to the DMA engine.
   4240 	 */
   4241 	if (sc->sc_type == WM_T_PCH)
   4242 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4243 
   4244 	if (sc->sc_type >= WM_T_82544)
   4245 		CSR_WRITE(sc, WMREG_WUC, 0);
   4246 
   4247 	wm_reset_mdicnfg_82580(sc);
   4248 
   4249 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4250 		wm_pll_workaround_i210(sc);
   4251 }
   4252 
   4253 /*
   4254  * wm_add_rxbuf:
   4255  *
   4256  *	Add a receive buffer to the indicated descriptor.
   4257  */
   4258 static int
   4259 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4260 {
   4261 	struct wm_softc *sc = rxq->rxq_sc;
   4262 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4263 	struct mbuf *m;
   4264 	int error;
   4265 
   4266 	KASSERT(mutex_owned(rxq->rxq_lock));
   4267 
   4268 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4269 	if (m == NULL)
   4270 		return ENOBUFS;
   4271 
   4272 	MCLGET(m, M_DONTWAIT);
   4273 	if ((m->m_flags & M_EXT) == 0) {
   4274 		m_freem(m);
   4275 		return ENOBUFS;
   4276 	}
   4277 
   4278 	if (rxs->rxs_mbuf != NULL)
   4279 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4280 
   4281 	rxs->rxs_mbuf = m;
   4282 
   4283 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4284 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4285 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4286 	if (error) {
   4287 		/* XXX XXX XXX */
   4288 		aprint_error_dev(sc->sc_dev,
   4289 		    "unable to load rx DMA map %d, error = %d\n",
   4290 		    idx, error);
   4291 		panic("wm_add_rxbuf");
   4292 	}
   4293 
   4294 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4295 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4296 
   4297 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4298 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4299 			wm_init_rxdesc(rxq, idx);
   4300 	} else
   4301 		wm_init_rxdesc(rxq, idx);
   4302 
   4303 	return 0;
   4304 }
   4305 
   4306 /*
   4307  * wm_rxdrain:
   4308  *
   4309  *	Drain the receive queue.
   4310  */
   4311 static void
   4312 wm_rxdrain(struct wm_rxqueue *rxq)
   4313 {
   4314 	struct wm_softc *sc = rxq->rxq_sc;
   4315 	struct wm_rxsoft *rxs;
   4316 	int i;
   4317 
   4318 	KASSERT(mutex_owned(rxq->rxq_lock));
   4319 
   4320 	for (i = 0; i < WM_NRXDESC; i++) {
   4321 		rxs = &rxq->rxq_soft[i];
   4322 		if (rxs->rxs_mbuf != NULL) {
   4323 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4324 			m_freem(rxs->rxs_mbuf);
   4325 			rxs->rxs_mbuf = NULL;
   4326 		}
   4327 	}
   4328 }
   4329 
   4330 
   4331 /*
   4332  * XXX copied from FreeBSD's sys/net/rss_config.c
   4333  */
   4334 /*
   4335  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4336  * effectiveness may be limited by algorithm choice and available entropy
   4337  * during the boot.
   4338  *
   4339  * XXXRW: And that we don't randomize it yet!
   4340  *
   4341  * This is the default Microsoft RSS specification key which is also
   4342  * the Chelsio T5 firmware default key.
   4343  */
   4344 #define RSS_KEYSIZE 40
   4345 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4346 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4347 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4348 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4349 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4350 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4351 };
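        /*
         * Note: the 40-byte key exactly fills the RSSRK_NUM_REGS 32-bit RSSRK
         * registers written in wm_init_rss(); the CTASSERT there checks that
         * the sizes match.
         */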
   4352 
   4353 /*
   4354  * The caller must pass an array of size sizeof(wm_rss_key).
   4355  *
   4356  * XXX
   4357  * As if_ixgbe may use this function, it should not be an
   4358  * if_wm-specific function.
   4359  */
   4360 static void
   4361 wm_rss_getkey(uint8_t *key)
   4362 {
   4363 
   4364 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4365 }
   4366 
   4367 /*
   4368  * Set up registers for RSS.
   4369  *
   4370  * XXX VMDq is not yet supported.
   4371  */
   4372 static void
   4373 wm_init_rss(struct wm_softc *sc)
   4374 {
   4375 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4376 	int i;
   4377 
   4378 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4379 
   4380 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4381 		int qid, reta_ent;
   4382 
   4383 		qid  = i % sc->sc_nqueues;
   4384 		switch (sc->sc_type) {
   4385 		case WM_T_82574:
   4386 			reta_ent = __SHIFTIN(qid,
   4387 			    RETA_ENT_QINDEX_MASK_82574);
   4388 			break;
   4389 		case WM_T_82575:
   4390 			reta_ent = __SHIFTIN(qid,
   4391 			    RETA_ENT_QINDEX1_MASK_82575);
   4392 			break;
   4393 		default:
   4394 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4395 			break;
   4396 		}
   4397 
   4398 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4399 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4400 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4401 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4402 	}
   4403 
   4404 	wm_rss_getkey((uint8_t *)rss_key);
   4405 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4406 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4407 
   4408 	if (sc->sc_type == WM_T_82574)
   4409 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4410 	else
   4411 		mrqc = MRQC_ENABLE_RSS_MQ;
   4412 
   4413 	/* XXX
   4414 	 * The same as FreeBSD igb.
   4415 	 * Why isn't MRQC_RSS_FIELD_IPV6_EX used?
   4416 	 */
   4417 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4418 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4419 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4420 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4421 
   4422 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4423 }
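        /*
         * Illustrative example: with sc_nqueues == 4, the RETA loop above
         * fills the redirection table with queue indexes 0,1,2,3,0,1,... so
         * incoming RSS hash values are spread round-robin across the four
         * queues.
         */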
   4424 
   4425 /*
   4426  * Adjust the TX and RX queue numbers which the system actually uses.
   4427  *
   4428  * The numbers are affected by the parameters below:
   4429  *     - The number of hardware queues
   4430  *     - The number of MSI-X vectors (= "nvectors" argument)
   4431  *     - ncpu
   4432  */
   4433 static void
   4434 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4435 {
   4436 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4437 
   4438 	if (nvectors < 2) {
   4439 		sc->sc_nqueues = 1;
   4440 		return;
   4441 	}
   4442 
   4443 	switch (sc->sc_type) {
   4444 	case WM_T_82572:
   4445 		hw_ntxqueues = 2;
   4446 		hw_nrxqueues = 2;
   4447 		break;
   4448 	case WM_T_82574:
   4449 		hw_ntxqueues = 2;
   4450 		hw_nrxqueues = 2;
   4451 		break;
   4452 	case WM_T_82575:
   4453 		hw_ntxqueues = 4;
   4454 		hw_nrxqueues = 4;
   4455 		break;
   4456 	case WM_T_82576:
   4457 		hw_ntxqueues = 16;
   4458 		hw_nrxqueues = 16;
   4459 		break;
   4460 	case WM_T_82580:
   4461 	case WM_T_I350:
   4462 	case WM_T_I354:
   4463 		hw_ntxqueues = 8;
   4464 		hw_nrxqueues = 8;
   4465 		break;
   4466 	case WM_T_I210:
   4467 		hw_ntxqueues = 4;
   4468 		hw_nrxqueues = 4;
   4469 		break;
   4470 	case WM_T_I211:
   4471 		hw_ntxqueues = 2;
   4472 		hw_nrxqueues = 2;
   4473 		break;
   4474 		/*
   4475 		 * As the ethernet controllers below do not support MSI-X,
   4476 		 * this driver does not use multiqueue for them:
   4477 		 *     - WM_T_80003
   4478 		 *     - WM_T_ICH8
   4479 		 *     - WM_T_ICH9
   4480 		 *     - WM_T_ICH10
   4481 		 *     - WM_T_PCH
   4482 		 *     - WM_T_PCH2
   4483 		 *     - WM_T_PCH_LPT
   4484 		 */
   4485 	default:
   4486 		hw_ntxqueues = 1;
   4487 		hw_nrxqueues = 1;
   4488 		break;
   4489 	}
   4490 
   4491 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4492 
   4493 	/*
   4494 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
   4495 	 * the number of queues actually used.
   4496 	 */
   4497 	if (nvectors < hw_nqueues + 1) {
   4498 		sc->sc_nqueues = nvectors - 1;
   4499 	} else {
   4500 		sc->sc_nqueues = hw_nqueues;
   4501 	}
   4502 
   4503 	/*
   4504 	 * As more queues than CPUs cannot improve scaling, we limit
   4505 	 * the number of queues actually used.
   4506 	 */
   4507 	if (ncpu < sc->sc_nqueues)
   4508 		sc->sc_nqueues = ncpu;
   4509 }
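        /*
         * Worked example: on an 82576 (16 hardware TX/RX queue pairs) with
         * nvectors == 5 and ncpu == 8, hw_nqueues is 16, so sc_nqueues is
         * first limited to nvectors - 1 = 4 (one vector is reserved for the
         * link interrupt), and the ncpu limit then leaves it at 4.
         */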
   4510 
   4511 /*
   4512  * Both single interrupt MSI and INTx can use this function.
   4513  */
   4514 static int
   4515 wm_setup_legacy(struct wm_softc *sc)
   4516 {
   4517 	pci_chipset_tag_t pc = sc->sc_pc;
   4518 	const char *intrstr = NULL;
   4519 	char intrbuf[PCI_INTRSTR_LEN];
   4520 	int error;
   4521 
   4522 	error = wm_alloc_txrx_queues(sc);
   4523 	if (error) {
   4524 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4525 		    error);
   4526 		return ENOMEM;
   4527 	}
   4528 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4529 	    sizeof(intrbuf));
   4530 #ifdef WM_MPSAFE
   4531 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4532 #endif
   4533 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4534 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4535 	if (sc->sc_ihs[0] == NULL) {
   4536 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4537 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4538 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4539 		return ENOMEM;
   4540 	}
   4541 
   4542 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4543 	sc->sc_nintrs = 1;
   4544 	return 0;
   4545 }
   4546 
   4547 static int
   4548 wm_setup_msix(struct wm_softc *sc)
   4549 {
   4550 	void *vih;
   4551 	kcpuset_t *affinity;
   4552 	int qidx, error, intr_idx, txrx_established;
   4553 	pci_chipset_tag_t pc = sc->sc_pc;
   4554 	const char *intrstr = NULL;
   4555 	char intrbuf[PCI_INTRSTR_LEN];
   4556 	char intr_xname[INTRDEVNAMEBUF];
   4557 
   4558 	if (sc->sc_nqueues < ncpu) {
   4559 		/*
   4560 		 * To avoid other devices' interrupts, the affinity of the
   4561 		 * Tx/Rx interrupts starts from CPU#1.
   4562 		 */
   4563 		sc->sc_affinity_offset = 1;
   4564 	} else {
   4565 		/*
   4566 		 * In this case, this device uses all CPUs, so we match the
   4567 		 * affinitized cpu_index to the MSI-X vector number for readability.
   4568 		 */
   4569 		sc->sc_affinity_offset = 0;
   4570 	}
   4571 
   4572 	error = wm_alloc_txrx_queues(sc);
   4573 	if (error) {
   4574 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4575 		    error);
   4576 		return ENOMEM;
   4577 	}
   4578 
   4579 	kcpuset_create(&affinity, false);
   4580 	intr_idx = 0;
   4581 
   4582 	/*
   4583 	 * TX and RX
   4584 	 */
   4585 	txrx_established = 0;
   4586 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4587 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4588 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4589 
   4590 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4591 		    sizeof(intrbuf));
   4592 #ifdef WM_MPSAFE
   4593 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4594 		    PCI_INTR_MPSAFE, true);
   4595 #endif
   4596 		memset(intr_xname, 0, sizeof(intr_xname));
   4597 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4598 		    device_xname(sc->sc_dev), qidx);
   4599 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4600 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4601 		if (vih == NULL) {
   4602 			aprint_error_dev(sc->sc_dev,
   4603 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4604 			    intrstr ? " at " : "",
   4605 			    intrstr ? intrstr : "");
   4606 
   4607 			goto fail;
   4608 		}
   4609 		kcpuset_zero(affinity);
   4610 		/* Round-robin affinity */
   4611 		kcpuset_set(affinity, affinity_to);
   4612 		error = interrupt_distribute(vih, affinity, NULL);
   4613 		if (error == 0) {
   4614 			aprint_normal_dev(sc->sc_dev,
   4615 			    "for TX and RX interrupting at %s affinity to %u\n",
   4616 			    intrstr, affinity_to);
   4617 		} else {
   4618 			aprint_normal_dev(sc->sc_dev,
   4619 			    "for TX and RX interrupting at %s\n", intrstr);
   4620 		}
   4621 		sc->sc_ihs[intr_idx] = vih;
   4622 		wmq->wmq_id = qidx;
   4623 		wmq->wmq_intr_idx = intr_idx;
   4624 
   4625 		txrx_established++;
   4626 		intr_idx++;
   4627 	}
   4628 
   4629 	/*
   4630 	 * LINK
   4631 	 */
   4632 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4633 	    sizeof(intrbuf));
   4634 #ifdef WM_MPSAFE
   4635 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4636 #endif
   4637 	memset(intr_xname, 0, sizeof(intr_xname));
   4638 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4639 	    device_xname(sc->sc_dev));
   4640 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4641 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4642 	if (vih == NULL) {
   4643 		aprint_error_dev(sc->sc_dev,
		    "unable to establish MSI-X (for LINK)%s%s\n",
   4645 		    intrstr ? " at " : "",
   4646 		    intrstr ? intrstr : "");
   4647 
   4648 		goto fail;
   4649 	}
   4650 	/* keep default affinity to LINK interrupt */
   4651 	aprint_normal_dev(sc->sc_dev,
   4652 	    "for LINK interrupting at %s\n", intrstr);
   4653 	sc->sc_ihs[intr_idx] = vih;
   4654 	sc->sc_link_intr_idx = intr_idx;
   4655 
   4656 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4657 	kcpuset_destroy(affinity);
   4658 	return 0;
   4659 
   4660  fail:
   4661 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4662 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc,
		    sc->sc_ihs[wmq->wmq_intr_idx]);
   4664 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4665 	}
   4666 
   4667 	kcpuset_destroy(affinity);
   4668 	return ENOMEM;
   4669 }
   4670 
   4671 static void
   4672 wm_turnon(struct wm_softc *sc)
   4673 {
   4674 	int i;
   4675 
   4676 	KASSERT(WM_CORE_LOCKED(sc));
   4677 
	for (i = 0; i < sc->sc_nqueues; i++) {
   4679 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4680 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4681 
   4682 		mutex_enter(txq->txq_lock);
   4683 		txq->txq_stopping = false;
   4684 		mutex_exit(txq->txq_lock);
   4685 
   4686 		mutex_enter(rxq->rxq_lock);
   4687 		rxq->rxq_stopping = false;
   4688 		mutex_exit(rxq->rxq_lock);
   4689 	}
   4690 
   4691 	sc->sc_core_stopping = false;
   4692 }
   4693 
   4694 static void
   4695 wm_turnoff(struct wm_softc *sc)
   4696 {
   4697 	int i;
   4698 
   4699 	KASSERT(WM_CORE_LOCKED(sc));
   4700 
   4701 	sc->sc_core_stopping = true;
   4702 
	for (i = 0; i < sc->sc_nqueues; i++) {
   4704 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4705 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4706 
   4707 		mutex_enter(rxq->rxq_lock);
   4708 		rxq->rxq_stopping = true;
   4709 		mutex_exit(rxq->rxq_lock);
   4710 
   4711 		mutex_enter(txq->txq_lock);
   4712 		txq->txq_stopping = true;
   4713 		mutex_exit(txq->txq_lock);
   4714 	}
   4715 }
   4716 
   4717 /*
   4718  * wm_init:		[ifnet interface function]
   4719  *
   4720  *	Initialize the interface.
   4721  */
   4722 static int
   4723 wm_init(struct ifnet *ifp)
   4724 {
   4725 	struct wm_softc *sc = ifp->if_softc;
   4726 	int ret;
   4727 
   4728 	WM_CORE_LOCK(sc);
   4729 	ret = wm_init_locked(ifp);
   4730 	WM_CORE_UNLOCK(sc);
   4731 
   4732 	return ret;
   4733 }
   4734 
   4735 static int
   4736 wm_init_locked(struct ifnet *ifp)
   4737 {
   4738 	struct wm_softc *sc = ifp->if_softc;
   4739 	int i, j, trynum, error = 0;
   4740 	uint32_t reg;
   4741 
   4742 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4743 		device_xname(sc->sc_dev), __func__));
   4744 	KASSERT(WM_CORE_LOCKED(sc));
   4745 
   4746 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
   4749 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4750 	 * on such platforms.  One possibility is that the DMA itself is
   4751 	 * slightly more efficient if the front of the entire packet (instead
   4752 	 * of the front of the headers) is aligned.
   4753 	 *
   4754 	 * Note we must always set align_tweak to 0 if we are using
   4755 	 * jumbo frames.
   4756 	 */
   4757 #ifdef __NO_STRICT_ALIGNMENT
   4758 	sc->sc_align_tweak = 0;
   4759 #else
   4760 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4761 		sc->sc_align_tweak = 0;
   4762 	else
   4763 		sc->sc_align_tweak = 2;
   4764 #endif /* __NO_STRICT_ALIGNMENT */
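	/*
	 * Worked example (assuming the usual 14-byte Ethernet header):
	 * offsetting the receive buffer by sc_align_tweak = 2 makes the
	 * payload that follows the Ethernet header start on a 4-byte
	 * boundary, which strict-alignment machines require for the IP
	 * header.  A jumbo frame may need the whole cluster, so the
	 * tweak must then be 0.
	 */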
   4765 
   4766 	/* Cancel any pending I/O. */
   4767 	wm_stop_locked(ifp, 0);
   4768 
   4769 	/* update statistics before reset */
   4770 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4771 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4772 
   4773 	/* PCH_SPT hardware workaround */
   4774 	if (sc->sc_type == WM_T_PCH_SPT)
   4775 		wm_flush_desc_rings(sc);
   4776 
   4777 	/* Reset the chip to a known state. */
   4778 	wm_reset(sc);
   4779 
   4780 	/* AMT based hardware can now take control from firmware */
   4781 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4782 		wm_get_hw_control(sc);
   4783 
   4784 	/* Init hardware bits */
   4785 	wm_initialize_hardware_bits(sc);
   4786 
   4787 	/* Reset the PHY. */
   4788 	if (sc->sc_flags & WM_F_HAS_MII)
   4789 		wm_gmii_reset(sc);
   4790 
   4791 	/* Calculate (E)ITR value */
   4792 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4793 		sc->sc_itr = 450;	/* For EITR */
   4794 	} else if (sc->sc_type >= WM_T_82543) {
   4795 		/*
   4796 		 * Set up the interrupt throttling register (units of 256ns)
   4797 		 * Note that a footnote in Intel's documentation says this
   4798 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4799 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4800 		 * that that is also true for the 1024ns units of the other
   4801 		 * interrupt-related timer registers -- so, really, we ought
   4802 		 * to divide this value by 4 when the link speed is low.
   4803 		 *
   4804 		 * XXX implement this division at link speed change!
   4805 		 */
   4806 
   4807 		/*
   4808 		 * For N interrupts/sec, set this value to:
   4809 		 * 1000000000 / (N * 256).  Note that we set the
   4810 		 * absolute and packet timer values to this value
   4811 		 * divided by 4 to get "simple timer" behavior.
   4812 		 */
   4813 
   4814 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4815 	}
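	/*
	 * Illustrative sketch (not compiled) of the conversion in the
	 * comment above; "rate" and "itr" are hypothetical locals.
	 */
#if 0
	{
		const uint32_t rate = 2604;	/* target interrupts/sec */
		uint32_t itr = 1000000000U / (rate * 256);	/* ~1500 */
	}
#endif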
   4816 
   4817 	error = wm_init_txrx_queues(sc);
   4818 	if (error)
   4819 		goto out;
   4820 
   4821 	/*
   4822 	 * Clear out the VLAN table -- we don't use it (yet).
   4823 	 */
   4824 	CSR_WRITE(sc, WMREG_VET, 0);
   4825 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4826 		trynum = 10; /* Due to hw errata */
   4827 	else
   4828 		trynum = 1;
   4829 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4830 		for (j = 0; j < trynum; j++)
   4831 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4832 
   4833 	/*
   4834 	 * Set up flow-control parameters.
   4835 	 *
   4836 	 * XXX Values could probably stand some tuning.
   4837 	 */
   4838 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4839 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4840 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4841 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4842 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4843 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4844 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4845 	}
   4846 
   4847 	sc->sc_fcrtl = FCRTL_DFLT;
   4848 	if (sc->sc_type < WM_T_82543) {
   4849 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4850 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4851 	} else {
   4852 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4853 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4854 	}
   4855 
   4856 	if (sc->sc_type == WM_T_80003)
   4857 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4858 	else
   4859 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4860 
   4861 	/* Writes the control register. */
   4862 	wm_set_vlan(sc);
   4863 
   4864 	if (sc->sc_flags & WM_F_HAS_MII) {
   4865 		int val;
   4866 
   4867 		switch (sc->sc_type) {
   4868 		case WM_T_80003:
   4869 		case WM_T_ICH8:
   4870 		case WM_T_ICH9:
   4871 		case WM_T_ICH10:
   4872 		case WM_T_PCH:
   4873 		case WM_T_PCH2:
   4874 		case WM_T_PCH_LPT:
   4875 		case WM_T_PCH_SPT:
   4876 			/*
   4877 			 * Set the mac to wait the maximum time between each
   4878 			 * iteration and increase the max iterations when
   4879 			 * polling the phy; this fixes erroneous timeouts at
   4880 			 * 10Mbps.
   4881 			 */
   4882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4883 			    0xFFFF);
   4884 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4885 			val |= 0x3F;
   4886 			wm_kmrn_writereg(sc,
   4887 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4888 			break;
   4889 		default:
   4890 			break;
   4891 		}
   4892 
   4893 		if (sc->sc_type == WM_T_80003) {
   4894 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4895 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4896 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4897 
   4898 			/* Bypass RX and TX FIFO's */
   4899 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4900 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4901 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4902 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4903 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4904 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4905 		}
   4906 	}
   4907 #if 0
   4908 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4909 #endif
   4910 
   4911 	/* Set up checksum offload parameters. */
   4912 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4913 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4914 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4915 		reg |= RXCSUM_IPOFL;
   4916 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4917 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4918 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4919 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4920 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4921 
   4922 	/* Set up MSI-X */
   4923 	if (sc->sc_nintrs > 1) {
   4924 		uint32_t ivar;
   4925 		struct wm_queue *wmq;
   4926 		int qid, qintr_idx;
   4927 
   4928 		if (sc->sc_type == WM_T_82575) {
   4929 			/* Interrupt control */
   4930 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4931 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4932 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4933 
   4934 			/* TX and RX */
   4935 			for (i = 0; i < sc->sc_nqueues; i++) {
   4936 				wmq = &sc->sc_queue[i];
   4937 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4938 				    EITR_TX_QUEUE(wmq->wmq_id)
   4939 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4940 			}
   4941 			/* Link status */
   4942 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4943 			    EITR_OTHER);
   4944 		} else if (sc->sc_type == WM_T_82574) {
   4945 			/* Interrupt control */
   4946 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4947 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4948 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4949 
   4950 			ivar = 0;
   4951 			/* TX and RX */
   4952 			for (i = 0; i < sc->sc_nqueues; i++) {
   4953 				wmq = &sc->sc_queue[i];
   4954 				qid = wmq->wmq_id;
   4955 				qintr_idx = wmq->wmq_intr_idx;
   4956 
   4957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4958 				    IVAR_TX_MASK_Q_82574(qid));
   4959 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4960 				    IVAR_RX_MASK_Q_82574(qid));
   4961 			}
   4962 			/* Link status */
   4963 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4964 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4965 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
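			/*
			 * Illustrative sketch: each 82574 IVAR field
			 * holds "vector | IVAR_VALID_82574", so with
			 * one queue on vector 0 and the link on vector
			 * 1, the __SHIFTIN()s above pack
			 * (IVAR_VALID_82574 | 0) into the queue-0 TX/RX
			 * fields and (IVAR_VALID_82574 | 1) into the
			 * OTHER field before the single register write.
			 */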
   4966 		} else {
   4967 			/* Interrupt control */
   4968 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4969 			    | GPIE_EIAME | GPIE_PBA);
   4970 
   4971 			switch (sc->sc_type) {
   4972 			case WM_T_82580:
   4973 			case WM_T_I350:
   4974 			case WM_T_I354:
   4975 			case WM_T_I210:
   4976 			case WM_T_I211:
   4977 				/* TX and RX */
   4978 				for (i = 0; i < sc->sc_nqueues; i++) {
   4979 					wmq = &sc->sc_queue[i];
   4980 					qid = wmq->wmq_id;
   4981 					qintr_idx = wmq->wmq_intr_idx;
   4982 
   4983 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4984 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4985 					ivar |= __SHIFTIN((qintr_idx
   4986 						| IVAR_VALID),
   4987 					    IVAR_TX_MASK_Q(qid));
   4988 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4989 					ivar |= __SHIFTIN((qintr_idx
   4990 						| IVAR_VALID),
   4991 					    IVAR_RX_MASK_Q(qid));
   4992 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4993 				}
   4994 				break;
   4995 			case WM_T_82576:
   4996 				/* TX and RX */
   4997 				for (i = 0; i < sc->sc_nqueues; i++) {
   4998 					wmq = &sc->sc_queue[i];
   4999 					qid = wmq->wmq_id;
   5000 					qintr_idx = wmq->wmq_intr_idx;
   5001 
   5002 					ivar = CSR_READ(sc,
   5003 					    WMREG_IVAR_Q_82576(qid));
   5004 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5005 					ivar |= __SHIFTIN((qintr_idx
   5006 						| IVAR_VALID),
   5007 					    IVAR_TX_MASK_Q_82576(qid));
   5008 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5009 					ivar |= __SHIFTIN((qintr_idx
   5010 						| IVAR_VALID),
   5011 					    IVAR_RX_MASK_Q_82576(qid));
   5012 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5013 					    ivar);
   5014 				}
   5015 				break;
   5016 			default:
   5017 				break;
   5018 			}
   5019 
   5020 			/* Link status */
   5021 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5022 			    IVAR_MISC_OTHER);
   5023 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5024 		}
   5025 
   5026 		if (sc->sc_nqueues > 1) {
   5027 			wm_init_rss(sc);
   5028 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue.  However,
			 * this is not the same as the TCP/IP checksums,
			 * which still work.
			 */
   5035 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5036 			reg |= RXCSUM_PCSD;
   5037 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5038 		}
   5039 	}
   5040 
   5041 	/* Set up the interrupt registers. */
   5042 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5043 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5044 	    ICR_RXO | ICR_RXT0;
   5045 	if (sc->sc_nintrs > 1) {
   5046 		uint32_t mask;
   5047 		struct wm_queue *wmq;
   5048 
   5049 		switch (sc->sc_type) {
   5050 		case WM_T_82574:
   5051 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5052 			    WMREG_EIAC_82574_MSIX_MASK);
   5053 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5054 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5055 			break;
   5056 		default:
   5057 			if (sc->sc_type == WM_T_82575) {
   5058 				mask = 0;
   5059 				for (i = 0; i < sc->sc_nqueues; i++) {
   5060 					wmq = &sc->sc_queue[i];
   5061 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5062 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5063 				}
   5064 				mask |= EITR_OTHER;
   5065 			} else {
   5066 				mask = 0;
   5067 				for (i = 0; i < sc->sc_nqueues; i++) {
   5068 					wmq = &sc->sc_queue[i];
   5069 					mask |= 1 << wmq->wmq_intr_idx;
   5070 				}
   5071 				mask |= 1 << sc->sc_link_intr_idx;
   5072 			}
   5073 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5074 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5075 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5076 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5077 			break;
   5078 		}
   5079 	} else
   5080 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
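	/*
	 * Example of the mask built above (hypothetical numbers): on a
	 * non-82575/82574 chip with two queues on vectors 0 and 1 and
	 * the link interrupt on vector 2, the loop yields
	 * mask = (1 << 0) | (1 << 1) | (1 << 2) = 0x7, which is written
	 * to EIAC, EIAM and EIMS.
	 */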
   5081 
   5082 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5083 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5084 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5085 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5086 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5087 		reg |= KABGTXD_BGSQLBIAS;
   5088 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5089 	}
   5090 
   5091 	/* Set up the inter-packet gap. */
   5092 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5093 
   5094 	if (sc->sc_type >= WM_T_82543) {
   5095 		/*
   5096 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5097 		 * the multi queue function with MSI-X.
   5098 		 */
   5099 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5100 			int qidx;
   5101 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5102 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5103 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5104 				    sc->sc_itr);
   5105 			}
   5106 			/*
			 * Link interrupts occur much less frequently than
			 * Tx and Rx interrupts, so we don't tune the
			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
			 * FreeBSD's if_igb does.
   5111 			 */
   5112 		} else
   5113 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5114 	}
   5115 
   5116 	/* Set the VLAN ethernetype. */
   5117 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5118 
   5119 	/*
   5120 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
   5122 	 * we resolve the media type.
   5123 	 */
   5124 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5125 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5126 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5127 	if (sc->sc_type >= WM_T_82571)
   5128 		sc->sc_tctl |= TCTL_MULR;
   5129 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5130 
   5131 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5133 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5134 	}
   5135 
   5136 	if (sc->sc_type == WM_T_80003) {
   5137 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5138 		reg &= ~TCTL_EXT_GCEX_MASK;
   5139 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5140 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5141 	}
   5142 
   5143 	/* Set the media. */
   5144 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5145 		goto out;
   5146 
   5147 	/* Configure for OS presence */
   5148 	wm_init_manageability(sc);
   5149 
   5150 	/*
   5151 	 * Set up the receive control register; we actually program
   5152 	 * the register when we set the receive filter.  Use multicast
   5153 	 * address offset type 0.
   5154 	 *
   5155 	 * Only the i82544 has the ability to strip the incoming
   5156 	 * CRC, so we don't enable that feature.
   5157 	 */
   5158 	sc->sc_mchash_type = 0;
   5159 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5160 	    | RCTL_MO(sc->sc_mchash_type);
   5161 
   5162 	/*
   5163 	 * The I350 has a bug where it always strips the CRC whether
   5164 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   5165 	 */
   5166 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5167 	    || (sc->sc_type == WM_T_I210))
   5168 		sc->sc_rctl |= RCTL_SECRC;
   5169 
   5170 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5171 	    && (ifp->if_mtu > ETHERMTU)) {
   5172 		sc->sc_rctl |= RCTL_LPE;
   5173 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5174 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5175 	}
   5176 
   5177 	if (MCLBYTES == 2048) {
   5178 		sc->sc_rctl |= RCTL_2k;
   5179 	} else {
   5180 		if (sc->sc_type >= WM_T_82543) {
   5181 			switch (MCLBYTES) {
   5182 			case 4096:
   5183 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5184 				break;
   5185 			case 8192:
   5186 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5187 				break;
   5188 			case 16384:
   5189 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5190 				break;
   5191 			default:
   5192 				panic("wm_init: MCLBYTES %d unsupported",
   5193 				    MCLBYTES);
   5194 				break;
   5195 			}
   5196 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5197 	}
   5198 
   5199 	/* Set the receive filter. */
   5200 	wm_set_filter(sc);
   5201 
   5202 	/* Enable ECC */
   5203 	switch (sc->sc_type) {
   5204 	case WM_T_82571:
   5205 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5206 		reg |= PBA_ECC_CORR_EN;
   5207 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5208 		break;
   5209 	case WM_T_PCH_LPT:
   5210 	case WM_T_PCH_SPT:
   5211 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5212 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5213 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5214 
   5215 		sc->sc_ctrl |= CTRL_MEHE;
   5216 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5217 		break;
   5218 	default:
   5219 		break;
   5220 	}
   5221 
   5222 	/* On 575 and later set RDT only if RX enabled */
   5223 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5224 		int qidx;
   5225 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5226 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5227 			for (i = 0; i < WM_NRXDESC; i++) {
   5228 				mutex_enter(rxq->rxq_lock);
   5229 				wm_init_rxdesc(rxq, i);
   5230 				mutex_exit(rxq->rxq_lock);
   5231 
   5232 			}
   5233 		}
   5234 	}
   5235 
   5236 	wm_turnon(sc);
   5237 
   5238 	/* Start the one second link check clock. */
   5239 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5240 
   5241 	/* ...all done! */
   5242 	ifp->if_flags |= IFF_RUNNING;
   5243 	ifp->if_flags &= ~IFF_OACTIVE;
   5244 
   5245  out:
   5246 	sc->sc_if_flags = ifp->if_flags;
   5247 	if (error)
   5248 		log(LOG_ERR, "%s: interface not running\n",
   5249 		    device_xname(sc->sc_dev));
   5250 	return error;
   5251 }
   5252 
   5253 /*
   5254  * wm_stop:		[ifnet interface function]
   5255  *
   5256  *	Stop transmission on the interface.
   5257  */
   5258 static void
   5259 wm_stop(struct ifnet *ifp, int disable)
   5260 {
   5261 	struct wm_softc *sc = ifp->if_softc;
   5262 
   5263 	WM_CORE_LOCK(sc);
   5264 	wm_stop_locked(ifp, disable);
   5265 	WM_CORE_UNLOCK(sc);
   5266 }
   5267 
   5268 static void
   5269 wm_stop_locked(struct ifnet *ifp, int disable)
   5270 {
   5271 	struct wm_softc *sc = ifp->if_softc;
   5272 	struct wm_txsoft *txs;
   5273 	int i, qidx;
   5274 
   5275 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5276 		device_xname(sc->sc_dev), __func__));
   5277 	KASSERT(WM_CORE_LOCKED(sc));
   5278 
   5279 	wm_turnoff(sc);
   5280 
   5281 	/* Stop the one second clock. */
   5282 	callout_stop(&sc->sc_tick_ch);
   5283 
   5284 	/* Stop the 82547 Tx FIFO stall check timer. */
   5285 	if (sc->sc_type == WM_T_82547)
   5286 		callout_stop(&sc->sc_txfifo_ch);
   5287 
   5288 	if (sc->sc_flags & WM_F_HAS_MII) {
   5289 		/* Down the MII. */
   5290 		mii_down(&sc->sc_mii);
   5291 	} else {
   5292 #if 0
   5293 		/* Should we clear PHY's status properly? */
   5294 		wm_reset(sc);
   5295 #endif
   5296 	}
   5297 
   5298 	/* Stop the transmit and receive processes. */
   5299 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5300 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5301 	sc->sc_rctl &= ~RCTL_EN;
   5302 
   5303 	/*
   5304 	 * Clear the interrupt mask to ensure the device cannot assert its
   5305 	 * interrupt line.
   5306 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5307 	 * service any currently pending or shared interrupt.
   5308 	 */
   5309 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5310 	sc->sc_icr = 0;
   5311 	if (sc->sc_nintrs > 1) {
   5312 		if (sc->sc_type != WM_T_82574) {
   5313 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5314 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5315 		} else
   5316 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5317 	}
   5318 
   5319 	/* Release any queued transmit buffers. */
   5320 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5321 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5322 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5323 		mutex_enter(txq->txq_lock);
   5324 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5325 			txs = &txq->txq_soft[i];
   5326 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   5328 				m_freem(txs->txs_mbuf);
   5329 				txs->txs_mbuf = NULL;
   5330 			}
   5331 		}
   5332 		mutex_exit(txq->txq_lock);
   5333 	}
   5334 
   5335 	/* Mark the interface as down and cancel the watchdog timer. */
   5336 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5337 	ifp->if_timer = 0;
   5338 
   5339 	if (disable) {
   5340 		for (i = 0; i < sc->sc_nqueues; i++) {
   5341 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5342 			mutex_enter(rxq->rxq_lock);
   5343 			wm_rxdrain(rxq);
   5344 			mutex_exit(rxq->rxq_lock);
   5345 		}
   5346 	}
   5347 
   5348 #if 0 /* notyet */
   5349 	if (sc->sc_type >= WM_T_82544)
   5350 		CSR_WRITE(sc, WMREG_WUC, 0);
   5351 #endif
   5352 }
   5353 
   5354 static void
   5355 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5356 {
   5357 	struct mbuf *m;
   5358 	int i;
   5359 
   5360 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5361 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5362 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5363 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5364 		    m->m_data, m->m_len, m->m_flags);
   5365 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5366 	    i, i == 1 ? "" : "s");
   5367 }
   5368 
   5369 /*
   5370  * wm_82547_txfifo_stall:
   5371  *
   5372  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5373  *	reset the FIFO pointers, and restart packet transmission.
   5374  */
   5375 static void
   5376 wm_82547_txfifo_stall(void *arg)
   5377 {
   5378 	struct wm_softc *sc = arg;
   5379 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5380 
   5381 	mutex_enter(txq->txq_lock);
   5382 
   5383 	if (txq->txq_stopping)
   5384 		goto out;
   5385 
   5386 	if (txq->txq_fifo_stall) {
   5387 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5388 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5389 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5390 			/*
   5391 			 * Packets have drained.  Stop transmitter, reset
   5392 			 * FIFO pointers, restart transmitter, and kick
   5393 			 * the packet queue.
   5394 			 */
   5395 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5396 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5397 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5398 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5399 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5400 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5401 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5402 			CSR_WRITE_FLUSH(sc);
   5403 
   5404 			txq->txq_fifo_head = 0;
   5405 			txq->txq_fifo_stall = 0;
   5406 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5407 		} else {
   5408 			/*
   5409 			 * Still waiting for packets to drain; try again in
   5410 			 * another tick.
   5411 			 */
   5412 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5413 		}
   5414 	}
   5415 
   5416 out:
   5417 	mutex_exit(txq->txq_lock);
   5418 }
   5419 
   5420 /*
   5421  * wm_82547_txfifo_bugchk:
   5422  *
   5423  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5424  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5426  *
   5427  *	We do this by checking the amount of space before the end
   5428  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5429  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5430  *	the internal FIFO pointers to the beginning, and restart
   5431  *	transmission on the interface.
   5432  */
   5433 #define	WM_FIFO_HDR		0x10
   5434 #define	WM_82547_PAD_LEN	0x3e0
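/*
 * Worked example for wm_82547_txfifo_bugchk() below, with hypothetical
 * numbers (a sketch, not compiled):
 */
#if 0
/* fifo_size = 0x2000, fifo_head = 0x1f00 => space = 0x100 */
static const int example_space = 0x2000 - 0x1f00;
/* A 1000-byte packet: len = roundup(1000 + 0x10, 0x10) = 0x400. */
static const int example_len = 0x400;
/* example_len < WM_82547_PAD_LEN + example_space (0x4e0), so no stall:
   the FIFO head advances by 0x400 to 0x2300 and wraps to 0x300. */
#endif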
   5435 static int
   5436 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5437 {
   5438 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5439 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5440 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5441 
   5442 	/* Just return if already stalled. */
   5443 	if (txq->txq_fifo_stall)
   5444 		return 1;
   5445 
   5446 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5447 		/* Stall only occurs in half-duplex mode. */
   5448 		goto send_packet;
   5449 	}
   5450 
   5451 	if (len >= WM_82547_PAD_LEN + space) {
   5452 		txq->txq_fifo_stall = 1;
   5453 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5454 		return 1;
   5455 	}
   5456 
   5457  send_packet:
   5458 	txq->txq_fifo_head += len;
   5459 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5460 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5461 
   5462 	return 0;
   5463 }
   5464 
   5465 static int
   5466 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5467 {
   5468 	int error;
   5469 
   5470 	/*
   5471 	 * Allocate the control data structures, and create and load the
   5472 	 * DMA map for it.
   5473 	 *
   5474 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5475 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5476 	 * both sets within the same 4G segment.
   5477 	 */
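	/*
	 * The 4G constraint is enforced below by the boundary argument
	 * of bus_dmamem_alloc(): (bus_size_t)0x100000000ULL requests
	 * memory that does not cross a 4GB boundary, so the whole ring
	 * shares the same upper 32 address bits.
	 */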
   5478 	if (sc->sc_type < WM_T_82544)
   5479 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5480 	else
   5481 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5482 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5483 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5484 	else
   5485 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5486 
   5487 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5488 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5489 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5490 		aprint_error_dev(sc->sc_dev,
   5491 		    "unable to allocate TX control data, error = %d\n",
   5492 		    error);
   5493 		goto fail_0;
   5494 	}
   5495 
   5496 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5497 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5498 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5499 		aprint_error_dev(sc->sc_dev,
   5500 		    "unable to map TX control data, error = %d\n", error);
   5501 		goto fail_1;
   5502 	}
   5503 
   5504 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5505 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5506 		aprint_error_dev(sc->sc_dev,
   5507 		    "unable to create TX control data DMA map, error = %d\n",
   5508 		    error);
   5509 		goto fail_2;
   5510 	}
   5511 
   5512 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5513 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5514 		aprint_error_dev(sc->sc_dev,
   5515 		    "unable to load TX control data DMA map, error = %d\n",
   5516 		    error);
   5517 		goto fail_3;
   5518 	}
   5519 
   5520 	return 0;
   5521 
   5522  fail_3:
   5523 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5524  fail_2:
   5525 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5526 	    WM_TXDESCS_SIZE(txq));
   5527  fail_1:
   5528 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5529  fail_0:
   5530 	return error;
   5531 }
   5532 
   5533 static void
   5534 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5535 {
   5536 
   5537 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5538 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5539 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5540 	    WM_TXDESCS_SIZE(txq));
   5541 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5542 }
   5543 
   5544 static int
   5545 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5546 {
   5547 	int error;
   5548 
   5549 	/*
   5550 	 * Allocate the control data structures, and create and load the
   5551 	 * DMA map for it.
   5552 	 *
   5553 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5554 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5555 	 * both sets within the same 4G segment.
   5556 	 */
   5557 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5558 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5559 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5560 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5561 		aprint_error_dev(sc->sc_dev,
   5562 		    "unable to allocate RX control data, error = %d\n",
   5563 		    error);
   5564 		goto fail_0;
   5565 	}
   5566 
   5567 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5568 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5569 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5570 		aprint_error_dev(sc->sc_dev,
   5571 		    "unable to map RX control data, error = %d\n", error);
   5572 		goto fail_1;
   5573 	}
   5574 
   5575 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5576 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5577 		aprint_error_dev(sc->sc_dev,
   5578 		    "unable to create RX control data DMA map, error = %d\n",
   5579 		    error);
   5580 		goto fail_2;
   5581 	}
   5582 
   5583 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5584 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5585 		aprint_error_dev(sc->sc_dev,
   5586 		    "unable to load RX control data DMA map, error = %d\n",
   5587 		    error);
   5588 		goto fail_3;
   5589 	}
   5590 
   5591 	return 0;
   5592 
   5593  fail_3:
   5594 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5595  fail_2:
   5596 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5597 	    rxq->rxq_desc_size);
   5598  fail_1:
   5599 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5600  fail_0:
   5601 	return error;
   5602 }
   5603 
   5604 static void
   5605 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5606 {
   5607 
   5608 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5609 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5610 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5611 	    rxq->rxq_desc_size);
   5612 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5613 }
   5614 
   5615 
   5616 static int
   5617 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5618 {
   5619 	int i, error;
   5620 
   5621 	/* Create the transmit buffer DMA maps. */
   5622 	WM_TXQUEUELEN(txq) =
   5623 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5624 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5625 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5626 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5627 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5628 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5629 			aprint_error_dev(sc->sc_dev,
   5630 			    "unable to create Tx DMA map %d, error = %d\n",
   5631 			    i, error);
   5632 			goto fail;
   5633 		}
   5634 	}
   5635 
   5636 	return 0;
   5637 
   5638  fail:
   5639 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5640 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5641 			bus_dmamap_destroy(sc->sc_dmat,
   5642 			    txq->txq_soft[i].txs_dmamap);
   5643 	}
   5644 	return error;
   5645 }
   5646 
   5647 static void
   5648 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5649 {
   5650 	int i;
   5651 
   5652 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5653 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5654 			bus_dmamap_destroy(sc->sc_dmat,
   5655 			    txq->txq_soft[i].txs_dmamap);
   5656 	}
   5657 }
   5658 
   5659 static int
   5660 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5661 {
   5662 	int i, error;
   5663 
   5664 	/* Create the receive buffer DMA maps. */
   5665 	for (i = 0; i < WM_NRXDESC; i++) {
   5666 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5667 			    MCLBYTES, 0, 0,
   5668 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5669 			aprint_error_dev(sc->sc_dev,
   5670 			    "unable to create Rx DMA map %d error = %d\n",
   5671 			    i, error);
   5672 			goto fail;
   5673 		}
   5674 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5675 	}
   5676 
   5677 	return 0;
   5678 
   5679  fail:
   5680 	for (i = 0; i < WM_NRXDESC; i++) {
   5681 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5682 			bus_dmamap_destroy(sc->sc_dmat,
   5683 			    rxq->rxq_soft[i].rxs_dmamap);
   5684 	}
   5685 	return error;
   5686 }
   5687 
   5688 static void
   5689 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5690 {
   5691 	int i;
   5692 
   5693 	for (i = 0; i < WM_NRXDESC; i++) {
   5694 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5695 			bus_dmamap_destroy(sc->sc_dmat,
   5696 			    rxq->rxq_soft[i].rxs_dmamap);
   5697 	}
   5698 }
   5699 
   5700 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5703  */
   5704 static int
   5705 wm_alloc_txrx_queues(struct wm_softc *sc)
   5706 {
   5707 	int i, error, tx_done, rx_done;
   5708 
   5709 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5710 	    KM_SLEEP);
   5711 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5713 		error = ENOMEM;
   5714 		goto fail_0;
   5715 	}
   5716 
   5717 	/*
   5718 	 * For transmission
   5719 	 */
   5720 	error = 0;
   5721 	tx_done = 0;
   5722 	for (i = 0; i < sc->sc_nqueues; i++) {
   5723 #ifdef WM_EVENT_COUNTERS
   5724 		int j;
   5725 		const char *xname;
   5726 #endif
   5727 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5728 		txq->txq_sc = sc;
   5729 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5730 
   5731 		error = wm_alloc_tx_descs(sc, txq);
   5732 		if (error)
   5733 			break;
   5734 		error = wm_alloc_tx_buffer(sc, txq);
   5735 		if (error) {
   5736 			wm_free_tx_descs(sc, txq);
   5737 			break;
   5738 		}
   5739 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5740 		if (txq->txq_interq == NULL) {
   5741 			wm_free_tx_descs(sc, txq);
   5742 			wm_free_tx_buffer(sc, txq);
   5743 			error = ENOMEM;
   5744 			break;
   5745 		}
   5746 
   5747 #ifdef WM_EVENT_COUNTERS
   5748 		xname = device_xname(sc->sc_dev);
   5749 
   5750 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5751 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5752 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5753 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5754 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5755 
   5756 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5757 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5758 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5759 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5760 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5761 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5762 
		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
		}
   5769 
   5770 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5771 
   5772 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5773 #endif /* WM_EVENT_COUNTERS */
   5774 
   5775 		tx_done++;
   5776 	}
   5777 	if (error)
   5778 		goto fail_1;
   5779 
   5780 	/*
	 * For receive
   5782 	 */
   5783 	error = 0;
   5784 	rx_done = 0;
   5785 	for (i = 0; i < sc->sc_nqueues; i++) {
   5786 #ifdef WM_EVENT_COUNTERS
   5787 		const char *xname;
   5788 #endif
   5789 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5790 		rxq->rxq_sc = sc;
   5791 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5792 
   5793 		error = wm_alloc_rx_descs(sc, rxq);
   5794 		if (error)
   5795 			break;
   5796 
   5797 		error = wm_alloc_rx_buffer(sc, rxq);
   5798 		if (error) {
   5799 			wm_free_rx_descs(sc, rxq);
   5800 			break;
   5801 		}
   5802 
   5803 #ifdef WM_EVENT_COUNTERS
   5804 		xname = device_xname(sc->sc_dev);
   5805 
   5806 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5807 
   5808 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5809 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5810 #endif /* WM_EVENT_COUNTERS */
   5811 
   5812 		rx_done++;
   5813 	}
   5814 	if (error)
   5815 		goto fail_2;
   5816 
   5817 	return 0;
   5818 
   5819  fail_2:
   5820 	for (i = 0; i < rx_done; i++) {
   5821 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5822 		wm_free_rx_buffer(sc, rxq);
   5823 		wm_free_rx_descs(sc, rxq);
   5824 		if (rxq->rxq_lock)
   5825 			mutex_obj_free(rxq->rxq_lock);
   5826 	}
   5827  fail_1:
   5828 	for (i = 0; i < tx_done; i++) {
   5829 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5830 		pcq_destroy(txq->txq_interq);
   5831 		wm_free_tx_buffer(sc, txq);
   5832 		wm_free_tx_descs(sc, txq);
   5833 		if (txq->txq_lock)
   5834 			mutex_obj_free(txq->txq_lock);
   5835 	}
   5836 
   5837 	kmem_free(sc->sc_queue,
   5838 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5839  fail_0:
   5840 	return error;
   5841 }
   5842 
   5843 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers
   5846  */
   5847 static void
   5848 wm_free_txrx_queues(struct wm_softc *sc)
   5849 {
   5850 	int i;
   5851 
   5852 	for (i = 0; i < sc->sc_nqueues; i++) {
   5853 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5854 		wm_free_rx_buffer(sc, rxq);
   5855 		wm_free_rx_descs(sc, rxq);
   5856 		if (rxq->rxq_lock)
   5857 			mutex_obj_free(rxq->rxq_lock);
   5858 	}
   5859 
   5860 	for (i = 0; i < sc->sc_nqueues; i++) {
   5861 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5862 		wm_free_tx_buffer(sc, txq);
   5863 		wm_free_tx_descs(sc, txq);
   5864 		if (txq->txq_lock)
   5865 			mutex_obj_free(txq->txq_lock);
   5866 	}
   5867 
   5868 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5869 }
   5870 
   5871 static void
   5872 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5873 {
   5874 
   5875 	KASSERT(mutex_owned(txq->txq_lock));
   5876 
   5877 	/* Initialize the transmit descriptor ring. */
   5878 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5879 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5880 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5881 	txq->txq_free = WM_NTXDESC(txq);
   5882 	txq->txq_next = 0;
   5883 }
   5884 
   5885 static void
   5886 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5887     struct wm_txqueue *txq)
   5888 {
   5889 
   5890 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5891 		device_xname(sc->sc_dev), __func__));
   5892 	KASSERT(mutex_owned(txq->txq_lock));
   5893 
   5894 	if (sc->sc_type < WM_T_82543) {
   5895 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5896 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5897 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5898 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5899 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5900 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5901 	} else {
   5902 		int qid = wmq->wmq_id;
   5903 
   5904 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5905 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5906 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5907 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5908 
   5909 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5910 			/*
   5911 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   5913 			 */
   5914 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5915 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5916 			    | TXDCTL_WTHRESH(0));
   5917 		else {
   5918 			/* ITR / 4 */
   5919 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5920 			if (sc->sc_type >= WM_T_82540) {
   5921 				/* should be same */
   5922 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5923 			}
   5924 
   5925 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5926 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5927 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5928 		}
   5929 	}
   5930 }
   5931 
   5932 static void
   5933 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5934 {
   5935 	int i;
   5936 
   5937 	KASSERT(mutex_owned(txq->txq_lock));
   5938 
   5939 	/* Initialize the transmit job descriptors. */
   5940 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5941 		txq->txq_soft[i].txs_mbuf = NULL;
   5942 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5943 	txq->txq_snext = 0;
   5944 	txq->txq_sdirty = 0;
   5945 }
   5946 
   5947 static void
   5948 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5949     struct wm_txqueue *txq)
   5950 {
   5951 
   5952 	KASSERT(mutex_owned(txq->txq_lock));
   5953 
   5954 	/*
   5955 	 * Set up some register offsets that are different between
   5956 	 * the i82542 and the i82543 and later chips.
   5957 	 */
   5958 	if (sc->sc_type < WM_T_82543)
   5959 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5960 	else
   5961 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5962 
   5963 	wm_init_tx_descs(sc, txq);
   5964 	wm_init_tx_regs(sc, wmq, txq);
   5965 	wm_init_tx_buffer(sc, txq);
   5966 }
   5967 
   5968 static void
   5969 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5970     struct wm_rxqueue *rxq)
   5971 {
   5972 
   5973 	KASSERT(mutex_owned(rxq->rxq_lock));
   5974 
   5975 	/*
   5976 	 * Initialize the receive descriptor and receive job
   5977 	 * descriptor rings.
   5978 	 */
   5979 	if (sc->sc_type < WM_T_82543) {
   5980 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5981 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5982 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5983 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5984 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5985 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5986 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5987 
   5988 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5989 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5990 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5991 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5992 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5993 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5994 	} else {
   5995 		int qid = wmq->wmq_id;
   5996 
   5997 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5998 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5999 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   6000 
   6001 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6002 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "i82575 or newer\n", __func__, MCLBYTES);
   6004 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   6005 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
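			/*
			 * Example (assuming SRRCTL_BSIZEPKT_SHIFT is 10,
			 * i.e. 1KB units): with the common MCLBYTES of
			 * 2048, the expression above programs
			 * 2048 >> 10 = 2, i.e. 2KB receive buffers.
			 */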
   6006 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6007 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6008 			    | RXDCTL_WTHRESH(1));
   6009 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6010 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6011 		} else {
   6012 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6013 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6014 			/* ITR / 4 */
   6015 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6016 			/* MUST be same */
   6017 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6018 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6019 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6020 		}
   6021 	}
   6022 }
   6023 
   6024 static int
   6025 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6026 {
   6027 	struct wm_rxsoft *rxs;
   6028 	int error, i;
   6029 
   6030 	KASSERT(mutex_owned(rxq->rxq_lock));
   6031 
   6032 	for (i = 0; i < WM_NRXDESC; i++) {
   6033 		rxs = &rxq->rxq_soft[i];
   6034 		if (rxs->rxs_mbuf == NULL) {
   6035 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6036 				log(LOG_ERR, "%s: unable to allocate or map "
   6037 				    "rx buffer %d, error = %d\n",
   6038 				    device_xname(sc->sc_dev), i, error);
   6039 				/*
   6040 				 * XXX Should attempt to run with fewer receive
   6041 				 * XXX buffers instead of just failing.
   6042 				 */
   6043 				wm_rxdrain(rxq);
   6044 				return ENOMEM;
   6045 			}
   6046 		} else {
   6047 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6048 				wm_init_rxdesc(rxq, i);
   6049 			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
   6053 			 */
   6054 		}
   6055 	}
   6056 	rxq->rxq_ptr = 0;
   6057 	rxq->rxq_discard = 0;
   6058 	WM_RXCHAIN_RESET(rxq);
   6059 
   6060 	return 0;
   6061 }
   6062 
   6063 static int
   6064 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6065     struct wm_rxqueue *rxq)
   6066 {
   6067 
   6068 	KASSERT(mutex_owned(rxq->rxq_lock));
   6069 
   6070 	/*
   6071 	 * Set up some register offsets that are different between
   6072 	 * the i82542 and the i82543 and later chips.
   6073 	 */
   6074 	if (sc->sc_type < WM_T_82543)
   6075 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6076 	else
   6077 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6078 
   6079 	wm_init_rx_regs(sc, wmq, rxq);
   6080 	return wm_init_rx_buffer(sc, rxq);
   6081 }
   6082 
   6083 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6086  */
   6087 static int
   6088 wm_init_txrx_queues(struct wm_softc *sc)
   6089 {
   6090 	int i, error = 0;
   6091 
   6092 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6093 		device_xname(sc->sc_dev), __func__));
   6094 
   6095 	for (i = 0; i < sc->sc_nqueues; i++) {
   6096 		struct wm_queue *wmq = &sc->sc_queue[i];
   6097 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6098 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6099 
   6100 		mutex_enter(txq->txq_lock);
   6101 		wm_init_tx_queue(sc, wmq, txq);
   6102 		mutex_exit(txq->txq_lock);
   6103 
   6104 		mutex_enter(rxq->rxq_lock);
   6105 		error = wm_init_rx_queue(sc, wmq, rxq);
   6106 		mutex_exit(rxq->rxq_lock);
   6107 		if (error)
   6108 			break;
   6109 	}
   6110 
   6111 	return error;
   6112 }
   6113 
   6114 /*
   6115  * wm_tx_offload:
   6116  *
   6117  *	Set up TCP/IP checksumming parameters for the
   6118  *	specified packet.
   6119  */
   6120 static int
   6121 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6122     uint8_t *fieldsp)
   6123 {
   6124 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6125 	struct mbuf *m0 = txs->txs_mbuf;
   6126 	struct livengood_tcpip_ctxdesc *t;
   6127 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6128 	uint32_t ipcse;
   6129 	struct ether_header *eh;
   6130 	int offset, iphl;
   6131 	uint8_t fields;
   6132 
   6133 	/*
   6134 	 * XXX It would be nice if the mbuf pkthdr had offset
   6135 	 * fields for the protocol headers.
   6136 	 */
   6137 
   6138 	eh = mtod(m0, struct ether_header *);
   6139 	switch (htons(eh->ether_type)) {
   6140 	case ETHERTYPE_IP:
   6141 	case ETHERTYPE_IPV6:
   6142 		offset = ETHER_HDR_LEN;
   6143 		break;
   6144 
   6145 	case ETHERTYPE_VLAN:
   6146 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6147 		break;
   6148 
   6149 	default:
   6150 		/*
   6151 		 * Don't support this protocol or encapsulation.
   6152 		 */
   6153 		*fieldsp = 0;
   6154 		*cmdp = 0;
   6155 		return 0;
   6156 	}
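	/*
	 * Example of the arithmetic below (a sketch): for a VLAN-tagged
	 * IPv4 packet, offset = 14 + 4 = 18, and with a 20-byte IP
	 * header ipcse = 18 + 20 - 1 = 37, the offset of the last byte
	 * of the IP header, as the context descriptor expects.
	 */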
   6157 
   6158 	if ((m0->m_pkthdr.csum_flags &
   6159 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6160 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6161 	} else {
   6162 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6163 	}
   6164 	ipcse = offset + iphl - 1;
   6165 
   6166 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6167 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6168 	seg = 0;
   6169 	fields = 0;
   6170 
   6171 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6172 		int hlen = offset + iphl;
   6173 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6174 
   6175 		if (__predict_false(m0->m_len <
   6176 				    (hlen + sizeof(struct tcphdr)))) {
   6177 			/*
   6178 			 * TCP/IP headers are not in the first mbuf; we need
   6179 			 * to do this the slow and painful way.  Let's just
   6180 			 * hope this doesn't happen very often.
   6181 			 */
   6182 			struct tcphdr th;
   6183 
   6184 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6185 
   6186 			m_copydata(m0, hlen, sizeof(th), &th);
   6187 			if (v4) {
   6188 				struct ip ip;
   6189 
   6190 				m_copydata(m0, offset, sizeof(ip), &ip);
   6191 				ip.ip_len = 0;
   6192 				m_copyback(m0,
   6193 				    offset + offsetof(struct ip, ip_len),
   6194 				    sizeof(ip.ip_len), &ip.ip_len);
   6195 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6196 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6197 			} else {
   6198 				struct ip6_hdr ip6;
   6199 
   6200 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6201 				ip6.ip6_plen = 0;
   6202 				m_copyback(m0,
   6203 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6204 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6205 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6206 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6207 			}
   6208 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6209 			    sizeof(th.th_sum), &th.th_sum);
   6210 
   6211 			hlen += th.th_off << 2;
   6212 		} else {
   6213 			/*
   6214 			 * TCP/IP headers are in the first mbuf; we can do
   6215 			 * this the easy way.
   6216 			 */
   6217 			struct tcphdr *th;
   6218 
   6219 			if (v4) {
   6220 				struct ip *ip =
   6221 				    (void *)(mtod(m0, char *) + offset);
   6222 				th = (void *)(mtod(m0, char *) + hlen);
   6223 
   6224 				ip->ip_len = 0;
   6225 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6226 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6227 			} else {
   6228 				struct ip6_hdr *ip6 =
   6229 				    (void *)(mtod(m0, char *) + offset);
   6230 				th = (void *)(mtod(m0, char *) + hlen);
   6231 
   6232 				ip6->ip6_plen = 0;
   6233 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6234 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6235 			}
   6236 			hlen += th->th_off << 2;
   6237 		}
   6238 
   6239 		if (v4) {
   6240 			WM_Q_EVCNT_INCR(txq, txtso);
   6241 			cmdlen |= WTX_TCPIP_CMD_IP;
   6242 		} else {
   6243 			WM_Q_EVCNT_INCR(txq, txtso6);
   6244 			ipcse = 0;
   6245 		}
   6246 		cmd |= WTX_TCPIP_CMD_TSE;
   6247 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6248 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6249 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6250 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6251 	}
   6252 
   6253 	/*
   6254 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6255 	 * offload feature, if we load the context descriptor, we
   6256 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6257 	 */
   6258 
   6259 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6260 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6261 	    WTX_TCPIP_IPCSE(ipcse);
   6262 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6263 		WM_Q_EVCNT_INCR(txq, txipsum);
   6264 		fields |= WTX_IXSM;
   6265 	}
   6266 
   6267 	offset += iphl;
   6268 
   6269 	if (m0->m_pkthdr.csum_flags &
   6270 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6271 		WM_Q_EVCNT_INCR(txq, txtusum);
   6272 		fields |= WTX_TXSM;
   6273 		tucs = WTX_TCPIP_TUCSS(offset) |
   6274 		    WTX_TCPIP_TUCSO(offset +
   6275 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6276 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6277 	} else if ((m0->m_pkthdr.csum_flags &
   6278 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6279 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6280 		fields |= WTX_TXSM;
   6281 		tucs = WTX_TCPIP_TUCSS(offset) |
   6282 		    WTX_TCPIP_TUCSO(offset +
   6283 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6284 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6285 	} else {
   6286 		/* Just initialize it to a valid TCP context. */
   6287 		tucs = WTX_TCPIP_TUCSS(offset) |
   6288 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6289 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6290 	}
   6291 
   6292 	/* Fill in the context descriptor. */
   6293 	t = (struct livengood_tcpip_ctxdesc *)
   6294 	    &txq->txq_descs[txq->txq_next];
   6295 	t->tcpip_ipcs = htole32(ipcs);
   6296 	t->tcpip_tucs = htole32(tucs);
   6297 	t->tcpip_cmdlen = htole32(cmdlen);
   6298 	t->tcpip_seg = htole32(seg);
   6299 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6300 
   6301 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6302 	txs->txs_ndesc++;
   6303 
   6304 	*cmdp = cmd;
   6305 	*fieldsp = fields;
   6306 
   6307 	return 0;
   6308 }
   6309 
   6310 static inline int
   6311 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6312 {
   6313 	struct wm_softc *sc = ifp->if_softc;
   6314 	u_int cpuid = cpu_index(curcpu());
   6315 
    6316 	/*
    6317 	 * Currently a simple distribution strategy based on CPU index.
    6318 	 * TODO:
    6319 	 * distribute by flowid (RSS hash value).
    6320 	 */
   6321 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6322 }
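         
         /*
          * A flowid-based wm_select_txqueue() might look like the sketch
          * below.  The accessor is hypothetical; this tree does not record
          * an RSS hash in the mbuf, so the receive path would have to be
          * taught to do so first:
          *
          *	u_int hash = wm_mbuf_flowhash(m);	(hypothetical)
          *	return hash % sc->sc_nqueues;
          *
          * Keying on the flow keeps every packet of a connection on one
          * queue, avoiding reordering when the sending LWP migrates.
          */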
   6323 
   6324 /*
   6325  * wm_start:		[ifnet interface function]
   6326  *
   6327  *	Start packet transmission on the interface.
   6328  */
   6329 static void
   6330 wm_start(struct ifnet *ifp)
   6331 {
   6332 	struct wm_softc *sc = ifp->if_softc;
   6333 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6334 
   6335 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6336 
   6337 	/*
   6338 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6339 	 */
   6340 
   6341 	mutex_enter(txq->txq_lock);
   6342 	if (!txq->txq_stopping)
   6343 		wm_start_locked(ifp);
   6344 	mutex_exit(txq->txq_lock);
   6345 }
   6346 
   6347 static void
   6348 wm_start_locked(struct ifnet *ifp)
   6349 {
   6350 	struct wm_softc *sc = ifp->if_softc;
   6351 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6352 
   6353 	wm_send_common_locked(ifp, txq, false);
   6354 }
   6355 
   6356 static int
   6357 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6358 {
   6359 	int qid;
   6360 	struct wm_softc *sc = ifp->if_softc;
   6361 	struct wm_txqueue *txq;
   6362 
   6363 	qid = wm_select_txqueue(ifp, m);
   6364 	txq = &sc->sc_queue[qid].wmq_txq;
   6365 
   6366 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6367 		m_freem(m);
   6368 		WM_Q_EVCNT_INCR(txq, txdrop);
   6369 		return ENOBUFS;
   6370 	}
   6371 
   6372 	/*
   6373 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6374 	 */
   6375 	ifp->if_obytes += m->m_pkthdr.len;
   6376 	if (m->m_flags & M_MCAST)
   6377 		ifp->if_omcasts++;
   6378 
   6379 	if (mutex_tryenter(txq->txq_lock)) {
   6380 		if (!txq->txq_stopping)
   6381 			wm_transmit_locked(ifp, txq);
   6382 		mutex_exit(txq->txq_lock);
   6383 	}
   6384 
   6385 	return 0;
   6386 }
   6387 
   6388 static void
   6389 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6390 {
   6391 
   6392 	wm_send_common_locked(ifp, txq, true);
   6393 }
   6394 
   6395 static void
   6396 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6397     bool is_transmit)
   6398 {
   6399 	struct wm_softc *sc = ifp->if_softc;
   6400 	struct mbuf *m0;
   6401 	struct m_tag *mtag;
   6402 	struct wm_txsoft *txs;
   6403 	bus_dmamap_t dmamap;
   6404 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6405 	bus_addr_t curaddr;
   6406 	bus_size_t seglen, curlen;
   6407 	uint32_t cksumcmd;
   6408 	uint8_t cksumfields;
   6409 
   6410 	KASSERT(mutex_owned(txq->txq_lock));
   6411 
   6412 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6413 		return;
   6414 
   6415 	/* Remember the previous number of free descriptors. */
   6416 	ofree = txq->txq_free;
   6417 
   6418 	/*
   6419 	 * Loop through the send queue, setting up transmit descriptors
   6420 	 * until we drain the queue, or use up all available transmit
   6421 	 * descriptors.
   6422 	 */
   6423 	for (;;) {
   6424 		m0 = NULL;
   6425 
   6426 		/* Get a work queue entry. */
   6427 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6428 			wm_txeof(sc, txq);
   6429 			if (txq->txq_sfree == 0) {
   6430 				DPRINTF(WM_DEBUG_TX,
   6431 				    ("%s: TX: no free job descriptors\n",
   6432 					device_xname(sc->sc_dev)));
   6433 				WM_Q_EVCNT_INCR(txq, txsstall);
   6434 				break;
   6435 			}
   6436 		}
   6437 
   6438 		/* Grab a packet off the queue. */
   6439 		if (is_transmit)
   6440 			m0 = pcq_get(txq->txq_interq);
   6441 		else
   6442 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6443 		if (m0 == NULL)
   6444 			break;
   6445 
   6446 		DPRINTF(WM_DEBUG_TX,
   6447 		    ("%s: TX: have packet to transmit: %p\n",
   6448 		    device_xname(sc->sc_dev), m0));
   6449 
   6450 		txs = &txq->txq_soft[txq->txq_snext];
   6451 		dmamap = txs->txs_dmamap;
   6452 
   6453 		use_tso = (m0->m_pkthdr.csum_flags &
   6454 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6455 
   6456 		/*
   6457 		 * So says the Linux driver:
   6458 		 * The controller does a simple calculation to make sure
   6459 		 * there is enough room in the FIFO before initiating the
   6460 		 * DMA for each buffer.  The calc is:
   6461 		 *	4 = ceil(buffer len / MSS)
   6462 		 * To make sure we don't overrun the FIFO, adjust the max
   6463 		 * buffer len if the MSS drops.
   6464 		 */
   6465 		dmamap->dm_maxsegsz =
   6466 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6467 		    ? m0->m_pkthdr.segsz << 2
   6468 		    : WTX_MAX_LEN;
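         		/*
         		 * Worked example: with a typical MSS of 1448, the cap
         		 * is 4 * 1448 = 5792 bytes per DMA segment, so
         		 * ceil(segment len / MSS) never exceeds 4 and the
         		 * controller's FIFO estimate above stays valid.
         		 */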
   6469 
   6470 		/*
   6471 		 * Load the DMA map.  If this fails, the packet either
   6472 		 * didn't fit in the allotted number of segments, or we
   6473 		 * were short on resources.  For the too-many-segments
   6474 		 * case, we simply report an error and drop the packet,
   6475 		 * since we can't sanely copy a jumbo packet to a single
   6476 		 * buffer.
   6477 		 */
   6478 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6479 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6480 		if (error) {
   6481 			if (error == EFBIG) {
   6482 				WM_Q_EVCNT_INCR(txq, txdrop);
   6483 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6484 				    "DMA segments, dropping...\n",
   6485 				    device_xname(sc->sc_dev));
   6486 				wm_dump_mbuf_chain(sc, m0);
   6487 				m_freem(m0);
   6488 				continue;
   6489 			}
    6490 			/* Short on resources, just stop for now. */
   6491 			DPRINTF(WM_DEBUG_TX,
   6492 			    ("%s: TX: dmamap load failed: %d\n",
   6493 			    device_xname(sc->sc_dev), error));
   6494 			break;
   6495 		}
   6496 
   6497 		segs_needed = dmamap->dm_nsegs;
   6498 		if (use_tso) {
   6499 			/* For sentinel descriptor; see below. */
   6500 			segs_needed++;
   6501 		}
   6502 
   6503 		/*
   6504 		 * Ensure we have enough descriptors free to describe
   6505 		 * the packet.  Note, we always reserve one descriptor
   6506 		 * at the end of the ring due to the semantics of the
   6507 		 * TDT register, plus one more in the event we need
   6508 		 * to load offload context.
   6509 		 */
   6510 		if (segs_needed > txq->txq_free - 2) {
   6511 			/*
   6512 			 * Not enough free descriptors to transmit this
   6513 			 * packet.  We haven't committed anything yet,
   6514 			 * so just unload the DMA map, put the packet
    6515 			 * back on the queue, and punt.  Notify the upper
   6516 			 * layer that there are no more slots left.
   6517 			 */
   6518 			DPRINTF(WM_DEBUG_TX,
   6519 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6520 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6521 			    segs_needed, txq->txq_free - 1));
   6522 			ifp->if_flags |= IFF_OACTIVE;
   6523 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6524 			WM_Q_EVCNT_INCR(txq, txdstall);
   6525 			break;
   6526 		}
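         		/*
         		 * Example of the check above: with txq_free == 8, a
         		 * packet needing 7 descriptors is deferred, since one
         		 * slot must stay unused per the TDT semantics and one
         		 * is held in reserve for a context descriptor.
         		 */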
   6527 
   6528 		/*
   6529 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6530 		 * once we know we can transmit the packet, since we
   6531 		 * do some internal FIFO space accounting here.
   6532 		 */
   6533 		if (sc->sc_type == WM_T_82547 &&
   6534 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6535 			DPRINTF(WM_DEBUG_TX,
   6536 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6537 			    device_xname(sc->sc_dev)));
   6538 			ifp->if_flags |= IFF_OACTIVE;
   6539 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6540 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6541 			break;
   6542 		}
   6543 
   6544 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6545 
   6546 		DPRINTF(WM_DEBUG_TX,
   6547 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6548 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6549 
   6550 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6551 
   6552 		/*
   6553 		 * Store a pointer to the packet so that we can free it
   6554 		 * later.
   6555 		 *
    6556 		 * Initially, we take the number of descriptors the packet
    6557 		 * uses to be the number of DMA segments.  This may be
    6558 		 * incremented by 1 if we do checksum offload (a descriptor
    6559 		 * is used to set the checksum context).
   6560 		 */
   6561 		txs->txs_mbuf = m0;
   6562 		txs->txs_firstdesc = txq->txq_next;
   6563 		txs->txs_ndesc = segs_needed;
   6564 
   6565 		/* Set up offload parameters for this packet. */
   6566 		if (m0->m_pkthdr.csum_flags &
   6567 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6568 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6569 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6570 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6571 					  &cksumfields) != 0) {
   6572 				/* Error message already displayed. */
   6573 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6574 				continue;
   6575 			}
   6576 		} else {
   6577 			cksumcmd = 0;
   6578 			cksumfields = 0;
   6579 		}
   6580 
   6581 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6582 
   6583 		/* Sync the DMA map. */
   6584 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6585 		    BUS_DMASYNC_PREWRITE);
   6586 
   6587 		/* Initialize the transmit descriptor. */
   6588 		for (nexttx = txq->txq_next, seg = 0;
   6589 		     seg < dmamap->dm_nsegs; seg++) {
   6590 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6591 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6592 			     seglen != 0;
   6593 			     curaddr += curlen, seglen -= curlen,
   6594 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6595 				curlen = seglen;
   6596 
   6597 				/*
   6598 				 * So says the Linux driver:
   6599 				 * Work around for premature descriptor
   6600 				 * write-backs in TSO mode.  Append a
   6601 				 * 4-byte sentinel descriptor.
   6602 				 */
   6603 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6604 				    curlen > 8)
   6605 					curlen -= 4;
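         				/*
         				 * Shaving 4 bytes here leaves seglen
         				 * nonzero, so the loop makes one more
         				 * pass and emits a separate 4-byte
         				 * descriptor: the sentinel accounted
         				 * for in segs_needed above.
         				 */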
   6606 
   6607 				wm_set_dma_addr(
   6608 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6609 				txq->txq_descs[nexttx].wtx_cmdlen
   6610 				    = htole32(cksumcmd | curlen);
   6611 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6612 				    = 0;
   6613 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6614 				    = cksumfields;
   6615 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6616 				lasttx = nexttx;
   6617 
   6618 				DPRINTF(WM_DEBUG_TX,
   6619 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6620 				     "len %#04zx\n",
   6621 				    device_xname(sc->sc_dev), nexttx,
   6622 				    (uint64_t)curaddr, curlen));
   6623 			}
   6624 		}
   6625 
   6626 		KASSERT(lasttx != -1);
   6627 
   6628 		/*
   6629 		 * Set up the command byte on the last descriptor of
   6630 		 * the packet.  If we're in the interrupt delay window,
   6631 		 * delay the interrupt.
   6632 		 */
   6633 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6634 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6635 
   6636 		/*
   6637 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6638 		 * up the descriptor to encapsulate the packet for us.
   6639 		 *
   6640 		 * This is only valid on the last descriptor of the packet.
   6641 		 */
   6642 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6643 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6644 			    htole32(WTX_CMD_VLE);
   6645 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6646 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6647 		}
   6648 
   6649 		txs->txs_lastdesc = lasttx;
   6650 
   6651 		DPRINTF(WM_DEBUG_TX,
   6652 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6653 		    device_xname(sc->sc_dev),
   6654 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6655 
   6656 		/* Sync the descriptors we're using. */
   6657 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6658 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6659 
   6660 		/* Give the packet to the chip. */
   6661 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6662 
   6663 		DPRINTF(WM_DEBUG_TX,
   6664 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6665 
   6666 		DPRINTF(WM_DEBUG_TX,
   6667 		    ("%s: TX: finished transmitting packet, job %d\n",
   6668 		    device_xname(sc->sc_dev), txq->txq_snext));
   6669 
   6670 		/* Advance the tx pointer. */
   6671 		txq->txq_free -= txs->txs_ndesc;
   6672 		txq->txq_next = nexttx;
   6673 
   6674 		txq->txq_sfree--;
   6675 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6676 
   6677 		/* Pass the packet to any BPF listeners. */
   6678 		bpf_mtap(ifp, m0);
   6679 	}
   6680 
   6681 	if (m0 != NULL) {
   6682 		ifp->if_flags |= IFF_OACTIVE;
   6683 		WM_Q_EVCNT_INCR(txq, txdrop);
   6684 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6685 			__func__));
   6686 		m_freem(m0);
   6687 	}
   6688 
   6689 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6690 		/* No more slots; notify upper layer. */
   6691 		ifp->if_flags |= IFF_OACTIVE;
   6692 	}
   6693 
   6694 	if (txq->txq_free != ofree) {
   6695 		/* Set a watchdog timer in case the chip flakes out. */
   6696 		ifp->if_timer = 5;
   6697 	}
   6698 }
   6699 
   6700 /*
   6701  * wm_nq_tx_offload:
   6702  *
   6703  *	Set up TCP/IP checksumming parameters for the
   6704  *	specified packet, for NEWQUEUE devices
   6705  */
   6706 static int
   6707 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6708     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6709 {
   6710 	struct mbuf *m0 = txs->txs_mbuf;
   6711 	struct m_tag *mtag;
   6712 	uint32_t vl_len, mssidx, cmdc;
   6713 	struct ether_header *eh;
   6714 	int offset, iphl;
   6715 
   6716 	/*
   6717 	 * XXX It would be nice if the mbuf pkthdr had offset
   6718 	 * fields for the protocol headers.
   6719 	 */
   6720 	*cmdlenp = 0;
   6721 	*fieldsp = 0;
   6722 
   6723 	eh = mtod(m0, struct ether_header *);
   6724 	switch (htons(eh->ether_type)) {
   6725 	case ETHERTYPE_IP:
   6726 	case ETHERTYPE_IPV6:
   6727 		offset = ETHER_HDR_LEN;
   6728 		break;
   6729 
   6730 	case ETHERTYPE_VLAN:
   6731 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6732 		break;
   6733 
   6734 	default:
   6735 		/* Don't support this protocol or encapsulation. */
   6736 		*do_csum = false;
   6737 		return 0;
   6738 	}
   6739 	*do_csum = true;
   6740 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6741 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6742 
   6743 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6744 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6745 
   6746 	if ((m0->m_pkthdr.csum_flags &
   6747 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6748 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6749 	} else {
   6750 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6751 	}
   6752 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6753 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6754 
   6755 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6756 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6757 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6758 		*cmdlenp |= NQTX_CMD_VLE;
   6759 	}
   6760 
   6761 	mssidx = 0;
   6762 
   6763 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6764 		int hlen = offset + iphl;
   6765 		int tcp_hlen;
   6766 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6767 
   6768 		if (__predict_false(m0->m_len <
   6769 				    (hlen + sizeof(struct tcphdr)))) {
   6770 			/*
   6771 			 * TCP/IP headers are not in the first mbuf; we need
   6772 			 * to do this the slow and painful way.  Let's just
   6773 			 * hope this doesn't happen very often.
   6774 			 */
   6775 			struct tcphdr th;
   6776 
   6777 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6778 
   6779 			m_copydata(m0, hlen, sizeof(th), &th);
   6780 			if (v4) {
   6781 				struct ip ip;
   6782 
   6783 				m_copydata(m0, offset, sizeof(ip), &ip);
   6784 				ip.ip_len = 0;
   6785 				m_copyback(m0,
   6786 				    offset + offsetof(struct ip, ip_len),
   6787 				    sizeof(ip.ip_len), &ip.ip_len);
   6788 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6789 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6790 			} else {
   6791 				struct ip6_hdr ip6;
   6792 
   6793 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6794 				ip6.ip6_plen = 0;
   6795 				m_copyback(m0,
   6796 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6797 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6798 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6799 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6800 			}
   6801 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6802 			    sizeof(th.th_sum), &th.th_sum);
   6803 
   6804 			tcp_hlen = th.th_off << 2;
   6805 		} else {
   6806 			/*
   6807 			 * TCP/IP headers are in the first mbuf; we can do
   6808 			 * this the easy way.
   6809 			 */
   6810 			struct tcphdr *th;
   6811 
   6812 			if (v4) {
   6813 				struct ip *ip =
   6814 				    (void *)(mtod(m0, char *) + offset);
   6815 				th = (void *)(mtod(m0, char *) + hlen);
   6816 
   6817 				ip->ip_len = 0;
   6818 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6819 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6820 			} else {
   6821 				struct ip6_hdr *ip6 =
   6822 				    (void *)(mtod(m0, char *) + offset);
   6823 				th = (void *)(mtod(m0, char *) + hlen);
   6824 
   6825 				ip6->ip6_plen = 0;
   6826 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6827 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6828 			}
   6829 			tcp_hlen = th->th_off << 2;
   6830 		}
   6831 		hlen += tcp_hlen;
   6832 		*cmdlenp |= NQTX_CMD_TSE;
   6833 
   6834 		if (v4) {
   6835 			WM_Q_EVCNT_INCR(txq, txtso);
   6836 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6837 		} else {
   6838 			WM_Q_EVCNT_INCR(txq, txtso6);
   6839 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6840 		}
    6841 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    6842 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6843 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6844 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6845 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6846 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6847 	} else {
   6848 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6849 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6850 	}
   6851 
   6852 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6853 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6854 		cmdc |= NQTXC_CMD_IP4;
   6855 	}
   6856 
   6857 	if (m0->m_pkthdr.csum_flags &
   6858 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6859 		WM_Q_EVCNT_INCR(txq, txtusum);
   6860 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6861 			cmdc |= NQTXC_CMD_TCP;
   6862 		} else {
   6863 			cmdc |= NQTXC_CMD_UDP;
   6864 		}
   6865 		cmdc |= NQTXC_CMD_IP4;
   6866 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6867 	}
   6868 	if (m0->m_pkthdr.csum_flags &
   6869 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6870 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6871 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6872 			cmdc |= NQTXC_CMD_TCP;
   6873 		} else {
   6874 			cmdc |= NQTXC_CMD_UDP;
   6875 		}
   6876 		cmdc |= NQTXC_CMD_IP6;
   6877 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6878 	}
   6879 
   6880 	/* Fill in the context descriptor. */
   6881 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6882 	    htole32(vl_len);
   6883 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6884 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6885 	    htole32(cmdc);
   6886 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6887 	    htole32(mssidx);
   6888 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6889 	DPRINTF(WM_DEBUG_TX,
   6890 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6891 	    txq->txq_next, 0, vl_len));
   6892 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6893 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6894 	txs->txs_ndesc++;
   6895 	return 0;
   6896 }
   6897 
   6898 /*
   6899  * wm_nq_start:		[ifnet interface function]
   6900  *
   6901  *	Start packet transmission on the interface for NEWQUEUE devices
   6902  */
   6903 static void
   6904 wm_nq_start(struct ifnet *ifp)
   6905 {
   6906 	struct wm_softc *sc = ifp->if_softc;
   6907 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6908 
   6909 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6910 
   6911 	/*
   6912 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6913 	 */
   6914 
   6915 	mutex_enter(txq->txq_lock);
   6916 	if (!txq->txq_stopping)
   6917 		wm_nq_start_locked(ifp);
   6918 	mutex_exit(txq->txq_lock);
   6919 }
   6920 
   6921 static void
   6922 wm_nq_start_locked(struct ifnet *ifp)
   6923 {
   6924 	struct wm_softc *sc = ifp->if_softc;
   6925 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6926 
   6927 	wm_nq_send_common_locked(ifp, txq, false);
   6928 }
   6929 
   6930 static int
   6931 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6932 {
   6933 	int qid;
   6934 	struct wm_softc *sc = ifp->if_softc;
   6935 	struct wm_txqueue *txq;
   6936 
   6937 	qid = wm_select_txqueue(ifp, m);
   6938 	txq = &sc->sc_queue[qid].wmq_txq;
   6939 
   6940 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6941 		m_freem(m);
   6942 		WM_Q_EVCNT_INCR(txq, txdrop);
   6943 		return ENOBUFS;
   6944 	}
   6945 
   6946 	/*
   6947 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6948 	 */
   6949 	ifp->if_obytes += m->m_pkthdr.len;
   6950 	if (m->m_flags & M_MCAST)
   6951 		ifp->if_omcasts++;
   6952 
   6953 	if (mutex_tryenter(txq->txq_lock)) {
   6954 		if (!txq->txq_stopping)
   6955 			wm_nq_transmit_locked(ifp, txq);
   6956 		mutex_exit(txq->txq_lock);
   6957 	}
   6958 
   6959 	return 0;
   6960 }
   6961 
   6962 static void
   6963 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6964 {
   6965 
   6966 	wm_nq_send_common_locked(ifp, txq, true);
   6967 }
   6968 
   6969 static void
   6970 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6971     bool is_transmit)
   6972 {
   6973 	struct wm_softc *sc = ifp->if_softc;
   6974 	struct mbuf *m0;
   6975 	struct m_tag *mtag;
   6976 	struct wm_txsoft *txs;
   6977 	bus_dmamap_t dmamap;
   6978 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6979 	bool do_csum, sent;
   6980 
   6981 	KASSERT(mutex_owned(txq->txq_lock));
   6982 
   6983 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6984 		return;
   6985 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6986 		return;
   6987 
   6988 	sent = false;
   6989 
   6990 	/*
   6991 	 * Loop through the send queue, setting up transmit descriptors
   6992 	 * until we drain the queue, or use up all available transmit
   6993 	 * descriptors.
   6994 	 */
   6995 	for (;;) {
   6996 		m0 = NULL;
   6997 
   6998 		/* Get a work queue entry. */
   6999 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7000 			wm_txeof(sc, txq);
   7001 			if (txq->txq_sfree == 0) {
   7002 				DPRINTF(WM_DEBUG_TX,
   7003 				    ("%s: TX: no free job descriptors\n",
   7004 					device_xname(sc->sc_dev)));
   7005 				WM_Q_EVCNT_INCR(txq, txsstall);
   7006 				break;
   7007 			}
   7008 		}
   7009 
   7010 		/* Grab a packet off the queue. */
   7011 		if (is_transmit)
   7012 			m0 = pcq_get(txq->txq_interq);
   7013 		else
   7014 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7015 		if (m0 == NULL)
   7016 			break;
   7017 
   7018 		DPRINTF(WM_DEBUG_TX,
   7019 		    ("%s: TX: have packet to transmit: %p\n",
   7020 		    device_xname(sc->sc_dev), m0));
   7021 
   7022 		txs = &txq->txq_soft[txq->txq_snext];
   7023 		dmamap = txs->txs_dmamap;
   7024 
   7025 		/*
   7026 		 * Load the DMA map.  If this fails, the packet either
   7027 		 * didn't fit in the allotted number of segments, or we
   7028 		 * were short on resources.  For the too-many-segments
   7029 		 * case, we simply report an error and drop the packet,
   7030 		 * since we can't sanely copy a jumbo packet to a single
   7031 		 * buffer.
   7032 		 */
   7033 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7034 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7035 		if (error) {
   7036 			if (error == EFBIG) {
   7037 				WM_Q_EVCNT_INCR(txq, txdrop);
   7038 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7039 				    "DMA segments, dropping...\n",
   7040 				    device_xname(sc->sc_dev));
   7041 				wm_dump_mbuf_chain(sc, m0);
   7042 				m_freem(m0);
   7043 				continue;
   7044 			}
   7045 			/* Short on resources, just stop for now. */
   7046 			DPRINTF(WM_DEBUG_TX,
   7047 			    ("%s: TX: dmamap load failed: %d\n",
   7048 			    device_xname(sc->sc_dev), error));
   7049 			break;
   7050 		}
   7051 
   7052 		segs_needed = dmamap->dm_nsegs;
   7053 
   7054 		/*
   7055 		 * Ensure we have enough descriptors free to describe
   7056 		 * the packet.  Note, we always reserve one descriptor
   7057 		 * at the end of the ring due to the semantics of the
   7058 		 * TDT register, plus one more in the event we need
   7059 		 * to load offload context.
   7060 		 */
   7061 		if (segs_needed > txq->txq_free - 2) {
   7062 			/*
   7063 			 * Not enough free descriptors to transmit this
   7064 			 * packet.  We haven't committed anything yet,
   7065 			 * so just unload the DMA map, put the packet
    7066 			 * back on the queue, and punt.  Notify the upper
   7067 			 * layer that there are no more slots left.
   7068 			 */
   7069 			DPRINTF(WM_DEBUG_TX,
   7070 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7071 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7072 			    segs_needed, txq->txq_free - 1));
   7073 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7074 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7075 			WM_Q_EVCNT_INCR(txq, txdstall);
   7076 			break;
   7077 		}
   7078 
   7079 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7080 
   7081 		DPRINTF(WM_DEBUG_TX,
   7082 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7083 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7084 
   7085 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7086 
   7087 		/*
   7088 		 * Store a pointer to the packet so that we can free it
   7089 		 * later.
   7090 		 *
    7091 		 * Initially, we take the number of descriptors the packet
    7092 		 * uses to be the number of DMA segments.  This may be
    7093 		 * incremented by 1 if we do checksum offload (a descriptor
    7094 		 * is used to set the checksum context).
   7095 		 */
   7096 		txs->txs_mbuf = m0;
   7097 		txs->txs_firstdesc = txq->txq_next;
   7098 		txs->txs_ndesc = segs_needed;
   7099 
   7100 		/* Set up offload parameters for this packet. */
   7101 		uint32_t cmdlen, fields, dcmdlen;
   7102 		if (m0->m_pkthdr.csum_flags &
   7103 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7104 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7105 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7106 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7107 			    &do_csum) != 0) {
   7108 				/* Error message already displayed. */
   7109 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7110 				continue;
   7111 			}
   7112 		} else {
   7113 			do_csum = false;
   7114 			cmdlen = 0;
   7115 			fields = 0;
   7116 		}
   7117 
   7118 		/* Sync the DMA map. */
   7119 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7120 		    BUS_DMASYNC_PREWRITE);
   7121 
   7122 		/* Initialize the first transmit descriptor. */
   7123 		nexttx = txq->txq_next;
   7124 		if (!do_csum) {
    7125 			/* Set up a legacy descriptor. */
   7126 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7127 			    dmamap->dm_segs[0].ds_addr);
   7128 			txq->txq_descs[nexttx].wtx_cmdlen =
   7129 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7130 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7131 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7132 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7133 			    NULL) {
   7134 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7135 				    htole32(WTX_CMD_VLE);
   7136 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7137 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7138 			} else {
   7139 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7140 			}
   7141 			dcmdlen = 0;
   7142 		} else {
    7143 			/* Set up an advanced data descriptor. */
   7144 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7145 			    htole64(dmamap->dm_segs[0].ds_addr);
   7146 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7147 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7148 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7149 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7150 			    htole32(fields);
   7151 			DPRINTF(WM_DEBUG_TX,
   7152 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7153 			    device_xname(sc->sc_dev), nexttx,
   7154 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7155 			DPRINTF(WM_DEBUG_TX,
   7156 			    ("\t 0x%08x%08x\n", fields,
   7157 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7158 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7159 		}
   7160 
   7161 		lasttx = nexttx;
   7162 		nexttx = WM_NEXTTX(txq, nexttx);
    7163 		/*
    7164 		 * Fill in the next descriptors.  The legacy and advanced
    7165 		 * formats are the same from here on.
    7166 		 */
   7167 		for (seg = 1; seg < dmamap->dm_nsegs;
   7168 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7169 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7170 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7171 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7172 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7173 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7174 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7175 			lasttx = nexttx;
   7176 
   7177 			DPRINTF(WM_DEBUG_TX,
   7178 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7179 			     "len %#04zx\n",
   7180 			    device_xname(sc->sc_dev), nexttx,
   7181 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7182 			    dmamap->dm_segs[seg].ds_len));
   7183 		}
   7184 
   7185 		KASSERT(lasttx != -1);
   7186 
   7187 		/*
   7188 		 * Set up the command byte on the last descriptor of
   7189 		 * the packet.  If we're in the interrupt delay window,
   7190 		 * delay the interrupt.
   7191 		 */
   7192 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7193 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7194 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7195 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
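         		/*
         		 * The KASSERT above documents why this is safe for both
         		 * layouts: the legacy and advanced formats place the EOP
         		 * and RS bits at the same positions, so the legacy
         		 * wtx_cmdlen field can set them in either case.
         		 */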
   7196 
   7197 		txs->txs_lastdesc = lasttx;
   7198 
   7199 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7200 		    device_xname(sc->sc_dev),
   7201 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7202 
   7203 		/* Sync the descriptors we're using. */
   7204 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7205 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7206 
   7207 		/* Give the packet to the chip. */
   7208 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7209 		sent = true;
   7210 
   7211 		DPRINTF(WM_DEBUG_TX,
   7212 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7213 
   7214 		DPRINTF(WM_DEBUG_TX,
   7215 		    ("%s: TX: finished transmitting packet, job %d\n",
   7216 		    device_xname(sc->sc_dev), txq->txq_snext));
   7217 
   7218 		/* Advance the tx pointer. */
   7219 		txq->txq_free -= txs->txs_ndesc;
   7220 		txq->txq_next = nexttx;
   7221 
   7222 		txq->txq_sfree--;
   7223 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7224 
   7225 		/* Pass the packet to any BPF listeners. */
   7226 		bpf_mtap(ifp, m0);
   7227 	}
   7228 
   7229 	if (m0 != NULL) {
   7230 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7231 		WM_Q_EVCNT_INCR(txq, txdrop);
   7232 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7233 			__func__));
   7234 		m_freem(m0);
   7235 	}
   7236 
   7237 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7238 		/* No more slots; notify upper layer. */
   7239 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7240 	}
   7241 
   7242 	if (sent) {
   7243 		/* Set a watchdog timer in case the chip flakes out. */
   7244 		ifp->if_timer = 5;
   7245 	}
   7246 }
   7247 
   7248 /* Interrupt */
   7249 
   7250 /*
   7251  * wm_txeof:
   7252  *
   7253  *	Helper; handle transmit interrupts.
   7254  */
   7255 static int
   7256 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7257 {
   7258 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7259 	struct wm_txsoft *txs;
   7260 	bool processed = false;
   7261 	int count = 0;
   7262 	int i;
   7263 	uint8_t status;
   7264 
   7265 	KASSERT(mutex_owned(txq->txq_lock));
   7266 
   7267 	if (txq->txq_stopping)
   7268 		return 0;
   7269 
   7270 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7271 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7272 	else
   7273 		ifp->if_flags &= ~IFF_OACTIVE;
   7274 
   7275 	/*
   7276 	 * Go through the Tx list and free mbufs for those
   7277 	 * frames which have been transmitted.
   7278 	 */
   7279 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7280 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7281 		txs = &txq->txq_soft[i];
   7282 
   7283 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7284 			device_xname(sc->sc_dev), i));
   7285 
   7286 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7287 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7288 
   7289 		status =
   7290 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7291 		if ((status & WTX_ST_DD) == 0) {
   7292 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7293 			    BUS_DMASYNC_PREREAD);
   7294 			break;
   7295 		}
   7296 
   7297 		processed = true;
   7298 		count++;
   7299 		DPRINTF(WM_DEBUG_TX,
   7300 		    ("%s: TX: job %d done: descs %d..%d\n",
   7301 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7302 		    txs->txs_lastdesc));
   7303 
   7304 		/*
   7305 		 * XXX We should probably be using the statistics
   7306 		 * XXX registers, but I don't know if they exist
   7307 		 * XXX on chips before the i82544.
   7308 		 */
   7309 
   7310 #ifdef WM_EVENT_COUNTERS
   7311 		if (status & WTX_ST_TU)
   7312 			WM_Q_EVCNT_INCR(txq, tu);
   7313 #endif /* WM_EVENT_COUNTERS */
   7314 
   7315 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7316 			ifp->if_oerrors++;
   7317 			if (status & WTX_ST_LC)
   7318 				log(LOG_WARNING, "%s: late collision\n",
   7319 				    device_xname(sc->sc_dev));
   7320 			else if (status & WTX_ST_EC) {
   7321 				ifp->if_collisions += 16;
   7322 				log(LOG_WARNING, "%s: excessive collisions\n",
   7323 				    device_xname(sc->sc_dev));
   7324 			}
   7325 		} else
   7326 			ifp->if_opackets++;
   7327 
   7328 		txq->txq_free += txs->txs_ndesc;
   7329 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7330 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7331 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7332 		m_freem(txs->txs_mbuf);
   7333 		txs->txs_mbuf = NULL;
   7334 	}
   7335 
   7336 	/* Update the dirty transmit buffer pointer. */
   7337 	txq->txq_sdirty = i;
   7338 	DPRINTF(WM_DEBUG_TX,
   7339 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7340 
   7341 	if (count != 0)
   7342 		rnd_add_uint32(&sc->rnd_source, count);
   7343 
   7344 	/*
   7345 	 * If there are no more pending transmissions, cancel the watchdog
   7346 	 * timer.
   7347 	 */
   7348 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7349 		ifp->if_timer = 0;
   7350 
   7351 	return processed;
   7352 }
   7353 
   7354 /*
   7355  * wm_rxeof:
   7356  *
   7357  *	Helper; handle receive interrupts.
   7358  */
   7359 static void
   7360 wm_rxeof(struct wm_rxqueue *rxq)
   7361 {
   7362 	struct wm_softc *sc = rxq->rxq_sc;
   7363 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7364 	struct wm_rxsoft *rxs;
   7365 	struct mbuf *m;
   7366 	int i, len;
   7367 	int count = 0;
   7368 	uint8_t status, errors;
   7369 	uint16_t vlantag;
   7370 
   7371 	KASSERT(mutex_owned(rxq->rxq_lock));
   7372 
   7373 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7374 		rxs = &rxq->rxq_soft[i];
   7375 
   7376 		DPRINTF(WM_DEBUG_RX,
   7377 		    ("%s: RX: checking descriptor %d\n",
   7378 		    device_xname(sc->sc_dev), i));
   7379 
   7380 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7381 
   7382 		status = rxq->rxq_descs[i].wrx_status;
   7383 		errors = rxq->rxq_descs[i].wrx_errors;
   7384 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7385 		vlantag = rxq->rxq_descs[i].wrx_special;
   7386 
   7387 		if ((status & WRX_ST_DD) == 0) {
   7388 			/* We have processed all of the receive descriptors. */
   7389 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7390 			break;
   7391 		}
   7392 
   7393 		count++;
   7394 		if (__predict_false(rxq->rxq_discard)) {
   7395 			DPRINTF(WM_DEBUG_RX,
   7396 			    ("%s: RX: discarding contents of descriptor %d\n",
   7397 			    device_xname(sc->sc_dev), i));
   7398 			wm_init_rxdesc(rxq, i);
   7399 			if (status & WRX_ST_EOP) {
   7400 				/* Reset our state. */
   7401 				DPRINTF(WM_DEBUG_RX,
   7402 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7403 				    device_xname(sc->sc_dev)));
   7404 				rxq->rxq_discard = 0;
   7405 			}
   7406 			continue;
   7407 		}
   7408 
   7409 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7410 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7411 
   7412 		m = rxs->rxs_mbuf;
   7413 
   7414 		/*
   7415 		 * Add a new receive buffer to the ring, unless of
   7416 		 * course the length is zero. Treat the latter as a
   7417 		 * failed mapping.
   7418 		 */
   7419 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7420 			/*
   7421 			 * Failed, throw away what we've done so
   7422 			 * far, and discard the rest of the packet.
   7423 			 */
   7424 			ifp->if_ierrors++;
   7425 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7426 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7427 			wm_init_rxdesc(rxq, i);
   7428 			if ((status & WRX_ST_EOP) == 0)
   7429 				rxq->rxq_discard = 1;
   7430 			if (rxq->rxq_head != NULL)
   7431 				m_freem(rxq->rxq_head);
   7432 			WM_RXCHAIN_RESET(rxq);
   7433 			DPRINTF(WM_DEBUG_RX,
   7434 			    ("%s: RX: Rx buffer allocation failed, "
   7435 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7436 			    rxq->rxq_discard ? " (discard)" : ""));
   7437 			continue;
   7438 		}
   7439 
   7440 		m->m_len = len;
   7441 		rxq->rxq_len += len;
   7442 		DPRINTF(WM_DEBUG_RX,
   7443 		    ("%s: RX: buffer at %p len %d\n",
   7444 		    device_xname(sc->sc_dev), m->m_data, len));
   7445 
   7446 		/* If this is not the end of the packet, keep looking. */
   7447 		if ((status & WRX_ST_EOP) == 0) {
   7448 			WM_RXCHAIN_LINK(rxq, m);
   7449 			DPRINTF(WM_DEBUG_RX,
   7450 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7451 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7452 			continue;
   7453 		}
   7454 
    7455 		/*
    7456 		 * Okay, we have the entire packet now.  The chip is
    7457 		 * configured to include the FCS except on the I350, I354
    7458 		 * and I21[01] (not all chips can be configured to strip
    7459 		 * it), so we need to trim it.  We may also need to
    7460 		 * shorten the previous mbuf in the chain if the current
    7461 		 * mbuf is too short to hold the whole FCS.
    7462 		 * Due to an erratum, the RCTL_SECRC bit in RCTL is always
    7463 		 * set on the I350, so the FCS is not trimmed there.
    7464 		 */
   7465 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7466 		    && (sc->sc_type != WM_T_I210)
   7467 		    && (sc->sc_type != WM_T_I211)) {
   7468 			if (m->m_len < ETHER_CRC_LEN) {
   7469 				rxq->rxq_tail->m_len
   7470 				    -= (ETHER_CRC_LEN - m->m_len);
   7471 				m->m_len = 0;
   7472 			} else
   7473 				m->m_len -= ETHER_CRC_LEN;
   7474 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7475 		} else
   7476 			len = rxq->rxq_len;
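         		/*
         		 * Trim example: if only 2 of the 4 FCS bytes landed in
         		 * the final mbuf, those 2 are dropped by zeroing its
         		 * m_len and the other 2 are taken off the tail of the
         		 * previous mbuf in the chain.
         		 */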
   7477 
   7478 		WM_RXCHAIN_LINK(rxq, m);
   7479 
   7480 		*rxq->rxq_tailp = NULL;
   7481 		m = rxq->rxq_head;
   7482 
   7483 		WM_RXCHAIN_RESET(rxq);
   7484 
   7485 		DPRINTF(WM_DEBUG_RX,
   7486 		    ("%s: RX: have entire packet, len -> %d\n",
   7487 		    device_xname(sc->sc_dev), len));
   7488 
   7489 		/* If an error occurred, update stats and drop the packet. */
   7490 		if (errors &
   7491 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7492 			if (errors & WRX_ER_SE)
   7493 				log(LOG_WARNING, "%s: symbol error\n",
   7494 				    device_xname(sc->sc_dev));
   7495 			else if (errors & WRX_ER_SEQ)
   7496 				log(LOG_WARNING, "%s: receive sequence error\n",
   7497 				    device_xname(sc->sc_dev));
   7498 			else if (errors & WRX_ER_CE)
   7499 				log(LOG_WARNING, "%s: CRC error\n",
   7500 				    device_xname(sc->sc_dev));
   7501 			m_freem(m);
   7502 			continue;
   7503 		}
   7504 
   7505 		/* No errors.  Receive the packet. */
   7506 		m_set_rcvif(m, ifp);
   7507 		m->m_pkthdr.len = len;
   7508 
   7509 		/*
   7510 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7511 		 * for us.  Associate the tag with the packet.
   7512 		 */
   7513 		/* XXXX should check for i350 and i354 */
   7514 		if ((status & WRX_ST_VP) != 0) {
   7515 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7516 		}
   7517 
   7518 		/* Set up checksum info for this packet. */
   7519 		if ((status & WRX_ST_IXSM) == 0) {
   7520 			if (status & WRX_ST_IPCS) {
   7521 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7522 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7523 				if (errors & WRX_ER_IPE)
   7524 					m->m_pkthdr.csum_flags |=
   7525 					    M_CSUM_IPv4_BAD;
   7526 			}
   7527 			if (status & WRX_ST_TCPCS) {
   7528 				/*
   7529 				 * Note: we don't know if this was TCP or UDP,
   7530 				 * so we just set both bits, and expect the
   7531 				 * upper layers to deal.
   7532 				 */
   7533 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7534 				m->m_pkthdr.csum_flags |=
   7535 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7536 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7537 				if (errors & WRX_ER_TCPE)
   7538 					m->m_pkthdr.csum_flags |=
   7539 					    M_CSUM_TCP_UDP_BAD;
   7540 			}
   7541 		}
   7542 
   7543 		ifp->if_ipackets++;
   7544 
   7545 		mutex_exit(rxq->rxq_lock);
   7546 
   7547 		/* Pass this up to any BPF listeners. */
   7548 		bpf_mtap(ifp, m);
   7549 
   7550 		/* Pass it on. */
   7551 		if_percpuq_enqueue(sc->sc_ipq, m);
   7552 
   7553 		mutex_enter(rxq->rxq_lock);
   7554 
   7555 		if (rxq->rxq_stopping)
   7556 			break;
   7557 	}
   7558 
   7559 	/* Update the receive pointer. */
   7560 	rxq->rxq_ptr = i;
   7561 	if (count != 0)
   7562 		rnd_add_uint32(&sc->rnd_source, count);
   7563 
   7564 	DPRINTF(WM_DEBUG_RX,
   7565 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7566 }
   7567 
   7568 /*
   7569  * wm_linkintr_gmii:
   7570  *
   7571  *	Helper; handle link interrupts for GMII.
   7572  */
   7573 static void
   7574 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7575 {
   7576 
   7577 	KASSERT(WM_CORE_LOCKED(sc));
   7578 
   7579 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7580 		__func__));
   7581 
   7582 	if (icr & ICR_LSC) {
   7583 		uint32_t reg;
   7584 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7585 
   7586 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7587 			wm_gig_downshift_workaround_ich8lan(sc);
   7588 
   7589 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7590 			device_xname(sc->sc_dev)));
   7591 		mii_pollstat(&sc->sc_mii);
   7592 		if (sc->sc_type == WM_T_82543) {
   7593 			int miistatus, active;
   7594 
   7595 			/*
   7596 			 * With 82543, we need to force speed and
   7597 			 * duplex on the MAC equal to what the PHY
   7598 			 * speed and duplex configuration is.
   7599 			 */
   7600 			miistatus = sc->sc_mii.mii_media_status;
   7601 
   7602 			if (miistatus & IFM_ACTIVE) {
   7603 				active = sc->sc_mii.mii_media_active;
   7604 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7605 				switch (IFM_SUBTYPE(active)) {
   7606 				case IFM_10_T:
   7607 					sc->sc_ctrl |= CTRL_SPEED_10;
   7608 					break;
   7609 				case IFM_100_TX:
   7610 					sc->sc_ctrl |= CTRL_SPEED_100;
   7611 					break;
   7612 				case IFM_1000_T:
   7613 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7614 					break;
   7615 				default:
   7616 					/*
    7617 					 * Fiber?
    7618 					 * Should not get here.
   7619 					 */
   7620 					printf("unknown media (%x)\n", active);
   7621 					break;
   7622 				}
   7623 				if (active & IFM_FDX)
   7624 					sc->sc_ctrl |= CTRL_FD;
   7625 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7626 			}
   7627 		} else if ((sc->sc_type == WM_T_ICH8)
   7628 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7629 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7630 		} else if (sc->sc_type == WM_T_PCH) {
   7631 			wm_k1_gig_workaround_hv(sc,
   7632 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7633 		}
   7634 
   7635 		if ((sc->sc_phytype == WMPHY_82578)
   7636 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7637 			== IFM_1000_T)) {
   7638 
   7639 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7640 				delay(200*1000); /* XXX too big */
   7641 
   7642 				/* Link stall fix for link up */
   7643 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7644 				    HV_MUX_DATA_CTRL,
   7645 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7646 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7647 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7648 				    HV_MUX_DATA_CTRL,
   7649 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7650 			}
   7651 		}
    7652 		/*
    7653 		 * I217 packet loss issue:
    7654 		 * ensure that the FEXTNVM4 beacon duration is set
    7655 		 * correctly on power-up.
    7656 		 * Set the beacon duration for the I217 to 8 usec.
    7657 		 */
   7658 		if ((sc->sc_type == WM_T_PCH_LPT)
   7659 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7660 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7661 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7662 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7663 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7664 		}
   7665 
   7666 		/* XXX Work-around I218 hang issue */
   7667 		/* e1000_k1_workaround_lpt_lp() */
   7668 
   7669 		if ((sc->sc_type == WM_T_PCH_LPT)
   7670 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7671 			/*
   7672 			 * Set platform power management values for Latency
   7673 			 * Tolerance Reporting (LTR)
   7674 			 */
   7675 			wm_platform_pm_pch_lpt(sc,
   7676 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7677 				    != 0));
   7678 		}
   7679 
   7680 		/* FEXTNVM6 K1-off workaround */
   7681 		if (sc->sc_type == WM_T_PCH_SPT) {
   7682 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7683 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7684 			    & FEXTNVM6_K1_OFF_ENABLE)
   7685 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7686 			else
   7687 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7688 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7689 		}
   7690 	} else if (icr & ICR_RXSEQ) {
   7691 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7692 			device_xname(sc->sc_dev)));
   7693 	}
   7694 }
   7695 
   7696 /*
   7697  * wm_linkintr_tbi:
   7698  *
   7699  *	Helper; handle link interrupts for TBI mode.
   7700  */
   7701 static void
   7702 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7703 {
   7704 	uint32_t status;
   7705 
   7706 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7707 		__func__));
   7708 
   7709 	status = CSR_READ(sc, WMREG_STATUS);
   7710 	if (icr & ICR_LSC) {
   7711 		if (status & STATUS_LU) {
   7712 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7713 			    device_xname(sc->sc_dev),
   7714 			    (status & STATUS_FD) ? "FDX" : "HDX"));
    7715 			/*
    7716 			 * NOTE: the chip updates TFCE and RFCE in CTRL
    7717 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
    7718 			 */
   7719 
   7720 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7721 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7722 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7723 			if (status & STATUS_FD)
   7724 				sc->sc_tctl |=
   7725 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7726 			else
   7727 				sc->sc_tctl |=
   7728 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7729 			if (sc->sc_ctrl & CTRL_TFCE)
   7730 				sc->sc_fcrtl |= FCRTL_XONE;
   7731 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7732 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7733 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7734 				      sc->sc_fcrtl);
   7735 			sc->sc_tbi_linkup = 1;
   7736 		} else {
   7737 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7738 			    device_xname(sc->sc_dev)));
   7739 			sc->sc_tbi_linkup = 0;
   7740 		}
   7741 		/* Update LED */
   7742 		wm_tbi_serdes_set_linkled(sc);
   7743 	} else if (icr & ICR_RXSEQ) {
   7744 		DPRINTF(WM_DEBUG_LINK,
   7745 		    ("%s: LINK: Receive sequence error\n",
   7746 		    device_xname(sc->sc_dev)));
   7747 	}
   7748 }
   7749 
   7750 /*
   7751  * wm_linkintr_serdes:
   7752  *
    7753  *	Helper; handle link interrupts for SERDES mode.
   7754  */
   7755 static void
   7756 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7757 {
   7758 	struct mii_data *mii = &sc->sc_mii;
   7759 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7760 	uint32_t pcs_adv, pcs_lpab, reg;
   7761 
   7762 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7763 		__func__));
   7764 
   7765 	if (icr & ICR_LSC) {
   7766 		/* Check PCS */
   7767 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7768 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7769 			mii->mii_media_status |= IFM_ACTIVE;
   7770 			sc->sc_tbi_linkup = 1;
   7771 		} else {
   7772 			mii->mii_media_status |= IFM_NONE;
   7773 			sc->sc_tbi_linkup = 0;
   7774 			wm_tbi_serdes_set_linkled(sc);
   7775 			return;
   7776 		}
   7777 		mii->mii_media_active |= IFM_1000_SX;
   7778 		if ((reg & PCS_LSTS_FDX) != 0)
   7779 			mii->mii_media_active |= IFM_FDX;
   7780 		else
   7781 			mii->mii_media_active |= IFM_HDX;
   7782 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7783 			/* Check flow */
   7784 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7785 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7786 				DPRINTF(WM_DEBUG_LINK,
   7787 				    ("XXX LINKOK but not ACOMP\n"));
   7788 				return;
   7789 			}
   7790 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7791 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7792 			DPRINTF(WM_DEBUG_LINK,
   7793 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7794 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7795 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7796 				mii->mii_media_active |= IFM_FLOW
   7797 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7798 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7799 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7800 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7801 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7802 				mii->mii_media_active |= IFM_FLOW
   7803 				    | IFM_ETH_TXPAUSE;
   7804 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7805 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7806 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7807 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7808 				mii->mii_media_active |= IFM_FLOW
   7809 				    | IFM_ETH_RXPAUSE;
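         			/*
         			 * Summary of the resolution above (local
         			 * advertisement / link partner ability):
         			 *	SYM & SYM          -> TX and RX pause
         			 *	ASYM only & both   -> TX pause only
         			 *	both & ASYM only   -> RX pause only
         			 */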
   7810 		}
   7811 		/* Update LED */
   7812 		wm_tbi_serdes_set_linkled(sc);
   7813 	} else {
   7814 		DPRINTF(WM_DEBUG_LINK,
   7815 		    ("%s: LINK: Receive sequence error\n",
   7816 		    device_xname(sc->sc_dev)));
   7817 	}
   7818 }
   7819 
   7820 /*
   7821  * wm_linkintr:
   7822  *
   7823  *	Helper; handle link interrupts.
   7824  */
   7825 static void
   7826 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7827 {
   7828 
   7829 	KASSERT(WM_CORE_LOCKED(sc));
   7830 
   7831 	if (sc->sc_flags & WM_F_HAS_MII)
   7832 		wm_linkintr_gmii(sc, icr);
   7833 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7834 	    && (sc->sc_type >= WM_T_82575))
   7835 		wm_linkintr_serdes(sc, icr);
   7836 	else
   7837 		wm_linkintr_tbi(sc, icr);
   7838 }
   7839 
   7840 /*
   7841  * wm_intr_legacy:
   7842  *
   7843  *	Interrupt service routine for INTx and MSI.
   7844  */
   7845 static int
   7846 wm_intr_legacy(void *arg)
   7847 {
   7848 	struct wm_softc *sc = arg;
   7849 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7850 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7851 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7852 	uint32_t icr, rndval = 0;
   7853 	int handled = 0;
   7854 
   7855 	DPRINTF(WM_DEBUG_TX,
   7856 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7857 	while (1 /* CONSTCOND */) {
   7858 		icr = CSR_READ(sc, WMREG_ICR);
   7859 		if ((icr & sc->sc_icr) == 0)
   7860 			break;
   7861 		if (rndval == 0)
   7862 			rndval = icr;
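         		/* Keep the first nonzero ICR value read as entropy. */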
   7863 
   7864 		mutex_enter(rxq->rxq_lock);
   7865 
   7866 		if (rxq->rxq_stopping) {
   7867 			mutex_exit(rxq->rxq_lock);
   7868 			break;
   7869 		}
   7870 
   7871 		handled = 1;
   7872 
   7873 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7874 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7875 			DPRINTF(WM_DEBUG_RX,
   7876 			    ("%s: RX: got Rx intr 0x%08x\n",
   7877 			    device_xname(sc->sc_dev),
   7878 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7879 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7880 		}
   7881 #endif
   7882 		wm_rxeof(rxq);
   7883 
   7884 		mutex_exit(rxq->rxq_lock);
   7885 		mutex_enter(txq->txq_lock);
   7886 
   7887 		if (txq->txq_stopping) {
   7888 			mutex_exit(txq->txq_lock);
   7889 			break;
   7890 		}
   7891 
   7892 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7893 		if (icr & ICR_TXDW) {
   7894 			DPRINTF(WM_DEBUG_TX,
   7895 			    ("%s: TX: got TXDW interrupt\n",
   7896 			    device_xname(sc->sc_dev)));
   7897 			WM_Q_EVCNT_INCR(txq, txdw);
   7898 		}
   7899 #endif
   7900 		wm_txeof(sc, txq);
   7901 
   7902 		mutex_exit(txq->txq_lock);
   7903 		WM_CORE_LOCK(sc);
   7904 
   7905 		if (sc->sc_core_stopping) {
   7906 			WM_CORE_UNLOCK(sc);
   7907 			break;
   7908 		}
   7909 
   7910 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7911 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7912 			wm_linkintr(sc, icr);
   7913 		}
   7914 
   7915 		WM_CORE_UNLOCK(sc);
   7916 
   7917 		if (icr & ICR_RXO) {
   7918 #if defined(WM_DEBUG)
   7919 			log(LOG_WARNING, "%s: Receive overrun\n",
   7920 			    device_xname(sc->sc_dev));
   7921 #endif /* defined(WM_DEBUG) */
   7922 		}
   7923 	}
   7924 
   7925 	rnd_add_uint32(&sc->rnd_source, rndval);
   7926 
   7927 	if (handled) {
   7928 		/* Try to get more packets going. */
   7929 		ifp->if_start(ifp);
   7930 	}
   7931 
   7932 	return handled;
   7933 }
   7934 
   7935 static int
   7936 wm_txrxintr_msix(void *arg)
   7937 {
   7938 	struct wm_queue *wmq = arg;
   7939 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7940 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7941 	struct wm_softc *sc = txq->txq_sc;
   7942 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7943 
   7944 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7945 
   7946 	DPRINTF(WM_DEBUG_TX,
   7947 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7948 
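	/* Mask this queue's interrupts while we service them */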
   7949 	if (sc->sc_type == WM_T_82574)
   7950 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7951 	else if (sc->sc_type == WM_T_82575)
   7952 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7953 	else
   7954 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7955 
   7956 	mutex_enter(txq->txq_lock);
   7957 
   7958 	if (txq->txq_stopping) {
   7959 		mutex_exit(txq->txq_lock);
   7960 		return 0;
   7961 	}
   7962 
   7963 	WM_Q_EVCNT_INCR(txq, txdw);
   7964 	wm_txeof(sc, txq);
   7965 
   7966 	/* Try to get more packets going. */
   7967 	if (pcq_peek(txq->txq_interq) != NULL)
   7968 		wm_nq_transmit_locked(ifp, txq);
	/*
	 * Some upper-layer processing, such as ALTQ, still calls
	 * ifp->if_start() directly.
	 */
   7973 	if (wmq->wmq_id == 0) {
   7974 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7975 			wm_nq_start_locked(ifp);
   7976 	}
   7977 
   7978 	mutex_exit(txq->txq_lock);
   7979 
   7980 	DPRINTF(WM_DEBUG_RX,
   7981 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7982 	mutex_enter(rxq->rxq_lock);
   7983 
   7984 	if (rxq->rxq_stopping) {
   7985 		mutex_exit(rxq->rxq_lock);
   7986 		return 0;
   7987 	}
   7988 
   7989 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7990 	wm_rxeof(rxq);
   7991 	mutex_exit(rxq->rxq_lock);
   7992 
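	/* Re-enable this queue's interrupts */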
   7993 	if (sc->sc_type == WM_T_82574)
   7994 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7995 	else if (sc->sc_type == WM_T_82575)
   7996 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7997 	else
   7998 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7999 
   8000 	return 1;
   8001 }
   8002 
   8003 /*
   8004  * wm_linkintr_msix:
   8005  *
   8006  *	Interrupt service routine for link status change for MSI-X.
   8007  */
   8008 static int
   8009 wm_linkintr_msix(void *arg)
   8010 {
   8011 	struct wm_softc *sc = arg;
   8012 	uint32_t reg;
   8013 
   8014 	DPRINTF(WM_DEBUG_LINK,
   8015 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8016 
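	/* Reading ICR acknowledges the interrupt causes */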
   8017 	reg = CSR_READ(sc, WMREG_ICR);
   8018 	WM_CORE_LOCK(sc);
   8019 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8020 		goto out;
   8021 
   8022 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8023 	wm_linkintr(sc, ICR_LSC);
   8024 
   8025 out:
   8026 	WM_CORE_UNLOCK(sc);
   8027 
   8028 	if (sc->sc_type == WM_T_82574)
   8029 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8030 	else if (sc->sc_type == WM_T_82575)
   8031 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8032 	else
   8033 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8034 
   8035 	return 1;
   8036 }
   8037 
   8038 /*
   8039  * Media related.
   8040  * GMII, SGMII, TBI (and SERDES)
   8041  */
   8042 
   8043 /* Common */
   8044 
   8045 /*
   8046  * wm_tbi_serdes_set_linkled:
   8047  *
   8048  *	Update the link LED on TBI and SERDES devices.
   8049  */
   8050 static void
   8051 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8052 {
   8053 
   8054 	if (sc->sc_tbi_linkup)
   8055 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8056 	else
   8057 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8058 
	/* On 82540 and newer devices, the link LED is active-low */
   8060 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8061 
   8062 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8063 }
   8064 
   8065 /* GMII related */
   8066 
   8067 /*
   8068  * wm_gmii_reset:
   8069  *
   8070  *	Reset the PHY.
   8071  */
   8072 static void
   8073 wm_gmii_reset(struct wm_softc *sc)
   8074 {
   8075 	uint32_t reg;
   8076 	int rv;
   8077 
   8078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8079 		device_xname(sc->sc_dev), __func__));
   8080 
   8081 	rv = sc->phy.acquire(sc);
   8082 	if (rv != 0) {
   8083 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8084 		    __func__);
   8085 		return;
   8086 	}
   8087 
   8088 	switch (sc->sc_type) {
   8089 	case WM_T_82542_2_0:
   8090 	case WM_T_82542_2_1:
   8091 		/* null */
   8092 		break;
   8093 	case WM_T_82543:
   8094 		/*
   8095 		 * With 82543, we need to force speed and duplex on the MAC
   8096 		 * equal to what the PHY speed and duplex configuration is.
   8097 		 * In addition, we need to perform a hardware reset on the PHY
   8098 		 * to take it out of reset.
   8099 		 */
   8100 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8101 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8102 
   8103 		/* The PHY reset pin is active-low. */
   8104 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8105 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8106 		    CTRL_EXT_SWDPIN(4));
   8107 		reg |= CTRL_EXT_SWDPIO(4);
   8108 
   8109 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8110 		CSR_WRITE_FLUSH(sc);
   8111 		delay(10*1000);
   8112 
   8113 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8114 		CSR_WRITE_FLUSH(sc);
   8115 		delay(150);
   8116 #if 0
   8117 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8118 #endif
   8119 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8120 		break;
   8121 	case WM_T_82544:	/* reset 10000us */
   8122 	case WM_T_82540:
   8123 	case WM_T_82545:
   8124 	case WM_T_82545_3:
   8125 	case WM_T_82546:
   8126 	case WM_T_82546_3:
   8127 	case WM_T_82541:
   8128 	case WM_T_82541_2:
   8129 	case WM_T_82547:
   8130 	case WM_T_82547_2:
   8131 	case WM_T_82571:	/* reset 100us */
   8132 	case WM_T_82572:
   8133 	case WM_T_82573:
   8134 	case WM_T_82574:
   8135 	case WM_T_82575:
   8136 	case WM_T_82576:
   8137 	case WM_T_82580:
   8138 	case WM_T_I350:
   8139 	case WM_T_I354:
   8140 	case WM_T_I210:
   8141 	case WM_T_I211:
   8142 	case WM_T_82583:
   8143 	case WM_T_80003:
   8144 		/* generic reset */
   8145 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8146 		CSR_WRITE_FLUSH(sc);
   8147 		delay(20000);
   8148 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8149 		CSR_WRITE_FLUSH(sc);
   8150 		delay(20000);
   8151 
   8152 		if ((sc->sc_type == WM_T_82541)
   8153 		    || (sc->sc_type == WM_T_82541_2)
   8154 		    || (sc->sc_type == WM_T_82547)
   8155 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
			/* XXX add code to set the LED after PHY reset */
   8158 		}
   8159 		break;
   8160 	case WM_T_ICH8:
   8161 	case WM_T_ICH9:
   8162 	case WM_T_ICH10:
   8163 	case WM_T_PCH:
   8164 	case WM_T_PCH2:
   8165 	case WM_T_PCH_LPT:
   8166 	case WM_T_PCH_SPT:
   8167 		/* generic reset */
   8168 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8169 		CSR_WRITE_FLUSH(sc);
   8170 		delay(100);
   8171 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8172 		CSR_WRITE_FLUSH(sc);
   8173 		delay(150);
   8174 		break;
   8175 	default:
   8176 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8177 		    __func__);
   8178 		break;
   8179 	}
   8180 
   8181 	sc->phy.release(sc);
   8182 
   8183 	/* get_cfg_done */
   8184 	wm_get_cfg_done(sc);
   8185 
   8186 	/* extra setup */
   8187 	switch (sc->sc_type) {
   8188 	case WM_T_82542_2_0:
   8189 	case WM_T_82542_2_1:
   8190 	case WM_T_82543:
   8191 	case WM_T_82544:
   8192 	case WM_T_82540:
   8193 	case WM_T_82545:
   8194 	case WM_T_82545_3:
   8195 	case WM_T_82546:
   8196 	case WM_T_82546_3:
   8197 	case WM_T_82541_2:
   8198 	case WM_T_82547_2:
   8199 	case WM_T_82571:
   8200 	case WM_T_82572:
   8201 	case WM_T_82573:
   8202 	case WM_T_82575:
   8203 	case WM_T_82576:
   8204 	case WM_T_82580:
   8205 	case WM_T_I350:
   8206 	case WM_T_I354:
   8207 	case WM_T_I210:
   8208 	case WM_T_I211:
   8209 	case WM_T_80003:
   8210 		/* null */
   8211 		break;
   8212 	case WM_T_82574:
   8213 	case WM_T_82583:
   8214 		wm_lplu_d0_disable(sc);
   8215 		break;
   8216 	case WM_T_82541:
   8217 	case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
   8219 		break;
   8220 	case WM_T_ICH8:
   8221 	case WM_T_ICH9:
   8222 	case WM_T_ICH10:
   8223 	case WM_T_PCH:
   8224 	case WM_T_PCH2:
   8225 	case WM_T_PCH_LPT:
   8226 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8228 		delay(10*1000);
   8229 
   8230 		if (sc->sc_type == WM_T_PCH)
   8231 			wm_hv_phy_workaround_ich8lan(sc);
   8232 
   8233 		if (sc->sc_type == WM_T_PCH2)
   8234 			wm_lv_phy_workaround_ich8lan(sc);
   8235 
   8236 		/* Clear the host wakeup bit after lcd reset */
   8237 		if (sc->sc_type >= WM_T_PCH) {
   8238 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8239 			    BM_PORT_GEN_CFG);
   8240 			reg &= ~BM_WUC_HOST_WU_BIT;
   8241 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8242 			    BM_PORT_GEN_CFG, reg);
   8243 		}
   8244 
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
   8249 
   8250 		/* Disable D0 LPLU. */
   8251 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8252 			wm_lplu_d0_disable_pch(sc);
   8253 		else
   8254 			wm_lplu_d0_disable(sc);	/* ICH* */
   8255 		break;
   8256 	default:
   8257 		panic("%s: unknown type\n", __func__);
   8258 		break;
   8259 	}
   8260 }
   8261 
   8262 /*
   8263  * wm_get_phy_id_82575:
   8264  *
 *	Return the PHY ID, or -1 on failure.
   8266  */
   8267 static int
   8268 wm_get_phy_id_82575(struct wm_softc *sc)
   8269 {
   8270 	uint32_t reg;
   8271 	int phyid = -1;
   8272 
   8273 	/* XXX */
   8274 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8275 		return -1;
   8276 
   8277 	if (wm_sgmii_uses_mdio(sc)) {
   8278 		switch (sc->sc_type) {
   8279 		case WM_T_82575:
   8280 		case WM_T_82576:
   8281 			reg = CSR_READ(sc, WMREG_MDIC);
   8282 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8283 			break;
   8284 		case WM_T_82580:
   8285 		case WM_T_I350:
   8286 		case WM_T_I354:
   8287 		case WM_T_I210:
   8288 		case WM_T_I211:
   8289 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8290 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8291 			break;
   8292 		default:
   8293 			return -1;
   8294 		}
   8295 	}
   8296 
   8297 	return phyid;
   8298 }
   8299 
   8301 /*
   8302  * wm_gmii_mediainit:
   8303  *
   8304  *	Initialize media for use on 1000BASE-T devices.
   8305  */
   8306 static void
   8307 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8308 {
   8309 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8310 	struct mii_data *mii = &sc->sc_mii;
   8311 	uint32_t reg;
   8312 
   8313 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8314 		device_xname(sc->sc_dev), __func__));
   8315 
   8316 	/* We have GMII. */
   8317 	sc->sc_flags |= WM_F_HAS_MII;
   8318 
   8319 	if (sc->sc_type == WM_T_80003)
   8320 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8321 	else
   8322 		sc->sc_tipg = TIPG_1000T_DFLT;
   8323 
   8324 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8325 	if ((sc->sc_type == WM_T_82580)
   8326 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8327 	    || (sc->sc_type == WM_T_I211)) {
   8328 		reg = CSR_READ(sc, WMREG_PHPM);
   8329 		reg &= ~PHPM_GO_LINK_D;
   8330 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8331 	}
   8332 
   8333 	/*
   8334 	 * Let the chip set speed/duplex on its own based on
   8335 	 * signals from the PHY.
   8336 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8337 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8338 	 */
   8339 	sc->sc_ctrl |= CTRL_SLU;
   8340 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8341 
   8342 	/* Initialize our media structures and probe the GMII. */
   8343 	mii->mii_ifp = ifp;
   8344 
   8345 	/*
   8346 	 * Determine the PHY access method.
   8347 	 *
   8348 	 *  For SGMII, use SGMII specific method.
   8349 	 *
   8350 	 *  For some devices, we can determine the PHY access method
   8351 	 * from sc_type.
   8352 	 *
	 *  For ICH and PCH variants, it's difficult to determine the PHY
	 * access method from sc_type alone, so use the PCI product ID for
	 * some devices.
	 *  For other ICH8 variants, try the IGP access method first; if the
	 * PHY can't be detected that way, fall back to the BM method.
   8358 	 */
   8359 	switch (prodid) {
   8360 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8361 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8362 		/* 82577 */
   8363 		sc->sc_phytype = WMPHY_82577;
   8364 		break;
   8365 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8366 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8367 		/* 82578 */
   8368 		sc->sc_phytype = WMPHY_82578;
   8369 		break;
   8370 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8371 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8372 		/* 82579 */
   8373 		sc->sc_phytype = WMPHY_82579;
   8374 		break;
   8375 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8376 	case PCI_PRODUCT_INTEL_82801I_BM:
   8377 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8378 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8379 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8380 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8381 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8382 		/* ICH8, 9, 10 with 82567 */
   8383 		sc->sc_phytype = WMPHY_BM;
   8384 		mii->mii_readreg = wm_gmii_bm_readreg;
   8385 		mii->mii_writereg = wm_gmii_bm_writereg;
   8386 		break;
   8387 	default:
   8388 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8389 		    && !wm_sgmii_uses_mdio(sc)){
   8390 			/* SGMII */
   8391 			mii->mii_readreg = wm_sgmii_readreg;
   8392 			mii->mii_writereg = wm_sgmii_writereg;
   8393 		} else if ((sc->sc_type == WM_T_82574)
   8394 		    || (sc->sc_type == WM_T_82583)) {
   8395 			/* BM2 (phyaddr == 1) */
   8396 			sc->sc_phytype = WMPHY_BM;
   8397 			mii->mii_readreg = wm_gmii_bm_readreg;
   8398 			mii->mii_writereg = wm_gmii_bm_writereg;
   8399 		} else if (sc->sc_type >= WM_T_ICH8) {
   8400 			/* non-82567 ICH8, 9 and 10 */
   8401 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8402 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8403 		} else if (sc->sc_type >= WM_T_80003) {
   8404 			/* 80003 */
   8405 			sc->sc_phytype = WMPHY_GG82563;
   8406 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8407 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8408 		} else if (sc->sc_type >= WM_T_I210) {
   8409 			/* I210 and I211 */
   8410 			sc->sc_phytype = WMPHY_210;
   8411 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8412 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8413 		} else if (sc->sc_type >= WM_T_82580) {
   8414 			/* 82580, I350 and I354 */
   8415 			sc->sc_phytype = WMPHY_82580;
   8416 			mii->mii_readreg = wm_gmii_82580_readreg;
   8417 			mii->mii_writereg = wm_gmii_82580_writereg;
   8418 		} else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[123] etc. */
   8420 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8421 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8422 		} else {
   8423 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8424 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8425 		}
   8426 		break;
   8427 	}
   8428 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8429 		/* All PCH* use _hv_ */
   8430 		mii->mii_readreg = wm_gmii_hv_readreg;
   8431 		mii->mii_writereg = wm_gmii_hv_writereg;
   8432 	}
   8433 	mii->mii_statchg = wm_gmii_statchg;
   8434 
   8435 	/* get PHY control from SMBus to PCIe */
   8436 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8437 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8438 		wm_smbustopci(sc);
   8439 
   8440 	wm_gmii_reset(sc);
   8441 
   8442 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8443 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8444 	    wm_gmii_mediastatus);
   8445 
   8446 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8447 	    || (sc->sc_type == WM_T_82580)
   8448 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8449 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8450 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8451 			/* Attach only one port */
   8452 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8453 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8454 		} else {
   8455 			int i, id;
   8456 			uint32_t ctrl_ext;
   8457 
   8458 			id = wm_get_phy_id_82575(sc);
   8459 			if (id != -1) {
   8460 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8461 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8462 			}
   8463 			if ((id == -1)
   8464 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   8466 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8467 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8468 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8469 				CSR_WRITE_FLUSH(sc);
   8470 				delay(300*1000); /* XXX too long */
   8471 
				/* Try PHY addresses 1 through 7 */
   8473 				for (i = 1; i < 8; i++)
   8474 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8475 					    0xffffffff, i, MII_OFFSET_ANY,
   8476 					    MIIF_DOPAUSE);
   8477 
				/* Restore the previous SFP cage power state */
   8479 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8480 			}
   8481 		}
   8482 	} else {
   8483 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8484 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8485 	}
   8486 
   8487 	/*
	 * If the MAC is PCH2 or PCH_LPT and it failed to detect the MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   8490 	 */
   8491 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8492 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8493 		wm_set_mdio_slow_mode_hv(sc);
   8494 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8495 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8496 	}
   8497 
	/*
	 * (For ICH8 variants)
	 * If PHY detection failed, retry with the BM read/write functions.
	 */
   8502 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8503 		/* if failed, retry with *_bm_* */
   8504 		mii->mii_readreg = wm_gmii_bm_readreg;
   8505 		mii->mii_writereg = wm_gmii_bm_writereg;
   8506 
   8507 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8508 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8509 	}
   8510 
   8511 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   8513 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8514 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8515 		sc->sc_phytype = WMPHY_NONE;
   8516 	} else {
   8517 		/*
   8518 		 * PHY Found!
   8519 		 * Check PHY type.
   8520 		 */
   8521 		uint32_t model;
   8522 		struct mii_softc *child;
   8523 
   8524 		child = LIST_FIRST(&mii->mii_phys);
   8525 		model = child->mii_mpd_model;
   8526 		if (model == MII_MODEL_yyINTEL_I82566)
   8527 			sc->sc_phytype = WMPHY_IGP_3;
   8528 
   8529 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8530 	}
   8531 }
   8532 
   8533 /*
   8534  * wm_gmii_mediachange:	[ifmedia interface function]
   8535  *
   8536  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8537  */
   8538 static int
   8539 wm_gmii_mediachange(struct ifnet *ifp)
   8540 {
   8541 	struct wm_softc *sc = ifp->if_softc;
   8542 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8543 	int rc;
   8544 
   8545 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8546 		device_xname(sc->sc_dev), __func__));
   8547 	if ((ifp->if_flags & IFF_UP) == 0)
   8548 		return 0;
   8549 
   8550 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8551 	sc->sc_ctrl |= CTRL_SLU;
   8552 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8553 	    || (sc->sc_type > WM_T_82543)) {
   8554 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8555 	} else {
   8556 		sc->sc_ctrl &= ~CTRL_ASDE;
   8557 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8558 		if (ife->ifm_media & IFM_FDX)
   8559 			sc->sc_ctrl |= CTRL_FD;
   8560 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8561 		case IFM_10_T:
   8562 			sc->sc_ctrl |= CTRL_SPEED_10;
   8563 			break;
   8564 		case IFM_100_TX:
   8565 			sc->sc_ctrl |= CTRL_SPEED_100;
   8566 			break;
   8567 		case IFM_1000_T:
   8568 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8569 			break;
   8570 		default:
   8571 			panic("wm_gmii_mediachange: bad media 0x%x",
   8572 			    ife->ifm_media);
   8573 		}
   8574 	}
   8575 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8576 	if (sc->sc_type <= WM_T_82543)
   8577 		wm_gmii_reset(sc);
   8578 
   8579 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8580 		return 0;
   8581 	return rc;
   8582 }
   8583 
   8584 /*
   8585  * wm_gmii_mediastatus:	[ifmedia interface function]
   8586  *
   8587  *	Get the current interface media status on a 1000BASE-T device.
   8588  */
   8589 static void
   8590 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8591 {
   8592 	struct wm_softc *sc = ifp->if_softc;
   8593 
   8594 	ether_mediastatus(ifp, ifmr);
   8595 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8596 	    | sc->sc_flowflags;
   8597 }
   8598 
   8599 #define	MDI_IO		CTRL_SWDPIN(2)
   8600 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8601 #define	MDI_CLK		CTRL_SWDPIN(3)
   8602 
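/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock the given bit pattern out on the i82543's bit-banged
 *	MDIO interface, MSB first.
 */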
   8603 static void
   8604 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8605 {
   8606 	uint32_t i, v;
   8607 
   8608 	v = CSR_READ(sc, WMREG_CTRL);
   8609 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8610 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8611 
   8612 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8613 		if (data & i)
   8614 			v |= MDI_IO;
   8615 		else
   8616 			v &= ~MDI_IO;
   8617 		CSR_WRITE(sc, WMREG_CTRL, v);
   8618 		CSR_WRITE_FLUSH(sc);
   8619 		delay(10);
   8620 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8621 		CSR_WRITE_FLUSH(sc);
   8622 		delay(10);
   8623 		CSR_WRITE(sc, WMREG_CTRL, v);
   8624 		CSR_WRITE_FLUSH(sc);
   8625 		delay(10);
   8626 	}
   8627 }
   8628 
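/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock a 16-bit value in from the i82543's bit-banged MDIO interface.
 */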
   8629 static uint32_t
   8630 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8631 {
   8632 	uint32_t v, i, data = 0;
   8633 
   8634 	v = CSR_READ(sc, WMREG_CTRL);
   8635 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8636 	v |= CTRL_SWDPIO(3);
   8637 
   8638 	CSR_WRITE(sc, WMREG_CTRL, v);
   8639 	CSR_WRITE_FLUSH(sc);
   8640 	delay(10);
   8641 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8642 	CSR_WRITE_FLUSH(sc);
   8643 	delay(10);
   8644 	CSR_WRITE(sc, WMREG_CTRL, v);
   8645 	CSR_WRITE_FLUSH(sc);
   8646 	delay(10);
   8647 
   8648 	for (i = 0; i < 16; i++) {
   8649 		data <<= 1;
   8650 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8651 		CSR_WRITE_FLUSH(sc);
   8652 		delay(10);
   8653 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8654 			data |= 1;
   8655 		CSR_WRITE(sc, WMREG_CTRL, v);
   8656 		CSR_WRITE_FLUSH(sc);
   8657 		delay(10);
   8658 	}
   8659 
   8660 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8661 	CSR_WRITE_FLUSH(sc);
   8662 	delay(10);
   8663 	CSR_WRITE(sc, WMREG_CTRL, v);
   8664 	CSR_WRITE_FLUSH(sc);
   8665 	delay(10);
   8666 
   8667 	return data;
   8668 }
   8669 
   8670 #undef MDI_IO
   8671 #undef MDI_DIR
   8672 #undef MDI_CLK
   8673 
   8674 /*
   8675  * wm_gmii_i82543_readreg:	[mii interface function]
   8676  *
   8677  *	Read a PHY register on the GMII (i82543 version).
   8678  */
   8679 static int
   8680 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8681 {
   8682 	struct wm_softc *sc = device_private(self);
   8683 	int rv;
   8684 
   8685 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8686 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8687 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8688 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8689 
   8690 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8691 	    device_xname(sc->sc_dev), phy, reg, rv));
   8692 
   8693 	return rv;
   8694 }
   8695 
   8696 /*
   8697  * wm_gmii_i82543_writereg:	[mii interface function]
   8698  *
   8699  *	Write a PHY register on the GMII (i82543 version).
   8700  */
   8701 static void
   8702 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8703 {
   8704 	struct wm_softc *sc = device_private(self);
   8705 
   8706 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8707 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8708 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8709 	    (MII_COMMAND_START << 30), 32);
   8710 }
   8711 
   8712 /*
   8713  * wm_gmii_mdic_readreg:	[mii interface function]
   8714  *
   8715  *	Read a PHY register on the GMII.
   8716  */
   8717 static int
   8718 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8719 {
   8720 	struct wm_softc *sc = device_private(self);
   8721 	uint32_t mdic = 0;
   8722 	int i, rv;
   8723 
   8724 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8725 	    MDIC_REGADD(reg));
   8726 
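	/* Poll the ready bit */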
   8727 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8728 		mdic = CSR_READ(sc, WMREG_MDIC);
   8729 		if (mdic & MDIC_READY)
   8730 			break;
   8731 		delay(50);
   8732 	}
   8733 
   8734 	if ((mdic & MDIC_READY) == 0) {
   8735 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8736 		    device_xname(sc->sc_dev), phy, reg);
   8737 		rv = 0;
   8738 	} else if (mdic & MDIC_E) {
   8739 #if 0 /* This is normal if no PHY is present. */
   8740 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8741 		    device_xname(sc->sc_dev), phy, reg);
   8742 #endif
   8743 		rv = 0;
   8744 	} else {
   8745 		rv = MDIC_DATA(mdic);
   8746 		if (rv == 0xffff)
   8747 			rv = 0;
   8748 	}
   8749 
   8750 	return rv;
   8751 }
   8752 
   8753 /*
   8754  * wm_gmii_mdic_writereg:	[mii interface function]
   8755  *
   8756  *	Write a PHY register on the GMII.
   8757  */
   8758 static void
   8759 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8760 {
   8761 	struct wm_softc *sc = device_private(self);
   8762 	uint32_t mdic = 0;
   8763 	int i;
   8764 
   8765 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8766 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8767 
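	/* Poll the ready bit */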
   8768 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8769 		mdic = CSR_READ(sc, WMREG_MDIC);
   8770 		if (mdic & MDIC_READY)
   8771 			break;
   8772 		delay(50);
   8773 	}
   8774 
   8775 	if ((mdic & MDIC_READY) == 0)
   8776 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8777 		    device_xname(sc->sc_dev), phy, reg);
   8778 	else if (mdic & MDIC_E)
   8779 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8780 		    device_xname(sc->sc_dev), phy, reg);
   8781 }
   8782 
   8783 /*
   8784  * wm_gmii_i82544_readreg:	[mii interface function]
   8785  *
   8786  *	Read a PHY register on the GMII.
   8787  */
   8788 static int
   8789 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8790 {
   8791 	struct wm_softc *sc = device_private(self);
   8792 	int rv;
   8793 
   8794 	if (sc->phy.acquire(sc)) {
   8795 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8796 		    __func__);
   8797 		return 0;
   8798 	}
   8799 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8800 	sc->phy.release(sc);
   8801 
   8802 	return rv;
   8803 }
   8804 
   8805 /*
   8806  * wm_gmii_i82544_writereg:	[mii interface function]
   8807  *
   8808  *	Write a PHY register on the GMII.
   8809  */
   8810 static void
   8811 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8812 {
   8813 	struct wm_softc *sc = device_private(self);
   8814 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   8819 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8820 	sc->phy.release(sc);
   8821 }
   8822 
   8823 /*
   8824  * wm_gmii_i80003_readreg:	[mii interface function]
   8825  *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8829  */
   8830 static int
   8831 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8832 {
   8833 	struct wm_softc *sc = device_private(self);
   8834 	int rv;
   8835 
   8836 	if (phy != 1) /* only one PHY on kumeran bus */
   8837 		return 0;
   8838 
   8839 	if (sc->phy.acquire(sc)) {
   8840 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8841 		    __func__);
   8842 		return 0;
   8843 	}
   8844 
   8845 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8846 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8847 		    reg >> GG82563_PAGE_SHIFT);
   8848 	} else {
   8849 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8850 		    reg >> GG82563_PAGE_SHIFT);
   8851 	}
	/* Wait another 200us to work around the MDIC ready-bit bug */
   8853 	delay(200);
   8854 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8855 	delay(200);
   8856 	sc->phy.release(sc);
   8857 
   8858 	return rv;
   8859 }
   8860 
   8861 /*
   8862  * wm_gmii_i80003_writereg:	[mii interface function]
   8863  *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8867  */
   8868 static void
   8869 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8870 {
   8871 	struct wm_softc *sc = device_private(self);
   8872 
   8873 	if (phy != 1) /* only one PHY on kumeran bus */
   8874 		return;
   8875 
   8876 	if (sc->phy.acquire(sc)) {
   8877 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8878 		    __func__);
   8879 		return;
   8880 	}
   8881 
   8882 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8883 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8884 		    reg >> GG82563_PAGE_SHIFT);
   8885 	} else {
   8886 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8887 		    reg >> GG82563_PAGE_SHIFT);
   8888 	}
	/* Wait another 200us to work around the MDIC ready-bit bug */
   8890 	delay(200);
   8891 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8892 	delay(200);
   8893 
   8894 	sc->phy.release(sc);
   8895 }
   8896 
   8897 /*
   8898  * wm_gmii_bm_readreg:	[mii interface function]
   8899  *
 *	Read a BM PHY register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8903  */
   8904 static int
   8905 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8906 {
   8907 	struct wm_softc *sc = device_private(self);
   8908 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8909 	uint16_t val;
   8910 	int rv;
   8911 
   8912 	if (sc->phy.acquire(sc)) {
   8913 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8914 		    __func__);
   8915 		return 0;
   8916 	}
   8917 
   8918 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8919 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8920 		    || (reg == 31)) ? 1 : phy;
   8921 	/* Page 800 works differently than the rest so it has its own func */
   8922 	if (page == BM_WUC_PAGE) {
   8923 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8924 		rv = val;
   8925 		goto release;
   8926 	}
   8927 
   8928 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8929 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8930 		    && (sc->sc_type != WM_T_82583))
   8931 			wm_gmii_mdic_writereg(self, phy,
   8932 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8933 		else
   8934 			wm_gmii_mdic_writereg(self, phy,
   8935 			    BME1000_PHY_PAGE_SELECT, page);
   8936 	}
   8937 
   8938 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8939 
   8940 release:
   8941 	sc->phy.release(sc);
   8942 	return rv;
   8943 }
   8944 
   8945 /*
   8946  * wm_gmii_bm_writereg:	[mii interface function]
   8947  *
 *	Write a BM PHY register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8951  */
   8952 static void
   8953 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8954 {
   8955 	struct wm_softc *sc = device_private(self);
   8956 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8957 
   8958 	if (sc->phy.acquire(sc)) {
   8959 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8960 		    __func__);
   8961 		return;
   8962 	}
   8963 
   8964 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8965 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8966 		    || (reg == 31)) ? 1 : phy;
   8967 	/* Page 800 works differently than the rest so it has its own func */
   8968 	if (page == BM_WUC_PAGE) {
   8969 		uint16_t tmp;
   8970 
   8971 		tmp = val;
   8972 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8973 		goto release;
   8974 	}
   8975 
   8976 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8977 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8978 		    && (sc->sc_type != WM_T_82583))
   8979 			wm_gmii_mdic_writereg(self, phy,
   8980 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8981 		else
   8982 			wm_gmii_mdic_writereg(self, phy,
   8983 			    BME1000_PHY_PAGE_SELECT, page);
   8984 	}
   8985 
   8986 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8987 
   8988 release:
   8989 	sc->phy.release(sc);
   8990 }
   8991 
   8992 static void
   8993 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8994 {
   8995 	struct wm_softc *sc = device_private(self);
   8996 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8997 	uint16_t wuce, reg;
   8998 
   8999 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9000 		device_xname(sc->sc_dev), __func__));
   9001 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9002 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9004 	}
   9005 
   9006 	/*
   9007 	 * 1) Enable PHY wakeup register first.
   9008 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9009 	 */
   9010 
   9011 	/* Set page 769 */
   9012 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9013 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9014 
   9015 	/* Read WUCE and save it */
   9016 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9017 
   9018 	reg = wuce | BM_WUC_ENABLE_BIT;
   9019 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9020 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9021 
   9022 	/* Select page 800 */
   9023 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9024 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9025 
   9026 	/*
   9027 	 * 2) Access PHY wakeup register.
   9028 	 * See e1000_access_phy_wakeup_reg_bm.
   9029 	 */
   9030 
   9031 	/* Write page 800 */
   9032 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9033 
   9034 	if (rd)
   9035 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9036 	else
   9037 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9038 
   9039 	/*
   9040 	 * 3) Disable PHY wakeup register.
   9041 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9042 	 */
   9043 	/* Set page 769 */
   9044 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9045 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9046 
   9047 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9048 }
   9049 
   9050 /*
   9051  * wm_gmii_hv_readreg:	[mii interface function]
   9052  *
 *	Read an HV PHY register (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9056  */
   9057 static int
   9058 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9059 {
   9060 	struct wm_softc *sc = device_private(self);
   9061 	int rv;
   9062 
   9063 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9064 		device_xname(sc->sc_dev), __func__));
   9065 	if (sc->phy.acquire(sc)) {
   9066 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9067 		    __func__);
   9068 		return 0;
   9069 	}
   9070 
   9071 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9072 	sc->phy.release(sc);
   9073 	return rv;
   9074 }
   9075 
   9076 static int
   9077 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9078 {
   9079 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9080 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9081 	uint16_t val;
   9082 	int rv;
   9083 
   9084 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9085 
   9086 	/* Page 800 works differently than the rest so it has its own func */
   9087 	if (page == BM_WUC_PAGE) {
   9088 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9089 		return val;
   9090 	}
   9091 
	/*
	 * Pages between 1 and 767 work differently than the rest and would
	 * need their own function; not implemented yet.
	 */
   9096 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9097 		printf("gmii_hv_readreg!!!\n");
   9098 		return 0;
   9099 	}
   9100 
   9101 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9102 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9103 		    page << BME1000_PAGE_SHIFT);
   9104 	}
   9105 
   9106 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9107 	return rv;
   9108 }
   9109 
   9110 /*
   9111  * wm_gmii_hv_writereg:	[mii interface function]
   9112  *
 *	Write an HV PHY register (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9116  */
   9117 static void
   9118 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9119 {
   9120 	struct wm_softc *sc = device_private(self);
   9121 
   9122 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9123 		device_xname(sc->sc_dev), __func__));
   9124 
   9125 	if (sc->phy.acquire(sc)) {
   9126 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9127 		    __func__);
   9128 		return;
   9129 	}
   9130 
   9131 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9132 	sc->phy.release(sc);
   9133 }
   9134 
   9135 static void
   9136 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9137 {
   9138 	struct wm_softc *sc = device_private(self);
   9139 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9140 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9141 
   9142 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9143 
   9144 	/* Page 800 works differently than the rest so it has its own func */
   9145 	if (page == BM_WUC_PAGE) {
   9146 		uint16_t tmp;
   9147 
   9148 		tmp = val;
   9149 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9150 		return;
   9151 	}
   9152 
	/*
	 * Pages between 1 and 767 work differently than the rest and would
	 * need their own function; not implemented yet.
	 */
   9157 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9158 		printf("gmii_hv_writereg!!!\n");
   9159 		return;
   9160 	}
   9161 
   9162 	{
   9163 		/*
   9164 		 * XXX Workaround MDIO accesses being disabled after entering
   9165 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9166 		 * register is set)
   9167 		 */
   9168 		if (sc->sc_phytype == WMPHY_82578) {
   9169 			struct mii_softc *child;
   9170 
   9171 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9172 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9173 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9174 			    && ((val & (1 << 11)) != 0)) {
   9175 				printf("XXX need workaround\n");
   9176 			}
   9177 		}
   9178 
   9179 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9180 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9181 			    page << BME1000_PAGE_SHIFT);
   9182 		}
   9183 	}
   9184 
   9185 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9186 }
   9187 
   9188 /*
   9189  * wm_gmii_82580_readreg:	[mii interface function]
   9190  *
   9191  *	Read a PHY register on the 82580 and I350.
   9192  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9194  */
   9195 static int
   9196 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9197 {
   9198 	struct wm_softc *sc = device_private(self);
   9199 	int rv;
   9200 
   9201 	if (sc->phy.acquire(sc) != 0) {
   9202 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9203 		    __func__);
   9204 		return 0;
   9205 	}
   9206 
   9207 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9208 
   9209 	sc->phy.release(sc);
   9210 	return rv;
   9211 }
   9212 
   9213 /*
   9214  * wm_gmii_82580_writereg:	[mii interface function]
   9215  *
   9216  *	Write a PHY register on the 82580 and I350.
   9217  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9219  */
   9220 static void
   9221 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9222 {
   9223 	struct wm_softc *sc = device_private(self);
   9224 
   9225 	if (sc->phy.acquire(sc) != 0) {
   9226 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9227 		    __func__);
   9228 		return;
   9229 	}
   9230 
   9231 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9232 
   9233 	sc->phy.release(sc);
   9234 }
   9235 
   9236 /*
   9237  * wm_gmii_gs40g_readreg:	[mii interface function]
   9238  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9242  */
   9243 static int
   9244 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9245 {
   9246 	struct wm_softc *sc = device_private(self);
   9247 	int page, offset;
   9248 	int rv;
   9249 
   9250 	/* Acquire semaphore */
   9251 	if (sc->phy.acquire(sc)) {
   9252 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9253 		    __func__);
   9254 		return 0;
   9255 	}
   9256 
   9257 	/* Page select */
   9258 	page = reg >> GS40G_PAGE_SHIFT;
   9259 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9260 
   9261 	/* Read reg */
   9262 	offset = reg & GS40G_OFFSET_MASK;
   9263 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9264 
   9265 	sc->phy.release(sc);
   9266 	return rv;
   9267 }
   9268 
   9269 /*
   9270  * wm_gmii_gs40g_writereg:	[mii interface function]
   9271  *
   9272  *	Write a PHY register on the I210 and I211.
   9273  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9275  */
   9276 static void
   9277 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9278 {
   9279 	struct wm_softc *sc = device_private(self);
   9280 	int page, offset;
   9281 
   9282 	/* Acquire semaphore */
   9283 	if (sc->phy.acquire(sc)) {
   9284 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9285 		    __func__);
   9286 		return;
   9287 	}
   9288 
   9289 	/* Page select */
   9290 	page = reg >> GS40G_PAGE_SHIFT;
   9291 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9292 
   9293 	/* Write reg */
   9294 	offset = reg & GS40G_OFFSET_MASK;
   9295 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9296 
   9297 	/* Release semaphore */
   9298 	sc->phy.release(sc);
   9299 }
   9300 
   9301 /*
   9302  * wm_gmii_statchg:	[mii interface function]
   9303  *
   9304  *	Callback from MII layer when media changes.
   9305  */
   9306 static void
   9307 wm_gmii_statchg(struct ifnet *ifp)
   9308 {
   9309 	struct wm_softc *sc = ifp->if_softc;
   9310 	struct mii_data *mii = &sc->sc_mii;
   9311 
   9312 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9313 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9314 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9315 
   9316 	/*
   9317 	 * Get flow control negotiation result.
   9318 	 */
   9319 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9320 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9321 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9322 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9323 	}
   9324 
   9325 	if (sc->sc_flowflags & IFM_FLOW) {
   9326 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9327 			sc->sc_ctrl |= CTRL_TFCE;
   9328 			sc->sc_fcrtl |= FCRTL_XONE;
   9329 		}
   9330 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9331 			sc->sc_ctrl |= CTRL_RFCE;
   9332 	}
   9333 
   9334 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9335 		DPRINTF(WM_DEBUG_LINK,
   9336 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9337 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9338 	} else {
   9339 		DPRINTF(WM_DEBUG_LINK,
   9340 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9341 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9342 	}
   9343 
   9344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9345 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9346 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9347 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9348 	if (sc->sc_type == WM_T_80003) {
   9349 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9350 		case IFM_1000_T:
   9351 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9352 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9353 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9354 			break;
   9355 		default:
   9356 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9357 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9358 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9359 			break;
   9360 		}
   9361 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9362 	}
   9363 }
   9364 
   9365 /* kumeran related (80003, ICH* and PCH*) */
   9366 
   9367 /*
   9368  * wm_kmrn_readreg:
   9369  *
   9370  *	Read a kumeran register
   9371  */
   9372 static int
   9373 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9374 {
   9375 	int rv;
   9376 
   9377 	if (sc->sc_type == WM_T_80003)
   9378 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9379 	else
   9380 		rv = sc->phy.acquire(sc);
   9381 	if (rv != 0) {
   9382 		aprint_error_dev(sc->sc_dev,
   9383 		    "%s: failed to get semaphore\n", __func__);
   9384 		return 0;
   9385 	}
   9386 
   9387 	rv = wm_kmrn_readreg_locked(sc, reg);
   9388 
   9389 	if (sc->sc_type == WM_T_80003)
   9390 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9391 	else
   9392 		sc->phy.release(sc);
   9393 
   9394 	return rv;
   9395 }
   9396 
   9397 static int
   9398 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9399 {
   9400 	int rv;
   9401 
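	/* The REN bit requests a Kumeran register read */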
   9402 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9403 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9404 	    KUMCTRLSTA_REN);
   9405 	CSR_WRITE_FLUSH(sc);
   9406 	delay(2);
   9407 
   9408 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9409 
   9410 	return rv;
   9411 }
   9412 
   9413 /*
   9414  * wm_kmrn_writereg:
   9415  *
   9416  *	Write a kumeran register
   9417  */
   9418 static void
   9419 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9420 {
   9421 	int rv;
   9422 
   9423 	if (sc->sc_type == WM_T_80003)
   9424 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9425 	else
   9426 		rv = sc->phy.acquire(sc);
   9427 	if (rv != 0) {
   9428 		aprint_error_dev(sc->sc_dev,
   9429 		    "%s: failed to get semaphore\n", __func__);
   9430 		return;
   9431 	}
   9432 
   9433 	wm_kmrn_writereg_locked(sc, reg, val);
   9434 
   9435 	if (sc->sc_type == WM_T_80003)
   9436 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9437 	else
   9438 		sc->phy.release(sc);
   9439 }
   9440 
   9441 static void
   9442 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9443 {
   9444 
   9445 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9446 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9447 	    (val & KUMCTRLSTA_MASK));
   9448 }
   9449 
   9450 /* SGMII related */
   9451 
   9452 /*
   9453  * wm_sgmii_uses_mdio
   9454  *
   9455  * Check whether the transaction is to the internal PHY or the external
   9456  * MDIO interface. Return true if it's MDIO.
   9457  */
   9458 static bool
   9459 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9460 {
   9461 	uint32_t reg;
   9462 	bool ismdio = false;
   9463 
   9464 	switch (sc->sc_type) {
   9465 	case WM_T_82575:
   9466 	case WM_T_82576:
   9467 		reg = CSR_READ(sc, WMREG_MDIC);
   9468 		ismdio = ((reg & MDIC_DEST) != 0);
   9469 		break;
   9470 	case WM_T_82580:
   9471 	case WM_T_I350:
   9472 	case WM_T_I354:
   9473 	case WM_T_I210:
   9474 	case WM_T_I211:
   9475 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9476 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9477 		break;
   9478 	default:
   9479 		break;
   9480 	}
   9481 
   9482 	return ismdio;
   9483 }
   9484 
   9485 /*
   9486  * wm_sgmii_readreg:	[mii interface function]
   9487  *
   9488  *	Read a PHY register on the SGMII
   9489  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9491  */
   9492 static int
   9493 wm_sgmii_readreg(device_t self, int phy, int reg)
   9494 {
   9495 	struct wm_softc *sc = device_private(self);
   9496 	uint32_t i2ccmd;
   9497 	int i, rv;
   9498 
   9499 	if (sc->phy.acquire(sc)) {
   9500 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9501 		    __func__);
   9502 		return 0;
   9503 	}
   9504 
   9505 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9506 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9507 	    | I2CCMD_OPCODE_READ;
   9508 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9509 
   9510 	/* Poll the ready bit */
   9511 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9512 		delay(50);
   9513 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9514 		if (i2ccmd & I2CCMD_READY)
   9515 			break;
   9516 	}
   9517 	if ((i2ccmd & I2CCMD_READY) == 0)
   9518 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9519 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9520 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9521 
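	/* Swap the data bytes back from I2C byte order */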
   9522 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9523 
   9524 	sc->phy.release(sc);
   9525 	return rv;
   9526 }
   9527 
   9528 /*
   9529  * wm_sgmii_writereg:	[mii interface function]
   9530  *
   9531  *	Write a PHY register on the SGMII.
   9532  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9534  */
   9535 static void
   9536 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9537 {
   9538 	struct wm_softc *sc = device_private(self);
   9539 	uint32_t i2ccmd;
   9540 	int i;
   9541 	int val_swapped;
   9542 
   9543 	if (sc->phy.acquire(sc) != 0) {
   9544 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9545 		    __func__);
   9546 		return;
   9547 	}
   9548 	/* Swap the data bytes for the I2C interface */
   9549 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9550 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9551 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9552 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9553 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9554 
   9555 	/* Poll the ready bit */
   9556 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9557 		delay(50);
   9558 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9559 		if (i2ccmd & I2CCMD_READY)
   9560 			break;
   9561 	}
   9562 	if ((i2ccmd & I2CCMD_READY) == 0)
   9563 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9564 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9565 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9566 
   9567 	sc->phy.release(sc);
   9568 }
   9569 
   9570 /* TBI related */
   9571 
   9572 /*
   9573  * wm_tbi_mediainit:
   9574  *
   9575  *	Initialize media for use on 1000BASE-X devices.
   9576  */
   9577 static void
   9578 wm_tbi_mediainit(struct wm_softc *sc)
   9579 {
   9580 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9581 	const char *sep = "";
   9582 
   9583 	if (sc->sc_type < WM_T_82543)
   9584 		sc->sc_tipg = TIPG_WM_DFLT;
   9585 	else
   9586 		sc->sc_tipg = TIPG_LG_DFLT;
   9587 
   9588 	sc->sc_tbi_serdes_anegticks = 5;
   9589 
   9590 	/* Initialize our media structures */
   9591 	sc->sc_mii.mii_ifp = ifp;
   9592 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9593 
   9594 	if ((sc->sc_type >= WM_T_82575)
   9595 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9596 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9597 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9598 	else
   9599 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9600 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9601 
   9602 	/*
   9603 	 * SWD Pins:
   9604 	 *
   9605 	 *	0 = Link LED (output)
   9606 	 *	1 = Loss Of Signal (input)
   9607 	 */
   9608 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9609 
   9610 	/* XXX Perhaps this is only for TBI */
   9611 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9612 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9613 
   9614 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9615 		sc->sc_ctrl &= ~CTRL_LRST;
   9616 
   9617 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9618 
   9619 #define	ADD(ss, mm, dd)							\
   9620 do {									\
   9621 	aprint_normal("%s%s", sep, ss);					\
   9622 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9623 	sep = ", ";							\
   9624 } while (/*CONSTCOND*/0)
   9625 
   9626 	aprint_normal_dev(sc->sc_dev, "");
   9627 
   9628 	/* Only 82545 is LX */
   9629 	if (sc->sc_type == WM_T_82545) {
   9630 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9631 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9632 	} else {
   9633 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9634 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9635 	}
   9636 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9637 	aprint_normal("\n");
   9638 
   9639 #undef ADD
   9640 
   9641 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9642 }
   9643 
   9644 /*
   9645  * wm_tbi_mediachange:	[ifmedia interface function]
   9646  *
   9647  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9648  */
   9649 static int
   9650 wm_tbi_mediachange(struct ifnet *ifp)
   9651 {
   9652 	struct wm_softc *sc = ifp->if_softc;
   9653 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9654 	uint32_t status;
   9655 	int i;
   9656 
   9657 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9658 		/* XXX need some work for >= 82571 and < 82575 */
   9659 		if (sc->sc_type < WM_T_82575)
   9660 			return 0;
   9661 	}
   9662 
   9663 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9664 	    || (sc->sc_type >= WM_T_82575))
   9665 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9666 
   9667 	sc->sc_ctrl &= ~CTRL_LRST;
   9668 	sc->sc_txcw = TXCW_ANE;
   9669 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9670 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9671 	else if (ife->ifm_media & IFM_FDX)
   9672 		sc->sc_txcw |= TXCW_FD;
   9673 	else
   9674 		sc->sc_txcw |= TXCW_HD;
   9675 
   9676 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9677 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9678 
   9679 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9680 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9681 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9682 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9683 	CSR_WRITE_FLUSH(sc);
   9684 	delay(1000);
   9685 
   9686 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9687 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9688 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal, and clear if they don't.
	 */
   9693 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9694 		/* Have signal; wait for the link to come up. */
   9695 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9696 			delay(10000);
   9697 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9698 				break;
   9699 		}
   9700 
   9701 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9702 			    device_xname(sc->sc_dev),i));
   9703 
   9704 		status = CSR_READ(sc, WMREG_STATUS);
   9705 		DPRINTF(WM_DEBUG_LINK,
   9706 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9707 			device_xname(sc->sc_dev),status, STATUS_LU));
   9708 		if (status & STATUS_LU) {
   9709 			/* Link is up. */
   9710 			DPRINTF(WM_DEBUG_LINK,
   9711 			    ("%s: LINK: set media -> link up %s\n",
   9712 			    device_xname(sc->sc_dev),
   9713 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9714 
   9715 			/*
    9716 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9717 			 * automatically, so re-read it into sc->sc_ctrl.
   9718 			 */
   9719 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9720 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9721 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9722 			if (status & STATUS_FD)
   9723 				sc->sc_tctl |=
   9724 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9725 			else
   9726 				sc->sc_tctl |=
   9727 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9728 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9729 				sc->sc_fcrtl |= FCRTL_XONE;
   9730 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9731 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9732 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9733 				      sc->sc_fcrtl);
   9734 			sc->sc_tbi_linkup = 1;
   9735 		} else {
   9736 			if (i == WM_LINKUP_TIMEOUT)
   9737 				wm_check_for_link(sc);
   9738 			/* Link is down. */
   9739 			DPRINTF(WM_DEBUG_LINK,
   9740 			    ("%s: LINK: set media -> link down\n",
   9741 			    device_xname(sc->sc_dev)));
   9742 			sc->sc_tbi_linkup = 0;
   9743 		}
   9744 	} else {
   9745 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9746 		    device_xname(sc->sc_dev)));
   9747 		sc->sc_tbi_linkup = 0;
   9748 	}
   9749 
   9750 	wm_tbi_serdes_set_linkled(sc);
   9751 
   9752 	return 0;
   9753 }
   9754 
   9755 /*
   9756  * wm_tbi_mediastatus:	[ifmedia interface function]
   9757  *
   9758  *	Get the current interface media status on a 1000BASE-X device.
   9759  */
   9760 static void
   9761 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9762 {
   9763 	struct wm_softc *sc = ifp->if_softc;
   9764 	uint32_t ctrl, status;
   9765 
   9766 	ifmr->ifm_status = IFM_AVALID;
   9767 	ifmr->ifm_active = IFM_ETHER;
   9768 
   9769 	status = CSR_READ(sc, WMREG_STATUS);
   9770 	if ((status & STATUS_LU) == 0) {
   9771 		ifmr->ifm_active |= IFM_NONE;
   9772 		return;
   9773 	}
   9774 
   9775 	ifmr->ifm_status |= IFM_ACTIVE;
   9776 	/* Only 82545 is LX */
   9777 	if (sc->sc_type == WM_T_82545)
   9778 		ifmr->ifm_active |= IFM_1000_LX;
   9779 	else
   9780 		ifmr->ifm_active |= IFM_1000_SX;
   9781 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9782 		ifmr->ifm_active |= IFM_FDX;
   9783 	else
   9784 		ifmr->ifm_active |= IFM_HDX;
   9785 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9786 	if (ctrl & CTRL_RFCE)
   9787 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9788 	if (ctrl & CTRL_TFCE)
   9789 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9790 }
   9791 
   9792 /* XXX TBI only */
   9793 static int
   9794 wm_check_for_link(struct wm_softc *sc)
   9795 {
   9796 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9797 	uint32_t rxcw;
   9798 	uint32_t ctrl;
   9799 	uint32_t status;
   9800 	uint32_t sig;
   9801 
   9802 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9803 		/* XXX need some work for >= 82571 */
   9804 		if (sc->sc_type >= WM_T_82571) {
   9805 			sc->sc_tbi_linkup = 1;
   9806 			return 0;
   9807 		}
   9808 	}
   9809 
   9810 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9811 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9812 	status = CSR_READ(sc, WMREG_STATUS);
   9813 
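         	/* Expected value of SWDPIN(1) when the optics see a signal. */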
   9814 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9815 
   9816 	DPRINTF(WM_DEBUG_LINK,
   9817 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9818 		device_xname(sc->sc_dev), __func__,
   9819 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9820 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9821 
   9822 	/*
   9823 	 * SWDPIN   LU RXCW
   9824 	 *      0    0    0
   9825 	 *      0    0    1	(should not happen)
   9826 	 *      0    1    0	(should not happen)
   9827 	 *      0    1    1	(should not happen)
    9828 	 *      1    0    0	Disable autonegotiation and force link up
    9829 	 *      1    0    1	received /C/ ordered sets but no link yet
    9830 	 *      1    1    0	(link up)
    9831 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
    9832 	 */
   9834 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9835 	    && ((status & STATUS_LU) == 0)
   9836 	    && ((rxcw & RXCW_C) == 0)) {
   9837 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9838 			__func__));
   9839 		sc->sc_tbi_linkup = 0;
   9840 		/* Disable auto-negotiation in the TXCW register */
   9841 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9842 
   9843 		/*
   9844 		 * Force link-up and also force full-duplex.
   9845 		 *
    9846 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    9847 		 * automatically, so keep sc->sc_ctrl in sync.
   9848 		 */
   9849 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9850 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9851 	} else if (((status & STATUS_LU) != 0)
   9852 	    && ((rxcw & RXCW_C) != 0)
   9853 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9854 		sc->sc_tbi_linkup = 1;
   9855 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9856 			__func__));
   9857 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9858 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9859 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9860 	    && ((rxcw & RXCW_C) != 0)) {
   9861 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9862 	} else {
   9863 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9864 			status));
   9865 	}
   9866 
   9867 	return 0;
   9868 }
   9869 
   9870 /*
   9871  * wm_tbi_tick:
   9872  *
   9873  *	Check the link on TBI devices.
   9874  *	This function acts as mii_tick().
   9875  */
   9876 static void
   9877 wm_tbi_tick(struct wm_softc *sc)
   9878 {
   9879 	struct mii_data *mii = &sc->sc_mii;
   9880 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9881 	uint32_t status;
   9882 
   9883 	KASSERT(WM_CORE_LOCKED(sc));
   9884 
   9885 	status = CSR_READ(sc, WMREG_STATUS);
   9886 
   9887 	/* XXX is this needed? */
   9888 	(void)CSR_READ(sc, WMREG_RXCW);
   9889 	(void)CSR_READ(sc, WMREG_CTRL);
   9890 
   9891 	/* set link status */
   9892 	if ((status & STATUS_LU) == 0) {
   9893 		DPRINTF(WM_DEBUG_LINK,
   9894 		    ("%s: LINK: checklink -> down\n",
   9895 			device_xname(sc->sc_dev)));
   9896 		sc->sc_tbi_linkup = 0;
   9897 	} else if (sc->sc_tbi_linkup == 0) {
   9898 		DPRINTF(WM_DEBUG_LINK,
   9899 		    ("%s: LINK: checklink -> up %s\n",
   9900 			device_xname(sc->sc_dev),
   9901 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9902 		sc->sc_tbi_linkup = 1;
   9903 		sc->sc_tbi_serdes_ticks = 0;
   9904 	}
   9905 
   9906 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9907 		goto setled;
   9908 
   9909 	if ((status & STATUS_LU) == 0) {
   9910 		sc->sc_tbi_linkup = 0;
   9911 		/* If the timer expired, retry autonegotiation */
   9912 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9913 		    && (++sc->sc_tbi_serdes_ticks
   9914 			>= sc->sc_tbi_serdes_anegticks)) {
   9915 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9916 			sc->sc_tbi_serdes_ticks = 0;
   9917 			/*
   9918 			 * Reset the link, and let autonegotiation do
   9919 			 * its thing
   9920 			 */
   9921 			sc->sc_ctrl |= CTRL_LRST;
   9922 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9923 			CSR_WRITE_FLUSH(sc);
   9924 			delay(1000);
   9925 			sc->sc_ctrl &= ~CTRL_LRST;
   9926 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9927 			CSR_WRITE_FLUSH(sc);
   9928 			delay(1000);
   9929 			CSR_WRITE(sc, WMREG_TXCW,
   9930 			    sc->sc_txcw & ~TXCW_ANE);
   9931 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9932 		}
   9933 	}
   9934 
   9935 setled:
   9936 	wm_tbi_serdes_set_linkled(sc);
   9937 }
   9938 
   9939 /* SERDES related */
   9940 static void
   9941 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9942 {
   9943 	uint32_t reg;
   9944 
   9945 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9946 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9947 		return;
   9948 
   9949 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9950 	reg |= PCS_CFG_PCS_EN;
   9951 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9952 
   9953 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9954 	reg &= ~CTRL_EXT_SWDPIN(3);
   9955 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9956 	CSR_WRITE_FLUSH(sc);
   9957 }
   9958 
   9959 static int
   9960 wm_serdes_mediachange(struct ifnet *ifp)
   9961 {
   9962 	struct wm_softc *sc = ifp->if_softc;
   9963 	bool pcs_autoneg = true; /* XXX */
   9964 	uint32_t ctrl_ext, pcs_lctl, reg;
   9965 
   9966 	/* XXX Currently, this function is not called on 8257[12] */
   9967 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9968 	    || (sc->sc_type >= WM_T_82575))
   9969 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9970 
   9971 	wm_serdes_power_up_link_82575(sc);
   9972 
   9973 	sc->sc_ctrl |= CTRL_SLU;
   9974 
   9975 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9976 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9977 
   9978 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9979 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9980 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9981 	case CTRL_EXT_LINK_MODE_SGMII:
   9982 		pcs_autoneg = true;
   9983 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9984 		break;
   9985 	case CTRL_EXT_LINK_MODE_1000KX:
   9986 		pcs_autoneg = false;
   9987 		/* FALLTHROUGH */
   9988 	default:
   9989 		if ((sc->sc_type == WM_T_82575)
   9990 		    || (sc->sc_type == WM_T_82576)) {
   9991 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9992 				pcs_autoneg = false;
   9993 		}
   9994 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9995 		    | CTRL_FRCFDX;
   9996 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9997 	}
   9998 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9999 
   10000 	if (pcs_autoneg) {
   10001 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10002 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10003 
   10004 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
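          		/*
          		 * Advertise both symmetric and asymmetric PAUSE toward
          		 * the link partner (the clear below is redundant, since
          		 * both bits are set right back).
          		 */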
   10005 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10006 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10007 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10008 	} else
   10009 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10010 
   10011 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10012 
   10014 	return 0;
   10015 }
   10016 
   10017 static void
   10018 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10019 {
   10020 	struct wm_softc *sc = ifp->if_softc;
   10021 	struct mii_data *mii = &sc->sc_mii;
   10022 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10023 	uint32_t pcs_adv, pcs_lpab, reg;
   10024 
   10025 	ifmr->ifm_status = IFM_AVALID;
   10026 	ifmr->ifm_active = IFM_ETHER;
   10027 
   10028 	/* Check PCS */
   10029 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10030 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10031 		ifmr->ifm_active |= IFM_NONE;
   10032 		sc->sc_tbi_linkup = 0;
   10033 		goto setled;
   10034 	}
   10035 
   10036 	sc->sc_tbi_linkup = 1;
   10037 	ifmr->ifm_status |= IFM_ACTIVE;
   10038 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10039 	if ((reg & PCS_LSTS_FDX) != 0)
   10040 		ifmr->ifm_active |= IFM_FDX;
   10041 	else
   10042 		ifmr->ifm_active |= IFM_HDX;
   10043 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10044 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10045 		/* Check flow */
   10046 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10047 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10048 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10049 			goto setled;
   10050 		}
   10051 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10052 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10053 		DPRINTF(WM_DEBUG_LINK,
   10054 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10055 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10056 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10057 			mii->mii_media_active |= IFM_FLOW
   10058 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10059 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10060 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10061 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10062 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10063 			mii->mii_media_active |= IFM_FLOW
   10064 			    | IFM_ETH_TXPAUSE;
   10065 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10066 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10067 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10068 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10069 			mii->mii_media_active |= IFM_FLOW
   10070 			    | IFM_ETH_RXPAUSE;
   10071 		} else {
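          			/* No flow control was negotiated. */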
   10072 		}
   10073 	}
   10074 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10075 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10076 setled:
   10077 	wm_tbi_serdes_set_linkled(sc);
   10078 }
   10079 
   10080 /*
   10081  * wm_serdes_tick:
   10082  *
   10083  *	Check the link on serdes devices.
   10084  */
   10085 static void
   10086 wm_serdes_tick(struct wm_softc *sc)
   10087 {
   10088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10089 	struct mii_data *mii = &sc->sc_mii;
   10090 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10091 	uint32_t reg;
   10092 
   10093 	KASSERT(WM_CORE_LOCKED(sc));
   10094 
   10095 	mii->mii_media_status = IFM_AVALID;
   10096 	mii->mii_media_active = IFM_ETHER;
   10097 
   10098 	/* Check PCS */
   10099 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10100 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10101 		mii->mii_media_status |= IFM_ACTIVE;
   10102 		sc->sc_tbi_linkup = 1;
   10103 		sc->sc_tbi_serdes_ticks = 0;
   10104 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10105 		if ((reg & PCS_LSTS_FDX) != 0)
   10106 			mii->mii_media_active |= IFM_FDX;
   10107 		else
   10108 			mii->mii_media_active |= IFM_HDX;
   10109 	} else {
    10110 		mii->mii_media_active |= IFM_NONE;
   10111 		sc->sc_tbi_linkup = 0;
    10112 		/* If the timer expired, retry autonegotiation */
   10113 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10114 		    && (++sc->sc_tbi_serdes_ticks
   10115 			>= sc->sc_tbi_serdes_anegticks)) {
   10116 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10117 			sc->sc_tbi_serdes_ticks = 0;
   10118 			/* XXX */
   10119 			wm_serdes_mediachange(ifp);
   10120 		}
   10121 	}
   10122 
   10123 	wm_tbi_serdes_set_linkled(sc);
   10124 }
   10125 
   10126 /* SFP related */
   10127 
   10128 static int
   10129 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10130 {
   10131 	uint32_t i2ccmd;
   10132 	int i;
   10133 
   10134 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10135 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10136 
   10137 	/* Poll the ready bit */
   10138 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10139 		delay(50);
   10140 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10141 		if (i2ccmd & I2CCMD_READY)
   10142 			break;
   10143 	}
   10144 	if ((i2ccmd & I2CCMD_READY) == 0)
   10145 		return -1;
   10146 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10147 		return -1;
   10148 
   10149 	*data = i2ccmd & 0x00ff;
   10150 
   10151 	return 0;
   10152 }
   10153 
   10154 static uint32_t
   10155 wm_sfp_get_media_type(struct wm_softc *sc)
   10156 {
   10157 	uint32_t ctrl_ext;
   10158 	uint8_t val = 0;
   10159 	int timeout = 3;
   10160 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10161 	int rv = -1;
   10162 
   10163 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10164 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10165 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10166 	CSR_WRITE_FLUSH(sc);
   10167 
   10168 	/* Read SFP module data */
   10169 	while (timeout) {
   10170 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10171 		if (rv == 0)
   10172 			break;
   10173 		delay(100*1000); /* XXX too big */
   10174 		timeout--;
   10175 	}
   10176 	if (rv != 0)
   10177 		goto out;
   10178 	switch (val) {
   10179 	case SFF_SFP_ID_SFF:
   10180 		aprint_normal_dev(sc->sc_dev,
   10181 		    "Module/Connector soldered to board\n");
   10182 		break;
   10183 	case SFF_SFP_ID_SFP:
   10184 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10185 		break;
   10186 	case SFF_SFP_ID_UNKNOWN:
   10187 		goto out;
   10188 	default:
   10189 		break;
   10190 	}
   10191 
   10192 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10193 	if (rv != 0) {
   10194 		goto out;
   10195 	}
   10196 
   10197 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10198 		mediatype = WM_MEDIATYPE_SERDES;
   10199 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10200 		sc->sc_flags |= WM_F_SGMII;
   10201 		mediatype = WM_MEDIATYPE_COPPER;
   10202 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10203 		sc->sc_flags |= WM_F_SGMII;
   10204 		mediatype = WM_MEDIATYPE_SERDES;
   10205 	}
   10206 
   10207 out:
   10208 	/* Restore I2C interface setting */
   10209 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10210 
   10211 	return mediatype;
   10212 }
   10213 
   10214 /*
   10215  * NVM related.
    10216  * Microwire, SPI (with or without EERD), and Flash.
   10217  */
   10218 
    10219 /* Helpers shared by both SPI and Microwire */
   10220 
   10221 /*
   10222  * wm_eeprom_sendbits:
   10223  *
   10224  *	Send a series of bits to the EEPROM.
   10225  */
   10226 static void
   10227 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10228 {
   10229 	uint32_t reg;
   10230 	int x;
   10231 
   10232 	reg = CSR_READ(sc, WMREG_EECD);
   10233 
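          	/* Clock each bit out on DI, MSB first, pulsing SK once per bit. */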
   10234 	for (x = nbits; x > 0; x--) {
   10235 		if (bits & (1U << (x - 1)))
   10236 			reg |= EECD_DI;
   10237 		else
   10238 			reg &= ~EECD_DI;
   10239 		CSR_WRITE(sc, WMREG_EECD, reg);
   10240 		CSR_WRITE_FLUSH(sc);
   10241 		delay(2);
   10242 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10243 		CSR_WRITE_FLUSH(sc);
   10244 		delay(2);
   10245 		CSR_WRITE(sc, WMREG_EECD, reg);
   10246 		CSR_WRITE_FLUSH(sc);
   10247 		delay(2);
   10248 	}
   10249 }
   10250 
   10251 /*
   10252  * wm_eeprom_recvbits:
   10253  *
   10254  *	Receive a series of bits from the EEPROM.
   10255  */
   10256 static void
   10257 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10258 {
   10259 	uint32_t reg, val;
   10260 	int x;
   10261 
   10262 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10263 
   10264 	val = 0;
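          	/* Pulse SK and sample DO for each bit, MSB first. */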
   10265 	for (x = nbits; x > 0; x--) {
   10266 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10267 		CSR_WRITE_FLUSH(sc);
   10268 		delay(2);
   10269 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10270 			val |= (1U << (x - 1));
   10271 		CSR_WRITE(sc, WMREG_EECD, reg);
   10272 		CSR_WRITE_FLUSH(sc);
   10273 		delay(2);
   10274 	}
   10275 	*valp = val;
   10276 }
   10277 
   10278 /* Microwire */
   10279 
   10280 /*
   10281  * wm_nvm_read_uwire:
   10282  *
   10283  *	Read a word from the EEPROM using the MicroWire protocol.
   10284  */
   10285 static int
   10286 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10287 {
   10288 	uint32_t reg, val;
   10289 	int i;
   10290 
   10291 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10292 		device_xname(sc->sc_dev), __func__));
   10293 
   10294 	for (i = 0; i < wordcnt; i++) {
   10295 		/* Clear SK and DI. */
   10296 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10297 		CSR_WRITE(sc, WMREG_EECD, reg);
   10298 
   10299 		/*
    10300 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    10301 		 * and in Xen.
    10302 		 *
    10303 		 * We use this workaround only for the 82540 because
    10304 		 * qemu's e1000 acts as an 82540.
   10305 		 */
   10306 		if (sc->sc_type == WM_T_82540) {
   10307 			reg |= EECD_SK;
   10308 			CSR_WRITE(sc, WMREG_EECD, reg);
   10309 			reg &= ~EECD_SK;
   10310 			CSR_WRITE(sc, WMREG_EECD, reg);
   10311 			CSR_WRITE_FLUSH(sc);
   10312 			delay(2);
   10313 		}
   10314 		/* XXX: end of workaround */
   10315 
   10316 		/* Set CHIP SELECT. */
   10317 		reg |= EECD_CS;
   10318 		CSR_WRITE(sc, WMREG_EECD, reg);
   10319 		CSR_WRITE_FLUSH(sc);
   10320 		delay(2);
   10321 
   10322 		/* Shift in the READ command. */
   10323 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10324 
   10325 		/* Shift in address. */
   10326 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10327 
   10328 		/* Shift out the data. */
   10329 		wm_eeprom_recvbits(sc, &val, 16);
   10330 		data[i] = val & 0xffff;
   10331 
   10332 		/* Clear CHIP SELECT. */
   10333 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10334 		CSR_WRITE(sc, WMREG_EECD, reg);
   10335 		CSR_WRITE_FLUSH(sc);
   10336 		delay(2);
   10337 	}
   10338 
   10339 	return 0;
   10340 }
   10341 
   10342 /* SPI */
   10343 
   10344 /*
   10345  * Set SPI and FLASH related information from the EECD register.
   10346  * For 82541 and 82547, the word size is taken from EEPROM.
   10347  */
   10348 static int
   10349 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10350 {
   10351 	int size;
   10352 	uint32_t reg;
   10353 	uint16_t data;
   10354 
   10355 	reg = CSR_READ(sc, WMREG_EECD);
   10356 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10357 
   10358 	/* Read the size of NVM from EECD by default */
   10359 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10360 	switch (sc->sc_type) {
   10361 	case WM_T_82541:
   10362 	case WM_T_82541_2:
   10363 	case WM_T_82547:
   10364 	case WM_T_82547_2:
   10365 		/* Set dummy value to access EEPROM */
   10366 		sc->sc_nvm_wordsize = 64;
   10367 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10368 		reg = data;
   10369 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10370 		if (size == 0)
   10371 			size = 6; /* 64 word size */
   10372 		else
   10373 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10374 		break;
   10375 	case WM_T_80003:
   10376 	case WM_T_82571:
   10377 	case WM_T_82572:
   10378 	case WM_T_82573: /* SPI case */
   10379 	case WM_T_82574: /* SPI case */
   10380 	case WM_T_82583: /* SPI case */
   10381 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10382 		if (size > 14)
   10383 			size = 14;
   10384 		break;
   10385 	case WM_T_82575:
   10386 	case WM_T_82576:
   10387 	case WM_T_82580:
   10388 	case WM_T_I350:
   10389 	case WM_T_I354:
   10390 	case WM_T_I210:
   10391 	case WM_T_I211:
   10392 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10393 		if (size > 15)
   10394 			size = 15;
   10395 		break;
   10396 	default:
   10397 		aprint_error_dev(sc->sc_dev,
   10398 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10399 		return -1;
   10401 	}
   10402 
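          	/* The size value encodes the word count as a power of two. */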
   10403 	sc->sc_nvm_wordsize = 1 << size;
   10404 
   10405 	return 0;
   10406 }
   10407 
   10408 /*
   10409  * wm_nvm_ready_spi:
   10410  *
   10411  *	Wait for a SPI EEPROM to be ready for commands.
   10412  */
   10413 static int
   10414 wm_nvm_ready_spi(struct wm_softc *sc)
   10415 {
   10416 	uint32_t val;
   10417 	int usec;
   10418 
   10419 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10420 		device_xname(sc->sc_dev), __func__));
   10421 
   10422 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10423 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10424 		wm_eeprom_recvbits(sc, &val, 8);
   10425 		if ((val & SPI_SR_RDY) == 0)
   10426 			break;
   10427 	}
   10428 	if (usec >= SPI_MAX_RETRIES) {
   10429 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10430 		return 1;
   10431 	}
   10432 	return 0;
   10433 }
   10434 
   10435 /*
   10436  * wm_nvm_read_spi:
   10437  *
    10438  *	Read a word from the EEPROM using the SPI protocol.
   10439  */
   10440 static int
   10441 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10442 {
   10443 	uint32_t reg, val;
   10444 	int i;
   10445 	uint8_t opc;
   10446 
   10447 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10448 		device_xname(sc->sc_dev), __func__));
   10449 
   10450 	/* Clear SK and CS. */
   10451 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10452 	CSR_WRITE(sc, WMREG_EECD, reg);
   10453 	CSR_WRITE_FLUSH(sc);
   10454 	delay(2);
   10455 
   10456 	if (wm_nvm_ready_spi(sc))
   10457 		return 1;
   10458 
   10459 	/* Toggle CS to flush commands. */
   10460 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10461 	CSR_WRITE_FLUSH(sc);
   10462 	delay(2);
   10463 	CSR_WRITE(sc, WMREG_EECD, reg);
   10464 	CSR_WRITE_FLUSH(sc);
   10465 	delay(2);
   10466 
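          	/*
          	 * Parts with 8 address bits need a ninth address bit for
          	 * words >= 128; it is carried in the opcode (SPI_OPC_A8).
          	 */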
   10467 	opc = SPI_OPC_READ;
   10468 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10469 		opc |= SPI_OPC_A8;
   10470 
   10471 	wm_eeprom_sendbits(sc, opc, 8);
   10472 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10473 
   10474 	for (i = 0; i < wordcnt; i++) {
   10475 		wm_eeprom_recvbits(sc, &val, 16);
   10476 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10477 	}
   10478 
   10479 	/* Raise CS and clear SK. */
   10480 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10481 	CSR_WRITE(sc, WMREG_EECD, reg);
   10482 	CSR_WRITE_FLUSH(sc);
   10483 	delay(2);
   10484 
   10485 	return 0;
   10486 }
   10487 
   10488 /* Using with EERD */
   10489 
   10490 static int
   10491 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10492 {
   10493 	uint32_t attempts = 100000;
   10494 	uint32_t i, reg = 0;
   10495 	int32_t done = -1;
   10496 
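          	/* 100000 polls with a 5us delay give a ~500ms worst case. */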
   10497 	for (i = 0; i < attempts; i++) {
   10498 		reg = CSR_READ(sc, rw);
   10499 
   10500 		if (reg & EERD_DONE) {
   10501 			done = 0;
   10502 			break;
   10503 		}
   10504 		delay(5);
   10505 	}
   10506 
   10507 	return done;
   10508 }
   10509 
   10510 static int
   10511 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10512     uint16_t *data)
   10513 {
   10514 	int i, eerd = 0;
   10515 	int error = 0;
   10516 
   10517 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10518 		device_xname(sc->sc_dev), __func__));
   10519 
   10520 	for (i = 0; i < wordcnt; i++) {
   10521 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10522 
   10523 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10524 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10525 		if (error != 0)
   10526 			break;
   10527 
   10528 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10529 	}
   10530 
   10531 	return error;
   10532 }
   10533 
   10534 /* Flash */
   10535 
   10536 static int
   10537 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10538 {
   10539 	uint32_t eecd;
   10540 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10541 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10542 	uint8_t sig_byte = 0;
   10543 
   10544 	switch (sc->sc_type) {
   10545 	case WM_T_PCH_SPT:
   10546 		/*
   10547 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10548 		 * sector valid bits from the NVM.
   10549 		 */
   10550 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10551 		if ((*bank == 0) || (*bank == 1)) {
   10552 			aprint_error_dev(sc->sc_dev,
   10553 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10554 				*bank);
   10555 			return -1;
   10556 		} else {
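          			/* Field values 2 and 3 map to banks 0 and 1. */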
   10557 			*bank = *bank - 2;
   10558 			return 0;
   10559 		}
   10560 	case WM_T_ICH8:
   10561 	case WM_T_ICH9:
   10562 		eecd = CSR_READ(sc, WMREG_EECD);
   10563 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10564 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10565 			return 0;
   10566 		}
   10567 		/* FALLTHROUGH */
   10568 	default:
   10569 		/* Default to 0 */
   10570 		*bank = 0;
   10571 
   10572 		/* Check bank 0 */
   10573 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10574 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10575 			*bank = 0;
   10576 			return 0;
   10577 		}
   10578 
   10579 		/* Check bank 1 */
   10580 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10581 		    &sig_byte);
   10582 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10583 			*bank = 1;
   10584 			return 0;
   10585 		}
   10586 	}
   10587 
   10588 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10589 		device_xname(sc->sc_dev)));
   10590 	return -1;
   10591 }
   10592 
   10593 /******************************************************************************
   10594  * This function does initial flash setup so that a new read/write/erase cycle
   10595  * can be started.
   10596  *
   10597  * sc - The pointer to the hw structure
   10598  ****************************************************************************/
   10599 static int32_t
   10600 wm_ich8_cycle_init(struct wm_softc *sc)
   10601 {
   10602 	uint16_t hsfsts;
   10603 	int32_t error = 1;
   10604 	int32_t i     = 0;
   10605 
   10606 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10607 
    10608 	/* Check the Flash Descriptor Valid bit in HW status */
   10609 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10610 		return error;
   10611 	}
   10612 
    10613 	/* Clear FCERR and DAEL in HW status by writing 1s */
   10615 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10616 
   10617 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10618 
   10619 	/*
    10620 	 * Either the hardware should provide a "cycle in progress" bit to
    10621 	 * check before starting a new cycle, or the FDONE bit should read
    10622 	 * as 1 after a hardware reset, so it can indicate whether a cycle
    10623 	 * is in progress or has completed.  We should also have a software
    10624 	 * semaphore guarding FDONE and the in-progress bit, so that two
    10625 	 * threads cannot start a cycle at the same time.
   10628 	 */
   10629 
   10630 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10631 		/*
   10632 		 * There is no cycle running at present, so we can start a
   10633 		 * cycle
   10634 		 */
   10635 
   10636 		/* Begin by setting Flash Cycle Done. */
   10637 		hsfsts |= HSFSTS_DONE;
   10638 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10639 		error = 0;
   10640 	} else {
   10641 		/*
    10642 		 * Otherwise, poll for some time so the current cycle has
    10643 		 * a chance to end before giving up.
   10644 		 */
   10645 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10646 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10647 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10648 				error = 0;
   10649 				break;
   10650 			}
   10651 			delay(1);
   10652 		}
   10653 		if (error == 0) {
   10654 			/*
    10655 			 * The previous cycle finished within the timeout;
    10656 			 * now set the Flash Cycle Done bit.
   10657 			 */
   10658 			hsfsts |= HSFSTS_DONE;
   10659 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10660 		}
   10661 	}
   10662 	return error;
   10663 }
   10664 
   10665 /******************************************************************************
   10666  * This function starts a flash cycle and waits for its completion
   10667  *
   10668  * sc - The pointer to the hw structure
   10669  ****************************************************************************/
   10670 static int32_t
   10671 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10672 {
   10673 	uint16_t hsflctl;
   10674 	uint16_t hsfsts;
   10675 	int32_t error = 1;
   10676 	uint32_t i = 0;
   10677 
   10678 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10679 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10680 	hsflctl |= HSFCTL_GO;
   10681 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10682 
   10683 	/* Wait till FDONE bit is set to 1 */
   10684 	do {
   10685 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10686 		if (hsfsts & HSFSTS_DONE)
   10687 			break;
   10688 		delay(1);
   10689 		i++;
   10690 	} while (i < timeout);
    10691 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10692 		error = 0;
   10693 
   10694 	return error;
   10695 }
   10696 
   10697 /******************************************************************************
   10698  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10699  *
   10700  * sc - The pointer to the hw structure
   10701  * index - The index of the byte or word to read.
   10702  * size - Size of data to read, 1=byte 2=word, 4=dword
   10703  * data - Pointer to the word to store the value read.
   10704  *****************************************************************************/
   10705 static int32_t
   10706 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10707     uint32_t size, uint32_t *data)
   10708 {
   10709 	uint16_t hsfsts;
   10710 	uint16_t hsflctl;
   10711 	uint32_t flash_linear_address;
   10712 	uint32_t flash_data = 0;
   10713 	int32_t error = 1;
   10714 	int32_t count = 0;
   10715 
    10716 	if (size < 1 || size > 4 || data == NULL ||
   10717 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10718 		return error;
   10719 
   10720 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10721 	    sc->sc_ich8_flash_base;
   10722 
   10723 	do {
   10724 		delay(1);
   10725 		/* Steps */
   10726 		error = wm_ich8_cycle_init(sc);
   10727 		if (error)
   10728 			break;
   10729 
   10730 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10731 		/* The byte count field holds size - 1 (0 = 1 byte ... 3 = 4 bytes). */
   10732 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10733 		    & HSFCTL_BCOUNT_MASK;
   10734 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10735 		if (sc->sc_type == WM_T_PCH_SPT) {
   10736 			/*
   10737 			 * In SPT, This register is in Lan memory space, not
   10738 			 * flash. Therefore, only 32 bit access is supported.
   10739 			 */
   10740 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10741 			    (uint32_t)hsflctl);
   10742 		} else
   10743 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10744 
   10745 		/*
    10746 		 * Write the last 24 bits of the index into the Flash
    10747 		 * Linear Address field of the Flash Address register.
   10748 		 */
   10749 		/* TODO: TBD maybe check the index against the size of flash */
   10750 
   10751 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10752 
   10753 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10754 
   10755 		/*
    10756 		 * If FCERR is set, clear it and retry the whole sequence
    10757 		 * a few more times; otherwise read the data out of Flash
    10758 		 * Data0, least significant byte first.
   10760 		 */
   10761 		if (error == 0) {
   10762 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10763 			if (size == 1)
   10764 				*data = (uint8_t)(flash_data & 0x000000FF);
   10765 			else if (size == 2)
   10766 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10767 			else if (size == 4)
   10768 				*data = (uint32_t)flash_data;
   10769 			break;
   10770 		} else {
   10771 			/*
   10772 			 * If we've gotten here, then things are probably
   10773 			 * completely hosed, but if the error condition is
   10774 			 * detected, it won't hurt to give it another try...
   10775 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10776 			 */
   10777 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10778 			if (hsfsts & HSFSTS_ERR) {
   10779 				/* Repeat for some time before giving up. */
   10780 				continue;
   10781 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10782 				break;
   10783 		}
   10784 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10785 
   10786 	return error;
   10787 }
   10788 
   10789 /******************************************************************************
   10790  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10791  *
   10792  * sc - pointer to wm_hw structure
   10793  * index - The index of the byte to read.
   10794  * data - Pointer to a byte to store the value read.
   10795  *****************************************************************************/
   10796 static int32_t
   10797 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10798 {
   10799 	int32_t status;
   10800 	uint32_t word = 0;
   10801 
   10802 	status = wm_read_ich8_data(sc, index, 1, &word);
   10803 	if (status == 0)
   10804 		*data = (uint8_t)word;
   10805 	else
   10806 		*data = 0;
   10807 
   10808 	return status;
   10809 }
   10810 
   10811 /******************************************************************************
   10812  * Reads a word from the NVM using the ICH8 flash access registers.
   10813  *
   10814  * sc - pointer to wm_hw structure
   10815  * index - The starting byte index of the word to read.
   10816  * data - Pointer to a word to store the value read.
   10817  *****************************************************************************/
   10818 static int32_t
   10819 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10820 {
   10821 	int32_t status;
   10822 	uint32_t word = 0;
   10823 
   10824 	status = wm_read_ich8_data(sc, index, 2, &word);
   10825 	if (status == 0)
   10826 		*data = (uint16_t)word;
   10827 	else
   10828 		*data = 0;
   10829 
   10830 	return status;
   10831 }
   10832 
   10833 /******************************************************************************
   10834  * Reads a dword from the NVM using the ICH8 flash access registers.
   10835  *
   10836  * sc - pointer to wm_hw structure
   10837  * index - The starting byte index of the word to read.
   10838  * data - Pointer to a word to store the value read.
   10839  *****************************************************************************/
   10840 static int32_t
   10841 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10842 {
   10843 	int32_t status;
   10844 
   10845 	status = wm_read_ich8_data(sc, index, 4, data);
   10846 	return status;
   10847 }
   10848 
   10849 /******************************************************************************
   10850  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10851  * register.
   10852  *
   10853  * sc - Struct containing variables accessed by shared code
   10854  * offset - offset of word in the EEPROM to read
   10855  * data - word read from the EEPROM
   10856  * words - number of words to read
   10857  *****************************************************************************/
   10858 static int
   10859 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10860 {
   10861 	int32_t  error = 0;
   10862 	uint32_t flash_bank = 0;
   10863 	uint32_t act_offset = 0;
   10864 	uint32_t bank_offset = 0;
   10865 	uint16_t word = 0;
   10866 	uint16_t i = 0;
   10867 
   10868 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10869 		device_xname(sc->sc_dev), __func__));
   10870 
   10871 	/*
   10872 	 * We need to know which is the valid flash bank.  In the event
   10873 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10874 	 * managing flash_bank.  So it cannot be trusted and needs
   10875 	 * to be updated with each read.
   10876 	 */
   10877 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10878 	if (error) {
   10879 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10880 			device_xname(sc->sc_dev)));
   10881 		flash_bank = 0;
   10882 	}
   10883 
   10884 	/*
   10885 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10886 	 * size
   10887 	 */
   10888 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10889 
   10890 	error = wm_get_swfwhw_semaphore(sc);
   10891 	if (error) {
   10892 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10893 		    __func__);
   10894 		return error;
   10895 	}
   10896 
   10897 	for (i = 0; i < words; i++) {
   10898 		/* The NVM part needs a byte offset, hence * 2 */
   10899 		act_offset = bank_offset + ((offset + i) * 2);
   10900 		error = wm_read_ich8_word(sc, act_offset, &word);
   10901 		if (error) {
   10902 			aprint_error_dev(sc->sc_dev,
   10903 			    "%s: failed to read NVM\n", __func__);
   10904 			break;
   10905 		}
   10906 		data[i] = word;
   10907 	}
   10908 
   10909 	wm_put_swfwhw_semaphore(sc);
   10910 	return error;
   10911 }
   10912 
   10913 /******************************************************************************
   10914  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10915  * register.
   10916  *
   10917  * sc - Struct containing variables accessed by shared code
   10918  * offset - offset of word in the EEPROM to read
   10919  * data - word read from the EEPROM
   10920  * words - number of words to read
   10921  *****************************************************************************/
   10922 static int
   10923 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10924 {
   10925 	int32_t  error = 0;
   10926 	uint32_t flash_bank = 0;
   10927 	uint32_t act_offset = 0;
   10928 	uint32_t bank_offset = 0;
   10929 	uint32_t dword = 0;
   10930 	uint16_t i = 0;
   10931 
   10932 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10933 		device_xname(sc->sc_dev), __func__));
   10934 
   10935 	/*
   10936 	 * We need to know which is the valid flash bank.  In the event
   10937 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10938 	 * managing flash_bank.  So it cannot be trusted and needs
   10939 	 * to be updated with each read.
   10940 	 */
   10941 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10942 	if (error) {
   10943 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10944 			device_xname(sc->sc_dev)));
   10945 		flash_bank = 0;
   10946 	}
   10947 
   10948 	/*
   10949 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10950 	 * size
   10951 	 */
   10952 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10953 
   10954 	error = wm_get_swfwhw_semaphore(sc);
   10955 	if (error) {
   10956 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10957 		    __func__);
   10958 		return error;
   10959 	}
   10960 
   10961 	for (i = 0; i < words; i++) {
   10962 		/* The NVM part needs a byte offset, hence * 2 */
   10963 		act_offset = bank_offset + ((offset + i) * 2);
   10964 		/* but we must read dword aligned, so mask ... */
   10965 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10966 		if (error) {
   10967 			aprint_error_dev(sc->sc_dev,
   10968 			    "%s: failed to read NVM\n", __func__);
   10969 			break;
   10970 		}
   10971 		/* ... and pick out low or high word */
   10972 		if ((act_offset & 0x2) == 0)
   10973 			data[i] = (uint16_t)(dword & 0xFFFF);
   10974 		else
   10975 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10976 	}
   10977 
   10978 	wm_put_swfwhw_semaphore(sc);
   10979 	return error;
   10980 }
   10981 
   10982 /* iNVM */
   10983 
   10984 static int
   10985 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10986 {
    10987 	int32_t  rv = -1;	/* Assume not found until we match below */
   10988 	uint32_t invm_dword;
   10989 	uint16_t i;
   10990 	uint8_t record_type, word_address;
   10991 
   10992 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10993 		device_xname(sc->sc_dev), __func__));
   10994 
   10995 	for (i = 0; i < INVM_SIZE; i++) {
   10996 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10997 		/* Get record type */
   10998 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10999 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11000 			break;
   11001 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11002 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11003 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11004 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11005 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11006 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11007 			if (word_address == address) {
   11008 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11009 				rv = 0;
   11010 				break;
   11011 			}
   11012 		}
   11013 	}
   11014 
   11015 	return rv;
   11016 }
   11017 
   11018 static int
   11019 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11020 {
   11021 	int rv = 0;
   11022 	int i;
   11023 
   11024 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11025 		device_xname(sc->sc_dev), __func__));
   11026 
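          	/*
          	 * Only a few words are actually stored in the iNVM; supply
          	 * the documented defaults for the others where we know them.
          	 */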
   11027 	for (i = 0; i < words; i++) {
   11028 		switch (offset + i) {
   11029 		case NVM_OFF_MACADDR:
   11030 		case NVM_OFF_MACADDR1:
   11031 		case NVM_OFF_MACADDR2:
   11032 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11033 			if (rv != 0) {
   11034 				data[i] = 0xffff;
   11035 				rv = -1;
   11036 			}
   11037 			break;
   11038 		case NVM_OFF_CFG2:
   11039 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11040 			if (rv != 0) {
   11041 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11042 				rv = 0;
   11043 			}
   11044 			break;
   11045 		case NVM_OFF_CFG4:
   11046 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11047 			if (rv != 0) {
   11048 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11049 				rv = 0;
   11050 			}
   11051 			break;
   11052 		case NVM_OFF_LED_1_CFG:
   11053 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11054 			if (rv != 0) {
   11055 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11056 				rv = 0;
   11057 			}
   11058 			break;
   11059 		case NVM_OFF_LED_0_2_CFG:
   11060 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11061 			if (rv != 0) {
   11062 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11063 				rv = 0;
   11064 			}
   11065 			break;
   11066 		case NVM_OFF_ID_LED_SETTINGS:
   11067 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11068 			if (rv != 0) {
   11069 				*data = ID_LED_RESERVED_FFFF;
   11070 				rv = 0;
   11071 			}
   11072 			break;
   11073 		default:
   11074 			DPRINTF(WM_DEBUG_NVM,
   11075 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11076 			*data = NVM_RESERVED_WORD;
   11077 			break;
   11078 		}
   11079 	}
   11080 
   11081 	return rv;
   11082 }
   11083 
   11084 /* Lock, detecting NVM type, validate checksum, version and read */
   11085 
   11086 /*
   11087  * wm_nvm_acquire:
   11088  *
   11089  *	Perform the EEPROM handshake required on some chips.
   11090  */
   11091 static int
   11092 wm_nvm_acquire(struct wm_softc *sc)
   11093 {
   11094 	uint32_t reg;
   11095 	int x;
   11096 	int ret = 0;
   11097 
   11098 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11099 		device_xname(sc->sc_dev), __func__));
   11100 
   11101 	if (sc->sc_type >= WM_T_ICH8) {
   11102 		ret = wm_get_nvm_ich8lan(sc);
   11103 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11104 		ret = wm_get_swfwhw_semaphore(sc);
   11105 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11106 		/* This will also do wm_get_swsm_semaphore() if needed */
   11107 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11108 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11109 		ret = wm_get_swsm_semaphore(sc);
   11110 	}
   11111 
   11112 	if (ret) {
   11113 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11114 			__func__);
   11115 		return 1;
   11116 	}
   11117 
   11118 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11119 		reg = CSR_READ(sc, WMREG_EECD);
   11120 
   11121 		/* Request EEPROM access. */
   11122 		reg |= EECD_EE_REQ;
   11123 		CSR_WRITE(sc, WMREG_EECD, reg);
   11124 
   11125 		/* ..and wait for it to be granted. */
   11126 		for (x = 0; x < 1000; x++) {
   11127 			reg = CSR_READ(sc, WMREG_EECD);
   11128 			if (reg & EECD_EE_GNT)
   11129 				break;
   11130 			delay(5);
   11131 		}
   11132 		if ((reg & EECD_EE_GNT) == 0) {
   11133 			aprint_error_dev(sc->sc_dev,
   11134 			    "could not acquire EEPROM GNT\n");
   11135 			reg &= ~EECD_EE_REQ;
   11136 			CSR_WRITE(sc, WMREG_EECD, reg);
   11137 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11138 				wm_put_swfwhw_semaphore(sc);
   11139 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11140 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11141 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11142 				wm_put_swsm_semaphore(sc);
   11143 			return 1;
   11144 		}
   11145 	}
   11146 
   11147 	return 0;
   11148 }
   11149 
   11150 /*
   11151  * wm_nvm_release:
   11152  *
   11153  *	Release the EEPROM mutex.
   11154  */
   11155 static void
   11156 wm_nvm_release(struct wm_softc *sc)
   11157 {
   11158 	uint32_t reg;
   11159 
   11160 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11161 		device_xname(sc->sc_dev), __func__));
   11162 
   11163 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11164 		reg = CSR_READ(sc, WMREG_EECD);
   11165 		reg &= ~EECD_EE_REQ;
   11166 		CSR_WRITE(sc, WMREG_EECD, reg);
   11167 	}
   11168 
   11169 	if (sc->sc_type >= WM_T_ICH8) {
   11170 		wm_put_nvm_ich8lan(sc);
   11171 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11172 		wm_put_swfwhw_semaphore(sc);
    11173 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   11174 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11175 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11176 		wm_put_swsm_semaphore(sc);
   11177 }
   11178 
   11179 static int
   11180 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11181 {
   11182 	uint32_t eecd = 0;
   11183 
   11184 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11185 	    || sc->sc_type == WM_T_82583) {
   11186 		eecd = CSR_READ(sc, WMREG_EECD);
   11187 
   11188 		/* Isolate bits 15 & 16 */
   11189 		eecd = ((eecd >> 15) & 0x03);
   11190 
   11191 		/* If both bits are set, device is Flash type */
   11192 		if (eecd == 0x03)
   11193 			return 0;
   11194 	}
   11195 	return 1;
   11196 }
   11197 
   11198 static int
   11199 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11200 {
   11201 	uint32_t eec;
   11202 
   11203 	eec = CSR_READ(sc, WMREG_EEC);
   11204 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11205 		return 1;
   11206 
   11207 	return 0;
   11208 }
   11209 
   11210 /*
   11211  * wm_nvm_validate_checksum
   11212  *
   11213  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11214  */
   11215 static int
   11216 wm_nvm_validate_checksum(struct wm_softc *sc)
   11217 {
   11218 	uint16_t checksum;
   11219 	uint16_t eeprom_data;
   11220 #ifdef WM_DEBUG
   11221 	uint16_t csum_wordaddr, valid_checksum;
   11222 #endif
   11223 	int i;
   11224 
   11225 	checksum = 0;
   11226 
   11227 	/* Don't check for I211 */
   11228 	if (sc->sc_type == WM_T_I211)
   11229 		return 0;
   11230 
   11231 #ifdef WM_DEBUG
   11232 	if (sc->sc_type == WM_T_PCH_LPT) {
   11233 		csum_wordaddr = NVM_OFF_COMPAT;
   11234 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11235 	} else {
   11236 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11237 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11238 	}
   11239 
   11240 	/* Dump EEPROM image for debug */
   11241 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11242 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11243 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11244 		/* XXX PCH_SPT? */
   11245 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11246 		if ((eeprom_data & valid_checksum) == 0) {
   11247 			DPRINTF(WM_DEBUG_NVM,
   11248 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11249 				device_xname(sc->sc_dev), eeprom_data,
   11250 				    valid_checksum));
   11251 		}
   11252 	}
   11253 
   11254 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11255 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11256 		for (i = 0; i < NVM_SIZE; i++) {
   11257 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11258 				printf("XXXX ");
   11259 			else
   11260 				printf("%04hx ", eeprom_data);
   11261 			if (i % 8 == 7)
   11262 				printf("\n");
   11263 		}
   11264 	}
   11265 
   11266 #endif /* WM_DEBUG */
   11267 
   11268 	for (i = 0; i < NVM_SIZE; i++) {
   11269 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11270 			return 1;
   11271 		checksum += eeprom_data;
   11272 	}
   11273 
   11274 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11275 #ifdef WM_DEBUG
   11276 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11277 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11278 #endif
   11279 	}
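          	/*
          	 * Note: a checksum mismatch is only reported under WM_DEBUG;
          	 * we deliberately return success either way.
          	 */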
   11280 
   11281 	return 0;
   11282 }
   11283 
   11284 static void
   11285 wm_nvm_version_invm(struct wm_softc *sc)
   11286 {
   11287 	uint32_t dword;
   11288 
   11289 	/*
    11290 	 * Linux's code for decoding the version is very strange, so we
    11291 	 * don't follow that algorithm; we simply use word 61 as the
    11292 	 * documentation describes.  It may not be perfect, though...
   11293 	 *
   11294 	 * Example:
   11295 	 *
   11296 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11297 	 */
   11298 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11299 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11300 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11301 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11302 }
   11303 
   11304 static void
   11305 wm_nvm_version(struct wm_softc *sc)
   11306 {
   11307 	uint16_t major, minor, build, patch;
   11308 	uint16_t uid0, uid1;
   11309 	uint16_t nvm_data;
   11310 	uint16_t off;
   11311 	bool check_version = false;
   11312 	bool check_optionrom = false;
   11313 	bool have_build = false;
   11314 
   11315 	/*
   11316 	 * Version format:
   11317 	 *
   11318 	 * XYYZ
   11319 	 * X0YZ
   11320 	 * X0YY
   11321 	 *
   11322 	 * Example:
   11323 	 *
   11324 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11325 	 *	82571	0x50a6	5.10.6?
   11326 	 *	82572	0x506a	5.6.10?
   11327 	 *	82572EI	0x5069	5.6.9?
   11328 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11329 	 *		0x2013	2.1.3?
    11330 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11331 	 */
   11332 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11333 	switch (sc->sc_type) {
   11334 	case WM_T_82571:
   11335 	case WM_T_82572:
   11336 	case WM_T_82574:
   11337 	case WM_T_82583:
   11338 		check_version = true;
   11339 		check_optionrom = true;
   11340 		have_build = true;
   11341 		break;
   11342 	case WM_T_82575:
   11343 	case WM_T_82576:
   11344 	case WM_T_82580:
   11345 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11346 			check_version = true;
   11347 		break;
   11348 	case WM_T_I211:
   11349 		wm_nvm_version_invm(sc);
   11350 		goto printver;
   11351 	case WM_T_I210:
   11352 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11353 			wm_nvm_version_invm(sc);
   11354 			goto printver;
   11355 		}
   11356 		/* FALLTHROUGH */
   11357 	case WM_T_I350:
   11358 	case WM_T_I354:
   11359 		check_version = true;
   11360 		check_optionrom = true;
   11361 		break;
   11362 	default:
   11363 		return;
   11364 	}
   11365 	if (check_version) {
   11366 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11367 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11368 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11369 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11370 			build = nvm_data & NVM_BUILD_MASK;
   11371 			have_build = true;
   11372 		} else
   11373 			minor = nvm_data & 0x00ff;
   11374 
   11375 		/* Decimal */
   11376 		minor = (minor / 16) * 10 + (minor % 16);
   11377 		sc->sc_nvm_ver_major = major;
   11378 		sc->sc_nvm_ver_minor = minor;
   11379 
   11380 printver:
   11381 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11382 		    sc->sc_nvm_ver_minor);
   11383 		if (have_build) {
   11384 			sc->sc_nvm_ver_build = build;
   11385 			aprint_verbose(".%d", build);
   11386 		}
   11387 	}
   11388 	if (check_optionrom) {
   11389 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11390 		/* Option ROM Version */
   11391 		if ((off != 0x0000) && (off != 0xffff)) {
   11392 			off += NVM_COMBO_VER_OFF;
   11393 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11394 			wm_nvm_read(sc, off, 1, &uid0);
   11395 			if ((uid0 != 0) && (uid0 != 0xffff)
   11396 			    && (uid1 != 0) && (uid1 != 0xffff)) {
    11397 				/* 16 bits */
   11398 				major = uid0 >> 8;
   11399 				build = (uid0 << 8) | (uid1 >> 8);
   11400 				patch = uid1 & 0x00ff;
   11401 				aprint_verbose(", option ROM Version %d.%d.%d",
   11402 				    major, build, patch);
   11403 			}
   11404 		}
   11405 	}
   11406 
   11407 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11408 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11409 }
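
/*
 * A worked example of the decoding above (a sketch; the NVM_MAJOR/MINOR/
 * BUILD masks are in if_wmreg.h and are assumed here to select the
 * obvious nibbles of the XYYZ layout).  For an 82571 whose version word
 * reads 0x50a2:
 *
 *	major = 0x5                                      -> 5
 *	minor = 0x0a -> (0x0a / 16) * 10 + (0x0a % 16)   -> 10
 *	build = 0x2                                      -> 2
 *
 * i.e. "version 5.10.2", matching the table in the comment above.
 */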
   11410 
   11411 /*
   11412  * wm_nvm_read:
   11413  *
   11414  *	Read data from the serial EEPROM.
   11415  */
   11416 static int
   11417 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11418 {
   11419 	int rv;
   11420 
   11421 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11422 		device_xname(sc->sc_dev), __func__));
   11423 
   11424 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11425 		return 1;
   11426 
   11427 	if (wm_nvm_acquire(sc))
   11428 		return 1;
   11429 
   11430 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11431 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11432 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11433 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11434 	else if (sc->sc_type == WM_T_PCH_SPT)
   11435 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11436 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11437 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11438 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11439 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11440 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11441 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11442 	else
   11443 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11444 
   11445 	wm_nvm_release(sc);
   11446 	return rv;
   11447 }
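
/*
 * Callers use wm_nvm_read() the same way regardless of which access
 * method the dispatch above selects.  A minimal caller sketch
 * (NVM_OFF_EXAMPLE is a hypothetical offset name):
 *
 *	uint16_t word;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_EXAMPLE, 1, &word) != 0) {
 *		// EEPROM invalid or semaphore acquisition failed
 *		return;
 *	}
 */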
   11448 
   11449 /*
   11450  * Hardware semaphores.
    11451  * Very complex...
   11452  */
   11453 
   11454 static int
   11455 wm_get_null(struct wm_softc *sc)
   11456 {
   11457 
   11458 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11459 		device_xname(sc->sc_dev), __func__));
   11460 	return 0;
   11461 }
   11462 
   11463 static void
   11464 wm_put_null(struct wm_softc *sc)
   11465 {
   11466 
   11467 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11468 		device_xname(sc->sc_dev), __func__));
   11469 	return;
   11470 }
   11471 
   11472 /*
   11473  * Get hardware semaphore.
   11474  * Same as e1000_get_hw_semaphore_generic()
   11475  */
   11476 static int
   11477 wm_get_swsm_semaphore(struct wm_softc *sc)
   11478 {
   11479 	int32_t timeout;
   11480 	uint32_t swsm;
   11481 
   11482 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11483 		device_xname(sc->sc_dev), __func__));
   11484 	KASSERT(sc->sc_nvm_wordsize > 0);
   11485 
   11486 	/* Get the SW semaphore. */
   11487 	timeout = sc->sc_nvm_wordsize + 1;
   11488 	while (timeout) {
   11489 		swsm = CSR_READ(sc, WMREG_SWSM);
   11490 
   11491 		if ((swsm & SWSM_SMBI) == 0)
   11492 			break;
   11493 
   11494 		delay(50);
   11495 		timeout--;
   11496 	}
   11497 
   11498 	if (timeout == 0) {
   11499 		aprint_error_dev(sc->sc_dev,
   11500 		    "could not acquire SWSM SMBI\n");
   11501 		return 1;
   11502 	}
   11503 
   11504 	/* Get the FW semaphore. */
   11505 	timeout = sc->sc_nvm_wordsize + 1;
   11506 	while (timeout) {
   11507 		swsm = CSR_READ(sc, WMREG_SWSM);
   11508 		swsm |= SWSM_SWESMBI;
   11509 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11510 		/* If we managed to set the bit we got the semaphore. */
   11511 		swsm = CSR_READ(sc, WMREG_SWSM);
   11512 		if (swsm & SWSM_SWESMBI)
   11513 			break;
   11514 
   11515 		delay(50);
   11516 		timeout--;
   11517 	}
   11518 
   11519 	if (timeout == 0) {
   11520 		aprint_error_dev(sc->sc_dev,
   11521 		    "could not acquire SWSM SWESMBI\n");
   11522 		/* Release semaphores */
   11523 		wm_put_swsm_semaphore(sc);
   11524 		return 1;
   11525 	}
   11526 	return 0;
   11527 }
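
/*
 * Usage sketch: the two-stage handshake above (SMBI for software,
 * SWESMBI for software vs. firmware) means every successful acquisition
 * must be paired with a release, as wm_get_swfw_semaphore() below does:
 *
 *	if (wm_get_swsm_semaphore(sc))
 *		return 1;	// timed out; do not touch the resource
 *	// ... access the shared NVM/PHY resource ...
 *	wm_put_swsm_semaphore(sc);
 */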
   11528 
   11529 /*
   11530  * Put hardware semaphore.
   11531  * Same as e1000_put_hw_semaphore_generic()
   11532  */
   11533 static void
   11534 wm_put_swsm_semaphore(struct wm_softc *sc)
   11535 {
   11536 	uint32_t swsm;
   11537 
   11538 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11539 		device_xname(sc->sc_dev), __func__));
   11540 
   11541 	swsm = CSR_READ(sc, WMREG_SWSM);
   11542 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11543 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11544 }
   11545 
   11546 /*
   11547  * Get SW/FW semaphore.
   11548  * Same as e1000_acquire_swfw_sync_82575().
   11549  */
   11550 static int
   11551 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11552 {
   11553 	uint32_t swfw_sync;
   11554 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11555 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11556 	int timeout = 200;
   11557 
   11558 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11559 		device_xname(sc->sc_dev), __func__));
   11560 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11561 
   11562 	for (timeout = 0; timeout < 200; timeout++) {
   11563 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11564 			if (wm_get_swsm_semaphore(sc)) {
   11565 				aprint_error_dev(sc->sc_dev,
   11566 				    "%s: failed to get semaphore\n",
   11567 				    __func__);
   11568 				return 1;
   11569 			}
   11570 		}
   11571 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11572 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11573 			swfw_sync |= swmask;
   11574 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11575 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11576 				wm_put_swsm_semaphore(sc);
   11577 			return 0;
   11578 		}
   11579 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11580 			wm_put_swsm_semaphore(sc);
   11581 		delay(5000);
   11582 	}
   11583 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11584 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11585 	return 1;
   11586 }
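
/*
 * Bit-layout sketch for SW_FW_SYNC as used above: the same per-resource
 * mask is shifted into a software half and a firmware half (the actual
 * SWFW_SOFT_SHIFT/SWFW_FIRM_SHIFT values are in if_wmreg.h).  For a
 * hypothetical resource mask of 0x02:
 *
 *	swmask = 0x02 << SWFW_SOFT_SHIFT;	// "software owns it" bit
 *	fwmask = 0x02 << SWFW_FIRM_SHIFT;	// "firmware owns it" bit
 *
 * The resource is free only while both bits are clear, and the driver
 * claims it by setting swmask alone.
 */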
   11587 
   11588 static void
   11589 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11590 {
   11591 	uint32_t swfw_sync;
   11592 
   11593 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11594 		device_xname(sc->sc_dev), __func__));
   11595 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11596 
   11597 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11598 		while (wm_get_swsm_semaphore(sc) != 0)
   11599 			continue;
   11600 	}
   11601 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11602 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11603 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11604 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11605 		wm_put_swsm_semaphore(sc);
   11606 }
   11607 
   11608 static int
   11609 wm_get_phy_82575(struct wm_softc *sc)
   11610 {
   11611 
   11612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11613 		device_xname(sc->sc_dev), __func__));
   11614 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11615 }
   11616 
   11617 static void
   11618 wm_put_phy_82575(struct wm_softc *sc)
   11619 {
   11620 
   11621 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11622 		device_xname(sc->sc_dev), __func__));
   11623 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11624 }
   11625 
   11626 static int
   11627 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11628 {
   11629 	uint32_t ext_ctrl;
   11630 	int timeout = 200;
   11631 
   11632 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11633 		device_xname(sc->sc_dev), __func__));
   11634 
   11635 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11636 	for (timeout = 0; timeout < 200; timeout++) {
   11637 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11638 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11639 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11640 
   11641 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11642 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11643 			return 0;
   11644 		delay(5000);
   11645 	}
   11646 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11647 	    device_xname(sc->sc_dev), ext_ctrl);
   11648 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11649 	return 1;
   11650 }
   11651 
   11652 static void
   11653 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11654 {
   11655 	uint32_t ext_ctrl;
   11656 
   11657 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11658 		device_xname(sc->sc_dev), __func__));
   11659 
   11660 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11661 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11662 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11663 
   11664 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11665 }
   11666 
   11667 static int
   11668 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11669 {
   11670 	uint32_t ext_ctrl;
   11671 	int timeout;
   11672 
   11673 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11674 		device_xname(sc->sc_dev), __func__));
   11675 	mutex_enter(sc->sc_ich_phymtx);
   11676 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11677 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11678 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11679 			break;
   11680 		delay(1000);
   11681 	}
   11682 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11683 		printf("%s: SW has already locked the resource\n",
   11684 		    device_xname(sc->sc_dev));
   11685 		goto out;
   11686 	}
   11687 
   11688 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11689 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11690 	for (timeout = 0; timeout < 1000; timeout++) {
   11691 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11692 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11693 			break;
   11694 		delay(1000);
   11695 	}
   11696 	if (timeout >= 1000) {
   11697 		printf("%s: failed to acquire semaphore\n",
   11698 		    device_xname(sc->sc_dev));
   11699 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11700 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11701 		goto out;
   11702 	}
   11703 	return 0;
   11704 
   11705 out:
   11706 	mutex_exit(sc->sc_ich_phymtx);
   11707 	return 1;
   11708 }
   11709 
   11710 static void
   11711 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11712 {
   11713 	uint32_t ext_ctrl;
   11714 
   11715 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11716 		device_xname(sc->sc_dev), __func__));
   11717 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11718 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11719 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11720 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11721 	} else {
   11722 		printf("%s: Semaphore unexpectedly released\n",
   11723 		    device_xname(sc->sc_dev));
   11724 	}
   11725 
   11726 	mutex_exit(sc->sc_ich_phymtx);
   11727 }
   11728 
   11729 static int
   11730 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11731 {
   11732 
   11733 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11734 		device_xname(sc->sc_dev), __func__));
   11735 	mutex_enter(sc->sc_ich_nvmmtx);
   11736 
   11737 	return 0;
   11738 }
   11739 
   11740 static void
   11741 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11742 {
   11743 
   11744 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11745 		device_xname(sc->sc_dev), __func__));
   11746 	mutex_exit(sc->sc_ich_nvmmtx);
   11747 }
   11748 
   11749 static int
   11750 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11751 {
   11752 	int i = 0;
   11753 	uint32_t reg;
   11754 
   11755 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11756 		device_xname(sc->sc_dev), __func__));
   11757 
   11758 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11759 	do {
   11760 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11761 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11762 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11763 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11764 			break;
   11765 		delay(2*1000);
   11766 		i++;
   11767 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11768 
   11769 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11770 		wm_put_hw_semaphore_82573(sc);
   11771 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11772 		    device_xname(sc->sc_dev));
   11773 		return -1;
   11774 	}
   11775 
   11776 	return 0;
   11777 }
   11778 
   11779 static void
   11780 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11781 {
   11782 	uint32_t reg;
   11783 
   11784 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11785 		device_xname(sc->sc_dev), __func__));
   11786 
   11787 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11788 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11789 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11790 }
   11791 
   11792 /*
   11793  * Management mode and power management related subroutines.
   11794  * BMC, AMT, suspend/resume and EEE.
   11795  */
   11796 
   11797 #ifdef WM_WOL
   11798 static int
   11799 wm_check_mng_mode(struct wm_softc *sc)
   11800 {
   11801 	int rv;
   11802 
   11803 	switch (sc->sc_type) {
   11804 	case WM_T_ICH8:
   11805 	case WM_T_ICH9:
   11806 	case WM_T_ICH10:
   11807 	case WM_T_PCH:
   11808 	case WM_T_PCH2:
   11809 	case WM_T_PCH_LPT:
   11810 	case WM_T_PCH_SPT:
   11811 		rv = wm_check_mng_mode_ich8lan(sc);
   11812 		break;
   11813 	case WM_T_82574:
   11814 	case WM_T_82583:
   11815 		rv = wm_check_mng_mode_82574(sc);
   11816 		break;
   11817 	case WM_T_82571:
   11818 	case WM_T_82572:
   11819 	case WM_T_82573:
   11820 	case WM_T_80003:
   11821 		rv = wm_check_mng_mode_generic(sc);
   11822 		break;
   11823 	default:
    11824 		/* nothing to do */
   11825 		rv = 0;
   11826 		break;
   11827 	}
   11828 
   11829 	return rv;
   11830 }
   11831 
   11832 static int
   11833 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11834 {
   11835 	uint32_t fwsm;
   11836 
   11837 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11838 
   11839 	if (((fwsm & FWSM_FW_VALID) != 0)
   11840 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11841 		return 1;
   11842 
   11843 	return 0;
   11844 }
   11845 
   11846 static int
   11847 wm_check_mng_mode_82574(struct wm_softc *sc)
   11848 {
   11849 	uint16_t data;
   11850 
   11851 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11852 
   11853 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11854 		return 1;
   11855 
   11856 	return 0;
   11857 }
   11858 
   11859 static int
   11860 wm_check_mng_mode_generic(struct wm_softc *sc)
   11861 {
   11862 	uint32_t fwsm;
   11863 
   11864 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11865 
   11866 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11867 		return 1;
   11868 
   11869 	return 0;
   11870 }
   11871 #endif /* WM_WOL */
   11872 
   11873 static int
   11874 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11875 {
   11876 	uint32_t manc, fwsm, factps;
   11877 
   11878 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11879 		return 0;
   11880 
   11881 	manc = CSR_READ(sc, WMREG_MANC);
   11882 
   11883 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11884 		device_xname(sc->sc_dev), manc));
   11885 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11886 		return 0;
   11887 
   11888 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11889 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11890 		factps = CSR_READ(sc, WMREG_FACTPS);
   11891 		if (((factps & FACTPS_MNGCG) == 0)
   11892 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11893 			return 1;
    11894 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11895 		uint16_t data;
   11896 
   11897 		factps = CSR_READ(sc, WMREG_FACTPS);
   11898 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11899 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11900 			device_xname(sc->sc_dev), factps, data));
   11901 		if (((factps & FACTPS_MNGCG) == 0)
   11902 		    && ((data & NVM_CFG2_MNGM_MASK)
   11903 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11904 			return 1;
   11905 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11906 	    && ((manc & MANC_ASF_EN) == 0))
   11907 		return 1;
   11908 
   11909 	return 0;
   11910 }
   11911 
   11912 static bool
   11913 wm_phy_resetisblocked(struct wm_softc *sc)
   11914 {
   11915 	bool blocked = false;
   11916 	uint32_t reg;
   11917 	int i = 0;
   11918 
   11919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11920 		device_xname(sc->sc_dev), __func__));
   11921 
   11922 	switch (sc->sc_type) {
   11923 	case WM_T_ICH8:
   11924 	case WM_T_ICH9:
   11925 	case WM_T_ICH10:
   11926 	case WM_T_PCH:
   11927 	case WM_T_PCH2:
   11928 	case WM_T_PCH_LPT:
   11929 	case WM_T_PCH_SPT:
   11930 		do {
   11931 			reg = CSR_READ(sc, WMREG_FWSM);
   11932 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11933 				blocked = true;
   11934 				delay(10*1000);
   11935 				continue;
   11936 			}
   11937 			blocked = false;
   11938 		} while (blocked && (i++ < 30));
   11939 		return blocked;
   11940 		break;
   11941 	case WM_T_82571:
   11942 	case WM_T_82572:
   11943 	case WM_T_82573:
   11944 	case WM_T_82574:
   11945 	case WM_T_82583:
   11946 	case WM_T_80003:
   11947 		reg = CSR_READ(sc, WMREG_MANC);
   11948 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11949 			return true;
   11950 		else
   11951 			return false;
   11952 		break;
   11953 	default:
   11954 		/* no problem */
   11955 		break;
   11956 	}
   11957 
   11958 	return false;
   11959 }
   11960 
   11961 static void
   11962 wm_get_hw_control(struct wm_softc *sc)
   11963 {
   11964 	uint32_t reg;
   11965 
   11966 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11967 		device_xname(sc->sc_dev), __func__));
   11968 
   11969 	if (sc->sc_type == WM_T_82573) {
   11970 		reg = CSR_READ(sc, WMREG_SWSM);
   11971 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11972 	} else if (sc->sc_type >= WM_T_82571) {
   11973 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11974 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11975 	}
   11976 }
   11977 
   11978 static void
   11979 wm_release_hw_control(struct wm_softc *sc)
   11980 {
   11981 	uint32_t reg;
   11982 
   11983 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11984 		device_xname(sc->sc_dev), __func__));
   11985 
   11986 	if (sc->sc_type == WM_T_82573) {
   11987 		reg = CSR_READ(sc, WMREG_SWSM);
   11988 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11989 	} else if (sc->sc_type >= WM_T_82571) {
   11990 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11991 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11992 	}
   11993 }
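
/*
 * Lifecycle sketch (not additional driver code): DRV_LOAD tells the
 * management firmware whether the host driver currently owns the
 * hardware, so the expected pairing is:
 *
 *	wm_get_hw_control(sc);		// attach/init: driver takes over
 *	// ... normal operation ...
 *	wm_release_hw_control(sc);	// detach/suspend: firmware takes over
 */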
   11994 
   11995 static void
   11996 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11997 {
   11998 	uint32_t reg;
   11999 
   12000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12001 		device_xname(sc->sc_dev), __func__));
   12002 
   12003 	if (sc->sc_type < WM_T_PCH2)
   12004 		return;
   12005 
   12006 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12007 
   12008 	if (gate)
   12009 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12010 	else
   12011 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12012 
   12013 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12014 }
   12015 
   12016 static void
   12017 wm_smbustopci(struct wm_softc *sc)
   12018 {
   12019 	uint32_t fwsm, reg;
   12020 	int rv = 0;
   12021 
   12022 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12023 		device_xname(sc->sc_dev), __func__));
   12024 
   12025 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12026 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12027 
   12028 	/* Disable ULP */
   12029 	wm_ulp_disable(sc);
   12030 
   12031 	/* Acquire PHY semaphore */
   12032 	sc->phy.acquire(sc);
   12033 
   12034 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12035 	switch (sc->sc_type) {
   12036 	case WM_T_PCH_LPT:
   12037 	case WM_T_PCH_SPT:
   12038 		if (wm_phy_is_accessible_pchlan(sc))
   12039 			break;
   12040 
   12041 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12042 		reg |= CTRL_EXT_FORCE_SMBUS;
   12043 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12044 #if 0
   12045 		/* XXX Isn't this required??? */
   12046 		CSR_WRITE_FLUSH(sc);
   12047 #endif
   12048 		delay(50 * 1000);
   12049 		/* FALLTHROUGH */
   12050 	case WM_T_PCH2:
   12051 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12052 			break;
   12053 		/* FALLTHROUGH */
   12054 	case WM_T_PCH:
   12055 		if (sc->sc_type == WM_T_PCH)
   12056 			if ((fwsm & FWSM_FW_VALID) != 0)
   12057 				break;
   12058 
   12059 		if (wm_phy_resetisblocked(sc) == true) {
   12060 			printf("XXX reset is blocked(3)\n");
   12061 			break;
   12062 		}
   12063 
   12064 		wm_toggle_lanphypc_pch_lpt(sc);
   12065 
   12066 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12067 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12068 				break;
   12069 
   12070 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12071 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12072 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12073 
   12074 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12075 				break;
   12076 			rv = -1;
   12077 		}
   12078 		break;
   12079 	default:
   12080 		break;
   12081 	}
   12082 
   12083 	/* Release semaphore */
   12084 	sc->phy.release(sc);
   12085 
   12086 	if (rv == 0) {
   12087 		if (wm_phy_resetisblocked(sc)) {
   12088 			printf("XXX reset is blocked(4)\n");
   12089 			goto out;
   12090 		}
   12091 		wm_reset_phy(sc);
   12092 		if (wm_phy_resetisblocked(sc))
   12093 			printf("XXX reset is blocked(4)\n");
   12094 	}
   12095 
   12096 out:
   12097 	/*
   12098 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12099 	 */
   12100 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12101 		delay(10*1000);
   12102 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12103 	}
   12104 }
   12105 
   12106 static void
   12107 wm_init_manageability(struct wm_softc *sc)
   12108 {
   12109 
   12110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12111 		device_xname(sc->sc_dev), __func__));
   12112 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12113 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12114 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12115 
   12116 		/* Disable hardware interception of ARP */
   12117 		manc &= ~MANC_ARP_EN;
   12118 
   12119 		/* Enable receiving management packets to the host */
   12120 		if (sc->sc_type >= WM_T_82571) {
   12121 			manc |= MANC_EN_MNG2HOST;
    12122 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12123 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12124 		}
   12125 
   12126 		CSR_WRITE(sc, WMREG_MANC, manc);
   12127 	}
   12128 }
   12129 
   12130 static void
   12131 wm_release_manageability(struct wm_softc *sc)
   12132 {
   12133 
   12134 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12135 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12136 
   12137 		manc |= MANC_ARP_EN;
   12138 		if (sc->sc_type >= WM_T_82571)
   12139 			manc &= ~MANC_EN_MNG2HOST;
   12140 
   12141 		CSR_WRITE(sc, WMREG_MANC, manc);
   12142 	}
   12143 }
   12144 
   12145 static void
   12146 wm_get_wakeup(struct wm_softc *sc)
   12147 {
   12148 
   12149 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12150 	switch (sc->sc_type) {
   12151 	case WM_T_82573:
   12152 	case WM_T_82583:
   12153 		sc->sc_flags |= WM_F_HAS_AMT;
   12154 		/* FALLTHROUGH */
   12155 	case WM_T_80003:
   12156 	case WM_T_82575:
   12157 	case WM_T_82576:
   12158 	case WM_T_82580:
   12159 	case WM_T_I350:
   12160 	case WM_T_I354:
   12161 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12162 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12163 		/* FALLTHROUGH */
   12164 	case WM_T_82541:
   12165 	case WM_T_82541_2:
   12166 	case WM_T_82547:
   12167 	case WM_T_82547_2:
   12168 	case WM_T_82571:
   12169 	case WM_T_82572:
   12170 	case WM_T_82574:
   12171 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12172 		break;
   12173 	case WM_T_ICH8:
   12174 	case WM_T_ICH9:
   12175 	case WM_T_ICH10:
   12176 	case WM_T_PCH:
   12177 	case WM_T_PCH2:
   12178 	case WM_T_PCH_LPT:
   12179 	case WM_T_PCH_SPT:
   12180 		sc->sc_flags |= WM_F_HAS_AMT;
   12181 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12182 		break;
   12183 	default:
   12184 		break;
   12185 	}
   12186 
   12187 	/* 1: HAS_MANAGE */
   12188 	if (wm_enable_mng_pass_thru(sc) != 0)
   12189 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12190 
   12191 #ifdef WM_DEBUG
   12192 	printf("\n");
   12193 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12194 		printf("HAS_AMT,");
   12195 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12196 		printf("ARC_SUBSYS_VALID,");
   12197 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12198 		printf("ASF_FIRMWARE_PRES,");
   12199 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12200 		printf("HAS_MANAGE,");
   12201 	printf("\n");
   12202 #endif
   12203 	/*
    12204 	 * Note that the WOL flags are set after the EEPROM-related state
    12205 	 * has been reset.
   12206 	 */
   12207 }
   12208 
   12209 /*
   12210  * Unconfigure Ultra Low Power mode.
   12211  * Only for I217 and newer (see below).
   12212  */
   12213 static void
   12214 wm_ulp_disable(struct wm_softc *sc)
   12215 {
   12216 	uint32_t reg;
   12217 	int i = 0;
   12218 
   12219 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12220 		device_xname(sc->sc_dev), __func__));
   12221 	/* Exclude old devices */
   12222 	if ((sc->sc_type < WM_T_PCH_LPT)
   12223 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12224 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12225 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12226 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12227 		return;
   12228 
   12229 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12230 		/* Request ME un-configure ULP mode in the PHY */
   12231 		reg = CSR_READ(sc, WMREG_H2ME);
   12232 		reg &= ~H2ME_ULP;
   12233 		reg |= H2ME_ENFORCE_SETTINGS;
   12234 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12235 
   12236 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12237 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12238 			if (i++ == 30) {
   12239 				printf("%s timed out\n", __func__);
   12240 				return;
   12241 			}
   12242 			delay(10 * 1000);
   12243 		}
   12244 		reg = CSR_READ(sc, WMREG_H2ME);
   12245 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12246 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12247 
   12248 		return;
   12249 	}
   12250 
   12251 	/* Acquire semaphore */
   12252 	sc->phy.acquire(sc);
   12253 
   12254 	/* Toggle LANPHYPC */
   12255 	wm_toggle_lanphypc_pch_lpt(sc);
   12256 
   12257 	/* Unforce SMBus mode in PHY */
   12258 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12259 	if (reg == 0x0000 || reg == 0xffff) {
   12260 		uint32_t reg2;
   12261 
   12262 		printf("%s: Force SMBus first.\n", __func__);
   12263 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12264 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12265 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12266 		delay(50 * 1000);
   12267 
   12268 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12269 	}
   12270 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12271 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12272 
   12273 	/* Unforce SMBus mode in MAC */
   12274 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12275 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12276 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12277 
   12278 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12279 	reg |= HV_PM_CTRL_K1_ENA;
   12280 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12281 
   12282 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12283 	reg &= ~(I218_ULP_CONFIG1_IND
   12284 	    | I218_ULP_CONFIG1_STICKY_ULP
   12285 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12286 	    | I218_ULP_CONFIG1_WOL_HOST
   12287 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12288 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12289 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12290 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12291 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12292 	reg |= I218_ULP_CONFIG1_START;
   12293 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12294 
   12295 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12296 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12297 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12298 
   12299 	/* Release semaphore */
   12300 	sc->phy.release(sc);
   12301 	wm_gmii_reset(sc);
   12302 	delay(50 * 1000);
   12303 }
   12304 
   12305 /* WOL in the newer chipset interfaces (pchlan) */
   12306 static void
   12307 wm_enable_phy_wakeup(struct wm_softc *sc)
   12308 {
   12309 #if 0
   12310 	uint16_t preg;
   12311 
   12312 	/* Copy MAC RARs to PHY RARs */
   12313 
   12314 	/* Copy MAC MTA to PHY MTA */
   12315 
   12316 	/* Configure PHY Rx Control register */
   12317 
   12318 	/* Enable PHY wakeup in MAC register */
   12319 
   12320 	/* Configure and enable PHY wakeup in PHY registers */
   12321 
   12322 	/* Activate PHY wakeup */
   12323 
   12324 	/* XXX */
   12325 #endif
   12326 }
   12327 
   12328 /* Power down workaround on D3 */
   12329 static void
   12330 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12331 {
   12332 	uint32_t reg;
   12333 	int i;
   12334 
   12335 	for (i = 0; i < 2; i++) {
   12336 		/* Disable link */
   12337 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12338 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12339 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12340 
   12341 		/*
   12342 		 * Call gig speed drop workaround on Gig disable before
   12343 		 * accessing any PHY registers
   12344 		 */
   12345 		if (sc->sc_type == WM_T_ICH8)
   12346 			wm_gig_downshift_workaround_ich8lan(sc);
   12347 
   12348 		/* Write VR power-down enable */
   12349 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12350 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12351 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12352 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12353 
   12354 		/* Read it back and test */
   12355 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12356 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12357 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12358 			break;
   12359 
   12360 		/* Issue PHY reset and repeat at most one more time */
   12361 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12362 	}
   12363 }
   12364 
   12365 static void
   12366 wm_enable_wakeup(struct wm_softc *sc)
   12367 {
   12368 	uint32_t reg, pmreg;
   12369 	pcireg_t pmode;
   12370 
   12371 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12372 		device_xname(sc->sc_dev), __func__));
   12373 
   12374 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12375 		&pmreg, NULL) == 0)
   12376 		return;
   12377 
   12378 	/* Advertise the wakeup capability */
   12379 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12380 	    | CTRL_SWDPIN(3));
   12381 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12382 
   12383 	/* ICH workaround */
   12384 	switch (sc->sc_type) {
   12385 	case WM_T_ICH8:
   12386 	case WM_T_ICH9:
   12387 	case WM_T_ICH10:
   12388 	case WM_T_PCH:
   12389 	case WM_T_PCH2:
   12390 	case WM_T_PCH_LPT:
   12391 	case WM_T_PCH_SPT:
   12392 		/* Disable gig during WOL */
   12393 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12394 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12395 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12396 		if (sc->sc_type == WM_T_PCH)
   12397 			wm_gmii_reset(sc);
   12398 
   12399 		/* Power down workaround */
   12400 		if (sc->sc_phytype == WMPHY_82577) {
   12401 			struct mii_softc *child;
   12402 
   12403 			/* Assume that the PHY is copper */
   12404 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12405 			if (child->mii_mpd_rev <= 2)
   12406 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12407 				    (768 << 5) | 25, 0x0444); /* magic num */
   12408 		}
   12409 		break;
   12410 	default:
   12411 		break;
   12412 	}
   12413 
   12414 	/* Keep the laser running on fiber adapters */
   12415 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12416 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12417 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12418 		reg |= CTRL_EXT_SWDPIN(3);
   12419 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12420 	}
   12421 
   12422 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12423 #if 0	/* for the multicast packet */
   12424 	reg |= WUFC_MC;
   12425 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12426 #endif
   12427 
   12428 	if (sc->sc_type >= WM_T_PCH)
   12429 		wm_enable_phy_wakeup(sc);
   12430 	else {
   12431 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12432 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12433 	}
   12434 
   12435 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12436 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12437 		|| (sc->sc_type == WM_T_PCH2))
   12438 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12439 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12440 
   12441 	/* Request PME */
   12442 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12443 #if 0
   12444 	/* Disable WOL */
   12445 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12446 #else
   12447 	/* For WOL */
   12448 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12449 #endif
   12450 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12451 }
   12452 
   12453 /* LPLU */
   12454 
   12455 static void
   12456 wm_lplu_d0_disable(struct wm_softc *sc)
   12457 {
   12458 	uint32_t reg;
   12459 
   12460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12461 		device_xname(sc->sc_dev), __func__));
   12462 
   12463 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12464 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12465 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12466 }
   12467 
   12468 static void
   12469 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12470 {
   12471 	uint32_t reg;
   12472 
   12473 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12474 		device_xname(sc->sc_dev), __func__));
   12475 
   12476 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12477 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12478 	reg |= HV_OEM_BITS_ANEGNOW;
   12479 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12480 }
   12481 
   12482 /* EEE */
   12483 
   12484 static void
   12485 wm_set_eee_i350(struct wm_softc *sc)
   12486 {
   12487 	uint32_t ipcnfg, eeer;
   12488 
   12489 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12490 	eeer = CSR_READ(sc, WMREG_EEER);
   12491 
   12492 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12493 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12494 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12495 		    | EEER_LPI_FC);
   12496 	} else {
   12497 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12498 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12499 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12500 		    | EEER_LPI_FC);
   12501 	}
   12502 
   12503 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12504 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12505 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12506 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12507 }
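
/*
 * Usage sketch (hypothetical caller): EEE on these chips is driven
 * entirely by the WM_F_EEE flag, so enabling it amounts to:
 *
 *	sc->sc_flags |= WM_F_EEE;
 *	wm_set_eee_i350(sc);
 */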
   12508 
   12509 /*
   12510  * Workarounds (mainly PHY related).
   12511  * Basically, PHY's workarounds are in the PHY drivers.
   12512  */
   12513 
   12514 /* Work-around for 82566 Kumeran PCS lock loss */
   12515 static void
   12516 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12517 {
   12518 #if 0
   12519 	int miistatus, active, i;
   12520 	int reg;
   12521 
   12522 	miistatus = sc->sc_mii.mii_media_status;
   12523 
   12524 	/* If the link is not up, do nothing */
   12525 	if ((miistatus & IFM_ACTIVE) == 0)
   12526 		return;
   12527 
   12528 	active = sc->sc_mii.mii_media_active;
   12529 
   12530 	/* Nothing to do if the link is other than 1Gbps */
   12531 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12532 		return;
   12533 
   12534 	for (i = 0; i < 10; i++) {
   12535 		/* read twice */
   12536 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12537 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12538 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12539 			goto out;	/* GOOD! */
   12540 
   12541 		/* Reset the PHY */
   12542 		wm_gmii_reset(sc);
   12543 		delay(5*1000);
   12544 	}
   12545 
   12546 	/* Disable GigE link negotiation */
   12547 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12548 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12549 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12550 
   12551 	/*
   12552 	 * Call gig speed drop workaround on Gig disable before accessing
   12553 	 * any PHY registers.
   12554 	 */
   12555 	wm_gig_downshift_workaround_ich8lan(sc);
   12556 
   12557 out:
   12558 	return;
   12559 #endif
   12560 }
   12561 
   12562 /* WOL from S5 stops working */
   12563 static void
   12564 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12565 {
   12566 	uint16_t kmrn_reg;
   12567 
   12568 	/* Only for igp3 */
   12569 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12570 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12571 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12572 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12573 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12574 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12575 	}
   12576 }
   12577 
   12578 /*
   12579  * Workaround for pch's PHYs
   12580  * XXX should be moved to new PHY driver?
   12581  */
   12582 static void
   12583 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12584 {
   12585 
   12586 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12587 		device_xname(sc->sc_dev), __func__));
   12588 	KASSERT(sc->sc_type == WM_T_PCH);
   12589 
   12590 	if (sc->sc_phytype == WMPHY_82577)
   12591 		wm_set_mdio_slow_mode_hv(sc);
   12592 
   12593 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12594 
   12595 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12596 
   12597 	/* 82578 */
   12598 	if (sc->sc_phytype == WMPHY_82578) {
   12599 		struct mii_softc *child;
   12600 
   12601 		/*
   12602 		 * Return registers to default by doing a soft reset then
   12603 		 * writing 0x3140 to the control register
   12604 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12605 		 */
   12606 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12607 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12608 			PHY_RESET(child);
   12609 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12610 			    0x3140);
   12611 		}
   12612 	}
   12613 
   12614 	/* Select page 0 */
   12615 	sc->phy.acquire(sc);
   12616 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12617 	sc->phy.release(sc);
   12618 
   12619 	/*
   12620 	 * Configure the K1 Si workaround during phy reset assuming there is
   12621 	 * link so that it disables K1 if link is in 1Gbps.
   12622 	 */
   12623 	wm_k1_gig_workaround_hv(sc, 1);
   12624 }
   12625 
   12626 static void
   12627 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12628 {
   12629 
   12630 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12631 		device_xname(sc->sc_dev), __func__));
   12632 	KASSERT(sc->sc_type == WM_T_PCH2);
   12633 
   12634 	wm_set_mdio_slow_mode_hv(sc);
   12635 }
   12636 
   12637 static int
   12638 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12639 {
   12640 	int k1_enable = sc->sc_nvm_k1_enabled;
   12641 
   12642 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12643 		device_xname(sc->sc_dev), __func__));
   12644 
   12645 	if (sc->phy.acquire(sc) != 0)
   12646 		return -1;
   12647 
   12648 	if (link) {
   12649 		k1_enable = 0;
   12650 
   12651 		/* Link stall fix for link up */
   12652 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12653 	} else {
   12654 		/* Link stall fix for link down */
   12655 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12656 	}
   12657 
   12658 	wm_configure_k1_ich8lan(sc, k1_enable);
   12659 	sc->phy.release(sc);
   12660 
   12661 	return 0;
   12662 }
   12663 
   12664 static void
   12665 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12666 {
   12667 	uint32_t reg;
   12668 
   12669 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12670 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12671 	    reg | HV_KMRN_MDIO_SLOW);
   12672 }
   12673 
   12674 static void
   12675 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12676 {
   12677 	uint32_t ctrl, ctrl_ext, tmp;
   12678 	uint16_t kmrn_reg;
   12679 
   12680 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12681 
   12682 	if (k1_enable)
   12683 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12684 	else
   12685 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12686 
   12687 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12688 
   12689 	delay(20);
   12690 
   12691 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12692 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12693 
   12694 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12695 	tmp |= CTRL_FRCSPD;
   12696 
   12697 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12698 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12699 	CSR_WRITE_FLUSH(sc);
   12700 	delay(20);
   12701 
   12702 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12703 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12704 	CSR_WRITE_FLUSH(sc);
   12705 	delay(20);
   12706 }
   12707 
    12708 /* Special case: the 82575 needs manual init ... */
   12709 static void
   12710 wm_reset_init_script_82575(struct wm_softc *sc)
   12711 {
   12712 	/*
    12713 	 * Remark: this is untested code - we have no board without EEPROM;
    12714 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   12715 	 */
   12716 
   12717 	/* SerDes configuration via SERDESCTRL */
   12718 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12719 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12720 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12721 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12722 
   12723 	/* CCM configuration via CCMCTL register */
   12724 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12725 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12726 
   12727 	/* PCIe lanes configuration */
   12728 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12729 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12730 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12731 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12732 
   12733 	/* PCIe PLL Configuration */
   12734 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12735 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12736 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12737 }
   12738 
   12739 static void
   12740 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12741 {
   12742 	uint32_t reg;
   12743 	uint16_t nvmword;
   12744 	int rv;
   12745 
   12746 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12747 		return;
   12748 
   12749 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12750 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12751 	if (rv != 0) {
   12752 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12753 		    __func__);
   12754 		return;
   12755 	}
   12756 
   12757 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12758 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12759 		reg |= MDICNFG_DEST;
   12760 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12761 		reg |= MDICNFG_COM_MDIO;
   12762 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12763 }
   12764 
   12765 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12766 
   12767 static bool
   12768 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12769 {
   12770 	int i;
   12771 	uint32_t reg;
   12772 	uint16_t id1, id2;
   12773 
   12774 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12775 		device_xname(sc->sc_dev), __func__));
   12776 	id1 = id2 = 0xffff;
   12777 	for (i = 0; i < 2; i++) {
   12778 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12779 		if (MII_INVALIDID(id1))
   12780 			continue;
   12781 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12782 		if (MII_INVALIDID(id2))
   12783 			continue;
   12784 		break;
   12785 	}
   12786 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   12787 		goto out;
   12788 	}
   12789 
   12790 	if (sc->sc_type < WM_T_PCH_LPT) {
   12791 		sc->phy.release(sc);
   12792 		wm_set_mdio_slow_mode_hv(sc);
   12793 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12794 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12795 		sc->phy.acquire(sc);
   12796 	}
   12797 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12798 		printf("XXX return with false\n");
   12799 		return false;
   12800 	}
   12801 out:
   12802 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12803 		/* Only unforce SMBus if ME is not active */
   12804 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12805 			/* Unforce SMBus mode in PHY */
   12806 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12807 			    CV_SMB_CTRL);
   12808 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12809 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12810 			    CV_SMB_CTRL, reg);
   12811 
   12812 			/* Unforce SMBus mode in MAC */
   12813 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12814 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12815 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12816 		}
   12817 	}
   12818 	return true;
   12819 }
   12820 
   12821 static void
   12822 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12823 {
   12824 	uint32_t reg;
   12825 	int i;
   12826 
   12827 	/* Set PHY Config Counter to 50msec */
   12828 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12829 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12830 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12831 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12832 
   12833 	/* Toggle LANPHYPC */
   12834 	reg = CSR_READ(sc, WMREG_CTRL);
   12835 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12836 	reg &= ~CTRL_LANPHYPC_VALUE;
   12837 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12838 	CSR_WRITE_FLUSH(sc);
   12839 	delay(1000);
   12840 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12841 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12842 	CSR_WRITE_FLUSH(sc);
   12843 
   12844 	if (sc->sc_type < WM_T_PCH_LPT)
   12845 		delay(50 * 1000);
   12846 	else {
   12847 		i = 20;
   12848 
   12849 		do {
   12850 			delay(5 * 1000);
   12851 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12852 		    && i--);
   12853 
   12854 		delay(30 * 1000);
   12855 	}
   12856 }
   12857 
   12858 static int
   12859 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12860 {
   12861 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12862 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12863 	uint32_t rxa;
   12864 	uint16_t scale = 0, lat_enc = 0;
   12865 	int64_t lat_ns, value;
   12866 
   12867 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12868 		device_xname(sc->sc_dev), __func__));
   12869 
   12870 	if (link) {
   12871 		pcireg_t preg;
   12872 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12873 
   12874 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12875 
   12876 		/*
   12877 		 * Determine the maximum latency tolerated by the device.
   12878 		 *
   12879 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12880 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12881 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12882 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12883 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12884 		 */
   12885 		lat_ns = ((int64_t)rxa * 1024 -
   12886 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12887 		if (lat_ns < 0)
   12888 			lat_ns = 0;
   12889 		else {
   12890 			uint32_t status;
   12891 			uint16_t speed;
   12892 
   12893 			status = CSR_READ(sc, WMREG_STATUS);
   12894 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12895 			case STATUS_SPEED_10:
   12896 				speed = 10;
   12897 				break;
   12898 			case STATUS_SPEED_100:
   12899 				speed = 100;
   12900 				break;
   12901 			case STATUS_SPEED_1000:
   12902 				speed = 1000;
   12903 				break;
   12904 			default:
   12905 				printf("%s: Unknown speed (status = %08x)\n",
   12906 				    device_xname(sc->sc_dev), status);
   12907 				return -1;
   12908 			}
   12909 			lat_ns /= speed;
   12910 		}
   12911 		value = lat_ns;
   12912 
   12913 		while (value > LTRV_VALUE) {
    12914 			scale++;
   12915 			value = howmany(value, __BIT(5));
   12916 		}
   12917 		if (scale > LTRV_SCALE_MAX) {
   12918 			printf("%s: Invalid LTR latency scale %d\n",
   12919 			    device_xname(sc->sc_dev), scale);
   12920 			return -1;
   12921 		}
   12922 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   12923 
   12924 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12925 		    WM_PCI_LTR_CAP_LPT);
   12926 		max_snoop = preg & 0xffff;
   12927 		max_nosnoop = preg >> 16;
   12928 
   12929 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   12930 
   12931 		if (lat_enc > max_ltr_enc) {
   12932 			lat_enc = max_ltr_enc;
   12933 		}
   12934 	}
    12935 	/* Snoop and No-Snoop latencies are the same */
   12936 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   12937 	CSR_WRITE(sc, WMREG_LTRV, reg);
   12938 
   12939 	return 0;
   12940 }
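
/*
 * Worked example of the LTR encoding above (assuming LTRV_VALUE is the
 * 10-bit value mask, 0x3ff): for lat_ns = 500000,
 *
 *	500000 > 1023 -> scale = 1, value = howmany(500000, 32) = 15625
 *	 15625 > 1023 -> scale = 2, value = howmany(15625, 32)  = 489
 *
 * so lat_enc represents 489 * 2^(5 * 2) ns = 500736 ns, which is the
 * smallest encodable latency not below the requested one.
 */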
   12941 
   12942 /*
   12943  * I210 Errata 25 and I211 Errata 10
   12944  * Slow System Clock.
   12945  */
   12946 static void
   12947 wm_pll_workaround_i210(struct wm_softc *sc)
   12948 {
   12949 	uint32_t mdicnfg, wuc;
   12950 	uint32_t reg;
   12951 	pcireg_t pcireg;
   12952 	uint32_t pmreg;
   12953 	uint16_t nvmword, tmp_nvmword;
   12954 	int phyval;
   12955 	bool wa_done = false;
   12956 	int i;
   12957 
   12958 	/* Save WUC and MDICNFG registers */
   12959 	wuc = CSR_READ(sc, WMREG_WUC);
   12960 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12961 
   12962 	reg = mdicnfg & ~MDICNFG_DEST;
   12963 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12964 
   12965 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12966 		nvmword = INVM_DEFAULT_AL;
   12967 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12968 
   12969 	/* Get Power Management cap offset */
   12970 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12971 		&pmreg, NULL) == 0)
   12972 		return;
   12973 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12974 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12975 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12976 
   12977 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12978 			break; /* OK */
   12979 		}
   12980 
   12981 		wa_done = true;
   12982 		/* Directly reset the internal PHY */
   12983 		reg = CSR_READ(sc, WMREG_CTRL);
   12984 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12985 
   12986 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12987 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12988 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12989 
   12990 		CSR_WRITE(sc, WMREG_WUC, 0);
   12991 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12992 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12993 
   12994 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12995 		    pmreg + PCI_PMCSR);
   12996 		pcireg |= PCI_PMCSR_STATE_D3;
   12997 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12998 		    pmreg + PCI_PMCSR, pcireg);
   12999 		delay(1000);
   13000 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13001 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13002 		    pmreg + PCI_PMCSR, pcireg);
   13003 
   13004 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13005 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13006 
   13007 		/* Restore WUC register */
   13008 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13009 	}
   13010 
   13011 	/* Restore MDICNFG setting */
   13012 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13013 	if (wa_done)
   13014 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13015 }
   13016