/*	$NetBSD: if_wm.c,v 1.470 2017/01/27 05:04:47 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficiency Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.470 2017/01/27 05:04:47 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
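
/*
 * Illustrative usage, not part of the driver: the second argument to
 * DPRINTF() is a fully parenthesized printf(9) argument list, so the
 * whole statement disappears when WM_DEBUG is not defined.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
	    device_xname(sc->sc_dev)));
#endif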

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

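/*
 * Illustrative arithmetic, not from the source: WM_NTXDESC(txq) is a
 * power of two, so WM_NEXTTX() wraps the ring with a mask instead of a
 * modulo.  With 4096 descriptors, WM_NTXDESC_MASK(txq) == 0xfff and
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 0xfff == 0
 *
 * which is why txq_num and txq_ndesc below must be powers of two.
 */
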
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

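/*
 * Illustrative arithmetic, not from the source: a full-sized jumbo
 * frame (about 9k on the wire) split across 2k (MCLBYTES) buffers
 * needs roundup(9018, 2048) / 2048 = 5 descriptors, and 256 / 5
 * leaves room for roughly 50 jumbo packets, matching the comment
 * above.
 */
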
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC];	/* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */

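/*
 * Illustrative expansion, not part of the driver: given a member
 * declared with WM_Q_EVCNT_DEFINE(txq, txdw), attaching queue 0 as
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname);
 *
 * formats the counter name "txq00txdw" into txq_txdw_evcnt_name and
 * registers txq_ev_txdw via evcnt_attach_dynamic(9).
 */
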
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS];	/* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
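
/*
 * Illustrative usage, not from the source: softc-wide state changes are
 * bracketed with these macros, which degrade to no-ops when sc_core_lock
 * was never allocated.
 */
#if 0
	WM_CORE_LOCK(sc);
	/* ... touch sc_if_flags, the multicast filter, etc. ... */
	WM_CORE_UNLOCK(sc);
#endif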

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
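
/*
 * Illustrative sketch, not from the source: rxq_tailp always points at
 * the location that will hold the next mbuf (rxq_head when the chain is
 * empty, otherwise the last mbuf's m_next), so appending is O(1):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
#endif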

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
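
/*
 * Illustrative arithmetic, not from the source: descriptor base
 * addresses are programmed into the hardware as two 32-bit halves.
 * A 64-bit bus_addr_t of 0x0000000123456000 yields LO == 0x23456000
 * and HI == 0x1; with a 32-bit bus_addr_t the HI half is always 0.
 */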

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1341 	  "I354 Gigabit Ethernet (KX)",
   1342 	  WM_T_I354,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1345 	  "I354 Gigabit Ethernet (SGMII)",
   1346 	  WM_T_I354,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1349 	  "I354 Gigabit Ethernet (2.5G)",
   1350 	  WM_T_I354,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1353 	  "I210-T1 Ethernet Server Adapter",
   1354 	  WM_T_I210,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1357 	  "I210 Ethernet (Copper OEM)",
   1358 	  WM_T_I210,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1361 	  "I210 Ethernet (Copper IT)",
   1362 	  WM_T_I210,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1365 	  "I210 Ethernet (FLASH less)",
   1366 	  WM_T_I210,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1369 	  "I210 Gigabit Ethernet (Fiber)",
   1370 	  WM_T_I210,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1373 	  "I210 Gigabit Ethernet (SERDES)",
   1374 	  WM_T_I210,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1377 	  "I210 Gigabit Ethernet (FLASH less)",
   1378 	  WM_T_I210,		WMP_F_SERDES },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1381 	  "I210 Gigabit Ethernet (SGMII)",
   1382 	  WM_T_I210,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1385 	  "I211 Ethernet (COPPER)",
   1386 	  WM_T_I211,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1388 	  "I217 V Ethernet Connection",
   1389 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1391 	  "I217 LM Ethernet Connection",
   1392 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1394 	  "I218 V Ethernet Connection",
   1395 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1397 	  "I218 V Ethernet Connection",
   1398 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1400 	  "I218 V Ethernet Connection",
   1401 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1403 	  "I218 LM Ethernet Connection",
   1404 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1406 	  "I218 LM Ethernet Connection",
   1407 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1409 	  "I218 LM Ethernet Connection",
   1410 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1411 #if 0
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1413 	  "I219 V Ethernet Connection",
   1414 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1416 	  "I219 V Ethernet Connection",
   1417 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1419 	  "I219 V Ethernet Connection",
   1420 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1422 	  "I219 V Ethernet Connection",
   1423 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1425 	  "I219 LM Ethernet Connection",
   1426 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1428 	  "I219 LM Ethernet Connection",
   1429 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1431 	  "I219 LM Ethernet Connection",
   1432 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1434 	  "I219 LM Ethernet Connection",
   1435 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1437 	  "I219 LM Ethernet Connection",
   1438 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1439 #endif
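         	/* Table terminator; wm_lookup() stops at the NULL wmp_name. */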
   1440 	{ 0,			0,
   1441 	  NULL,
   1442 	  0,			0 },
   1443 };
   1444 
   1445 /*
   1446  * Register read/write functions.
   1447  * Other than CSR_{READ|WRITE}().
   1448  */
   1449 
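         /*
          * wm_io_read()/wm_io_write() go through the I/O-space BAR rather
          * than the memory BAR: the first dword of the I/O window selects a
          * register offset, and the second dword carries that register's
          * data (the IOADDR/IODATA pair in Intel's documentation).  For
          * example, wm_io_write(sc, WMREG_CTRL, v) would poke CTRL through
          * this window.  This path exists only for errata workarounds;
          * normal operation uses CSR_READ()/CSR_WRITE().
          */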
   1450 #if 0 /* Not currently used */
   1451 static inline uint32_t
   1452 wm_io_read(struct wm_softc *sc, int reg)
   1453 {
   1454 
   1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1456 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1457 }
   1458 #endif
   1459 
   1460 static inline void
   1461 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1462 {
   1463 
   1464 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1465 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1466 }
   1467 
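         /*
          * Write a byte into an 8-bit controller register that sits behind
          * an indirect access register (such as SCTL on 82575 serdes
          * parts): the data goes in the low bits and the target offset in
          * the address field, and the hardware raises a ready bit once the
          * write has been consumed, which is polled for below.
          */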
   1468 static inline void
   1469 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1470     uint32_t data)
   1471 {
   1472 	uint32_t regval;
   1473 	int i;
   1474 
   1475 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1476 
   1477 	CSR_WRITE(sc, reg, regval);
   1478 
   1479 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1480 		delay(5);
   1481 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1482 			break;
   1483 	}
   1484 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1485 		aprint_error("%s: WARNING:"
   1486 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1487 		    device_xname(sc->sc_dev), reg);
   1488 	}
   1489 }
   1490 
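         /*
          * Split a bus address into the little-endian low/high 32-bit
          * halves of a descriptor address field.  On platforms with a
          * 32-bit bus_addr_t the high half is always zero.
          */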
   1491 static inline void
   1492 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1493 {
   1494 	wa->wa_low = htole32(v & 0xffffffffU);
   1495 	if (sizeof(bus_addr_t) == 8)
   1496 		wa->wa_high = htole32((uint64_t) v >> 32);
   1497 	else
   1498 		wa->wa_high = 0;
   1499 }
   1500 
   1501 /*
   1502  * Descriptor sync/init functions.
   1503  */
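         /*
          * wm_cdtxsync() syncs descriptors [start, start + num) of a Tx
          * ring, issuing two bus_dmamap_sync() calls when the range wraps
          * past the end of the ring.
          */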
   1504 static inline void
   1505 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1506 {
   1507 	struct wm_softc *sc = txq->txq_sc;
   1508 
   1509 	/* If it will wrap around, sync to the end of the ring. */
   1510 	if ((start + num) > WM_NTXDESC(txq)) {
   1511 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1512 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1513 		    (WM_NTXDESC(txq) - start), ops);
   1514 		num -= (WM_NTXDESC(txq) - start);
   1515 		start = 0;
   1516 	}
   1517 
   1518 	/* Now sync whatever is left. */
   1519 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1520 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1521 }
   1522 
   1523 static inline void
   1524 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1525 {
   1526 	struct wm_softc *sc = rxq->rxq_sc;
   1527 
   1528 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1529 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1530 }
   1531 
   1532 static inline void
   1533 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1534 {
   1535 	struct wm_softc *sc = rxq->rxq_sc;
   1536 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1537 	struct mbuf *m = rxs->rxs_mbuf;
   1538 
   1539 	/*
   1540 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1541 	 * so that the payload after the Ethernet header is aligned
   1542 	 * to a 4-byte boundary.
    1543 	 *
   1544 	 * XXX BRAINDAMAGE ALERT!
   1545 	 * The stupid chip uses the same size for every buffer, which
   1546 	 * is set in the Receive Control register.  We are using the 2K
   1547 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1548 	 * reason, we can't "scoot" packets longer than the standard
   1549 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1550 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1551 	 * the upper layer copy the headers.
   1552 	 */
   1553 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1554 
   1555 	if (sc->sc_type == WM_T_82574) {
   1556 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1557 		rxd->erx_data.erxd_addr =
   1558 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1559 		rxd->erx_data.erxd_dd = 0;
   1560 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1561 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1562 
   1563 		rxd->nqrx_data.nrxd_paddr =
   1564 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1565 		/* Currently, split header is not supported. */
   1566 		rxd->nqrx_data.nrxd_haddr = 0;
   1567 	} else {
   1568 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1569 
   1570 		wm_set_dma_addr(&rxd->wrx_addr,
   1571 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1572 		rxd->wrx_len = 0;
   1573 		rxd->wrx_cksum = 0;
   1574 		rxd->wrx_status = 0;
   1575 		rxd->wrx_errors = 0;
   1576 		rxd->wrx_special = 0;
   1577 	}
   1578 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1579 
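         	/*
         	 * Hand the descriptor back to the hardware by advancing the
         	 * queue's receive descriptor tail (RDT) register.
         	 */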
   1580 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1581 }
   1582 
   1583 /*
   1584  * Device driver interface functions and commonly used functions.
   1585  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1586  */
   1587 
   1588 /* Lookup supported device table */
   1589 static const struct wm_product *
   1590 wm_lookup(const struct pci_attach_args *pa)
   1591 {
   1592 	const struct wm_product *wmp;
   1593 
   1594 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1595 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1596 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1597 			return wmp;
   1598 	}
   1599 	return NULL;
   1600 }
   1601 
   1602 /* The match function (ca_match) */
   1603 static int
   1604 wm_match(device_t parent, cfdata_t cf, void *aux)
   1605 {
   1606 	struct pci_attach_args *pa = aux;
   1607 
   1608 	if (wm_lookup(pa) != NULL)
   1609 		return 1;
   1610 
   1611 	return 0;
   1612 }
   1613 
   1614 /* The attach function (ca_attach) */
   1615 static void
   1616 wm_attach(device_t parent, device_t self, void *aux)
   1617 {
   1618 	struct wm_softc *sc = device_private(self);
   1619 	struct pci_attach_args *pa = aux;
   1620 	prop_dictionary_t dict;
   1621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1622 	pci_chipset_tag_t pc = pa->pa_pc;
   1623 	int counts[PCI_INTR_TYPE_SIZE];
   1624 	pci_intr_type_t max_type;
   1625 	const char *eetype, *xname;
   1626 	bus_space_tag_t memt;
   1627 	bus_space_handle_t memh;
   1628 	bus_size_t memsize;
   1629 	int memh_valid;
   1630 	int i, error;
   1631 	const struct wm_product *wmp;
   1632 	prop_data_t ea;
   1633 	prop_number_t pn;
   1634 	uint8_t enaddr[ETHER_ADDR_LEN];
   1635 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1636 	pcireg_t preg, memtype;
   1637 	uint16_t eeprom_data, apme_mask;
   1638 	bool force_clear_smbi;
   1639 	uint32_t link_mode;
   1640 	uint32_t reg;
   1641 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1642 
   1643 	sc->sc_dev = self;
   1644 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1645 	sc->sc_core_stopping = false;
   1646 
   1647 	wmp = wm_lookup(pa);
   1648 #ifdef DIAGNOSTIC
   1649 	if (wmp == NULL) {
   1650 		printf("\n");
   1651 		panic("wm_attach: impossible");
   1652 	}
   1653 #endif
   1654 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1655 
   1656 	sc->sc_pc = pa->pa_pc;
   1657 	sc->sc_pcitag = pa->pa_tag;
   1658 
   1659 	if (pci_dma64_available(pa))
   1660 		sc->sc_dmat = pa->pa_dmat64;
   1661 	else
   1662 		sc->sc_dmat = pa->pa_dmat;
   1663 
   1664 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1665 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1666 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1667 
   1668 	sc->sc_type = wmp->wmp_type;
   1669 
   1670 	/* Set default function pointers */
   1671 	sc->phy.acquire = wm_get_null;
   1672 	sc->phy.release = wm_put_null;
   1673 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1674 
   1675 	if (sc->sc_type < WM_T_82543) {
   1676 		if (sc->sc_rev < 2) {
   1677 			aprint_error_dev(sc->sc_dev,
   1678 			    "i82542 must be at least rev. 2\n");
   1679 			return;
   1680 		}
   1681 		if (sc->sc_rev < 3)
   1682 			sc->sc_type = WM_T_82542_2_0;
   1683 	}
   1684 
   1685 	/*
   1686 	 * Disable MSI for Errata:
   1687 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1688 	 *
   1689 	 *  82544: Errata 25
   1690 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1691 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1692 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1693 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1694 	 *
   1695 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1696 	 *
   1697 	 *  82571 & 82572: Errata 63
   1698 	 */
   1699 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1700 	    || (sc->sc_type == WM_T_82572))
   1701 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1702 
   1703 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1704 	    || (sc->sc_type == WM_T_82580)
   1705 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1706 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1707 		sc->sc_flags |= WM_F_NEWQUEUE;
   1708 
   1709 	/* Set device properties (mactype) */
   1710 	dict = device_properties(sc->sc_dev);
   1711 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1712 
   1713 	/*
    1714 	 * Map the device.  All devices support memory-mapped access,
   1715 	 * and it is really required for normal operation.
   1716 	 */
   1717 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1718 	switch (memtype) {
   1719 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1720 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1721 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1722 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1723 		break;
   1724 	default:
   1725 		memh_valid = 0;
   1726 		break;
   1727 	}
   1728 
   1729 	if (memh_valid) {
   1730 		sc->sc_st = memt;
   1731 		sc->sc_sh = memh;
   1732 		sc->sc_ss = memsize;
   1733 	} else {
   1734 		aprint_error_dev(sc->sc_dev,
   1735 		    "unable to map device registers\n");
   1736 		return;
   1737 	}
   1738 
   1739 	/*
   1740 	 * In addition, i82544 and later support I/O mapped indirect
   1741 	 * register access.  It is not desirable (nor supported in
   1742 	 * this driver) to use it for normal operation, though it is
   1743 	 * required to work around bugs in some chip versions.
   1744 	 */
   1745 	if (sc->sc_type >= WM_T_82544) {
   1746 		/* First we have to find the I/O BAR. */
   1747 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1748 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1749 			if (memtype == PCI_MAPREG_TYPE_IO)
   1750 				break;
   1751 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1752 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1753 				i += 4;	/* skip high bits, too */
   1754 		}
   1755 		if (i < PCI_MAPREG_END) {
   1756 			/*
    1757 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1758 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1759 			 * that's no problem because newer chips don't have
    1760 			 * this bug.
    1761 			 *
    1762 			 * The i8254x doesn't apparently respond when the
    1763 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1764 			 * been configured.
   1765 			 */
   1766 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1767 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1768 				aprint_error_dev(sc->sc_dev,
   1769 				    "WARNING: I/O BAR at zero.\n");
   1770 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1771 					0, &sc->sc_iot, &sc->sc_ioh,
   1772 					NULL, &sc->sc_ios) == 0) {
   1773 				sc->sc_flags |= WM_F_IOH_VALID;
   1774 			} else {
   1775 				aprint_error_dev(sc->sc_dev,
   1776 				    "WARNING: unable to map I/O space\n");
   1777 			}
   1778 		}
   1779 
   1780 	}
   1781 
   1782 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1783 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1784 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1785 	if (sc->sc_type < WM_T_82542_2_1)
   1786 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1787 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1788 
   1789 	/* power up chip */
   1790 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1791 	    NULL)) && error != EOPNOTSUPP) {
   1792 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1793 		return;
   1794 	}
   1795 
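         	/*
         	 * Size the number of Tx/Rx queue pairs to the MSI-X vectors
         	 * the device advertises; this is re-adjusted to a single
         	 * queue below if we fall back to MSI or INTx.
         	 */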
   1796 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1797 
   1798 	/* Allocation settings */
   1799 	max_type = PCI_INTR_TYPE_MSIX;
   1800 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1801 	counts[PCI_INTR_TYPE_MSI] = 1;
   1802 	counts[PCI_INTR_TYPE_INTX] = 1;
   1803 
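         	/*
         	 * Interrupt allocation falls back in stages: try MSI-X first
         	 * (one vector per queue plus one for link events), then plain
         	 * MSI, then INTx, releasing the previous allocation before
         	 * each retry.
         	 */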
   1804 alloc_retry:
   1805 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1806 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1807 		return;
   1808 	}
   1809 
   1810 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1811 		error = wm_setup_msix(sc);
   1812 		if (error) {
   1813 			pci_intr_release(pc, sc->sc_intrs,
   1814 			    counts[PCI_INTR_TYPE_MSIX]);
   1815 
   1816 			/* Setup for MSI: Disable MSI-X */
   1817 			max_type = PCI_INTR_TYPE_MSI;
   1818 			counts[PCI_INTR_TYPE_MSI] = 1;
   1819 			counts[PCI_INTR_TYPE_INTX] = 1;
   1820 			goto alloc_retry;
   1821 		}
    1822 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1823 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1824 		error = wm_setup_legacy(sc);
   1825 		if (error) {
   1826 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1827 			    counts[PCI_INTR_TYPE_MSI]);
   1828 
   1829 			/* The next try is for INTx: Disable MSI */
   1830 			max_type = PCI_INTR_TYPE_INTX;
   1831 			counts[PCI_INTR_TYPE_INTX] = 1;
   1832 			goto alloc_retry;
   1833 		}
   1834 	} else {
   1835 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1836 		error = wm_setup_legacy(sc);
   1837 		if (error) {
   1838 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1839 			    counts[PCI_INTR_TYPE_INTX]);
   1840 			return;
   1841 		}
   1842 	}
   1843 
   1844 	/*
   1845 	 * Check the function ID (unit number of the chip).
   1846 	 */
   1847 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1848 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1849 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1850 	    || (sc->sc_type == WM_T_82580)
   1851 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1852 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1853 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1854 	else
   1855 		sc->sc_funcid = 0;
   1856 
   1857 	/*
   1858 	 * Determine a few things about the bus we're connected to.
   1859 	 */
   1860 	if (sc->sc_type < WM_T_82543) {
   1861 		/* We don't really know the bus characteristics here. */
   1862 		sc->sc_bus_speed = 33;
   1863 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1864 		/*
    1865 		 * CSA (Communication Streaming Architecture) is about as
    1866 		 * fast as a 32-bit 66MHz PCI bus.
   1867 		 */
   1868 		sc->sc_flags |= WM_F_CSA;
   1869 		sc->sc_bus_speed = 66;
   1870 		aprint_verbose_dev(sc->sc_dev,
   1871 		    "Communication Streaming Architecture\n");
   1872 		if (sc->sc_type == WM_T_82547) {
   1873 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1874 			callout_setfunc(&sc->sc_txfifo_ch,
   1875 					wm_82547_txfifo_stall, sc);
   1876 			aprint_verbose_dev(sc->sc_dev,
   1877 			    "using 82547 Tx FIFO stall work-around\n");
   1878 		}
   1879 	} else if (sc->sc_type >= WM_T_82571) {
   1880 		sc->sc_flags |= WM_F_PCIE;
   1881 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1882 		    && (sc->sc_type != WM_T_ICH10)
   1883 		    && (sc->sc_type != WM_T_PCH)
   1884 		    && (sc->sc_type != WM_T_PCH2)
   1885 		    && (sc->sc_type != WM_T_PCH_LPT)
   1886 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1887 			/* ICH* and PCH* have no PCIe capability registers */
   1888 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1889 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1890 				NULL) == 0)
   1891 				aprint_error_dev(sc->sc_dev,
   1892 				    "unable to find PCIe capability\n");
   1893 		}
   1894 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1895 	} else {
   1896 		reg = CSR_READ(sc, WMREG_STATUS);
   1897 		if (reg & STATUS_BUS64)
   1898 			sc->sc_flags |= WM_F_BUS64;
   1899 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1900 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1901 
   1902 			sc->sc_flags |= WM_F_PCIX;
   1903 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1904 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1905 				aprint_error_dev(sc->sc_dev,
   1906 				    "unable to find PCIX capability\n");
   1907 			else if (sc->sc_type != WM_T_82545_3 &&
   1908 				 sc->sc_type != WM_T_82546_3) {
   1909 				/*
   1910 				 * Work around a problem caused by the BIOS
   1911 				 * setting the max memory read byte count
   1912 				 * incorrectly.
   1913 				 */
   1914 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1915 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1916 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1917 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1918 
   1919 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1920 				    PCIX_CMD_BYTECNT_SHIFT;
   1921 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1922 				    PCIX_STATUS_MAXB_SHIFT;
   1923 				if (bytecnt > maxb) {
   1924 					aprint_verbose_dev(sc->sc_dev,
   1925 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1926 					    512 << bytecnt, 512 << maxb);
   1927 					pcix_cmd = (pcix_cmd &
   1928 					    ~PCIX_CMD_BYTECNT_MASK) |
   1929 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1930 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1931 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1932 					    pcix_cmd);
   1933 				}
   1934 			}
   1935 		}
   1936 		/*
   1937 		 * The quad port adapter is special; it has a PCIX-PCIX
   1938 		 * bridge on the board, and can run the secondary bus at
   1939 		 * a higher speed.
   1940 		 */
   1941 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1942 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1943 								      : 66;
   1944 		} else if (sc->sc_flags & WM_F_PCIX) {
   1945 			switch (reg & STATUS_PCIXSPD_MASK) {
   1946 			case STATUS_PCIXSPD_50_66:
   1947 				sc->sc_bus_speed = 66;
   1948 				break;
   1949 			case STATUS_PCIXSPD_66_100:
   1950 				sc->sc_bus_speed = 100;
   1951 				break;
   1952 			case STATUS_PCIXSPD_100_133:
   1953 				sc->sc_bus_speed = 133;
   1954 				break;
   1955 			default:
   1956 				aprint_error_dev(sc->sc_dev,
   1957 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1958 				    reg & STATUS_PCIXSPD_MASK);
   1959 				sc->sc_bus_speed = 66;
   1960 				break;
   1961 			}
   1962 		} else
   1963 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1964 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1965 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1966 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1967 	}
   1968 
   1969 	/* clear interesting stat counters */
   1970 	CSR_READ(sc, WMREG_COLC);
   1971 	CSR_READ(sc, WMREG_RXERRC);
   1972 
   1973 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1974 	    || (sc->sc_type >= WM_T_ICH8))
   1975 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1976 	if (sc->sc_type >= WM_T_ICH8)
   1977 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1978 
   1979 	/* Set PHY, NVM mutex related stuff */
   1980 	switch (sc->sc_type) {
   1981 	case WM_T_82542_2_0:
   1982 	case WM_T_82542_2_1:
   1983 	case WM_T_82543:
   1984 	case WM_T_82544:
   1985 		/* Microwire */
   1986 		sc->sc_nvm_wordsize = 64;
   1987 		sc->sc_nvm_addrbits = 6;
   1988 		break;
   1989 	case WM_T_82540:
   1990 	case WM_T_82545:
   1991 	case WM_T_82545_3:
   1992 	case WM_T_82546:
   1993 	case WM_T_82546_3:
   1994 		/* Microwire */
   1995 		reg = CSR_READ(sc, WMREG_EECD);
   1996 		if (reg & EECD_EE_SIZE) {
   1997 			sc->sc_nvm_wordsize = 256;
   1998 			sc->sc_nvm_addrbits = 8;
   1999 		} else {
   2000 			sc->sc_nvm_wordsize = 64;
   2001 			sc->sc_nvm_addrbits = 6;
   2002 		}
   2003 		sc->sc_flags |= WM_F_LOCK_EECD;
   2004 		break;
   2005 	case WM_T_82541:
   2006 	case WM_T_82541_2:
   2007 	case WM_T_82547:
   2008 	case WM_T_82547_2:
   2009 		sc->sc_flags |= WM_F_LOCK_EECD;
   2010 		reg = CSR_READ(sc, WMREG_EECD);
   2011 		if (reg & EECD_EE_TYPE) {
   2012 			/* SPI */
   2013 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2014 			wm_nvm_set_addrbits_size_eecd(sc);
   2015 		} else {
   2016 			/* Microwire */
   2017 			if ((reg & EECD_EE_ABITS) != 0) {
   2018 				sc->sc_nvm_wordsize = 256;
   2019 				sc->sc_nvm_addrbits = 8;
   2020 			} else {
   2021 				sc->sc_nvm_wordsize = 64;
   2022 				sc->sc_nvm_addrbits = 6;
   2023 			}
   2024 		}
   2025 		break;
   2026 	case WM_T_82571:
   2027 	case WM_T_82572:
   2028 		/* SPI */
   2029 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2030 		wm_nvm_set_addrbits_size_eecd(sc);
   2031 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2032 		sc->phy.acquire = wm_get_swsm_semaphore;
   2033 		sc->phy.release = wm_put_swsm_semaphore;
   2034 		break;
   2035 	case WM_T_82573:
   2036 	case WM_T_82574:
   2037 	case WM_T_82583:
   2038 		if (sc->sc_type == WM_T_82573) {
   2039 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2040 			sc->phy.acquire = wm_get_swsm_semaphore;
   2041 			sc->phy.release = wm_put_swsm_semaphore;
   2042 		} else {
   2043 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2044 			/* Both PHY and NVM use the same semaphore. */
   2045 			sc->phy.acquire
   2046 			    = wm_get_swfwhw_semaphore;
   2047 			sc->phy.release
   2048 			    = wm_put_swfwhw_semaphore;
   2049 		}
   2050 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2051 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2052 			sc->sc_nvm_wordsize = 2048;
   2053 		} else {
   2054 			/* SPI */
   2055 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2056 			wm_nvm_set_addrbits_size_eecd(sc);
   2057 		}
   2058 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2059 		break;
   2060 	case WM_T_82575:
   2061 	case WM_T_82576:
   2062 	case WM_T_82580:
   2063 	case WM_T_I350:
   2064 	case WM_T_I354:
   2065 	case WM_T_80003:
   2066 		/* SPI */
   2067 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2068 		wm_nvm_set_addrbits_size_eecd(sc);
   2069 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2070 		    | WM_F_LOCK_SWSM;
   2071 		sc->phy.acquire = wm_get_phy_82575;
   2072 		sc->phy.release = wm_put_phy_82575;
   2073 		break;
   2074 	case WM_T_ICH8:
   2075 	case WM_T_ICH9:
   2076 	case WM_T_ICH10:
   2077 	case WM_T_PCH:
   2078 	case WM_T_PCH2:
   2079 	case WM_T_PCH_LPT:
   2080 		/* FLASH */
   2081 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2082 		sc->sc_nvm_wordsize = 2048;
   2083 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2084 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2085 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2086 			aprint_error_dev(sc->sc_dev,
   2087 			    "can't map FLASH registers\n");
   2088 			goto out;
   2089 		}
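         		/*
         		 * GFPREG describes the NVM region of the flash in
         		 * sector units: the low field holds the first sector of
         		 * the region and bits 16 and up the last.  Convert that
         		 * into a byte base address and a per-bank size in 16-bit
         		 * words (the region holds two banks).
         		 */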
   2090 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2091 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2092 		    ICH_FLASH_SECTOR_SIZE;
   2093 		sc->sc_ich8_flash_bank_size =
   2094 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2095 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2096 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2097 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2098 		sc->sc_flashreg_offset = 0;
   2099 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2100 		sc->phy.release = wm_put_swflag_ich8lan;
   2101 		break;
   2102 	case WM_T_PCH_SPT:
   2103 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2104 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2105 		sc->sc_flasht = sc->sc_st;
   2106 		sc->sc_flashh = sc->sc_sh;
   2107 		sc->sc_ich8_flash_base = 0;
   2108 		sc->sc_nvm_wordsize =
   2109 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2110 			* NVM_SIZE_MULTIPLIER;
    2111 		/* That is the size in bytes; we want it in words */
   2112 		sc->sc_nvm_wordsize /= 2;
   2113 		/* assume 2 banks */
   2114 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2115 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2116 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2117 		sc->phy.release = wm_put_swflag_ich8lan;
   2118 		break;
   2119 	case WM_T_I210:
   2120 	case WM_T_I211:
   2121 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2122 			wm_nvm_set_addrbits_size_eecd(sc);
   2123 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2124 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2125 		} else {
   2126 			sc->sc_nvm_wordsize = INVM_SIZE;
   2127 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2128 		}
   2129 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2130 		sc->phy.acquire = wm_get_phy_82575;
   2131 		sc->phy.release = wm_put_phy_82575;
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/* Reset the chip to a known state. */
   2138 	wm_reset(sc);
   2139 
   2140 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2141 	switch (sc->sc_type) {
   2142 	case WM_T_82571:
   2143 	case WM_T_82572:
   2144 		reg = CSR_READ(sc, WMREG_SWSM2);
   2145 		if ((reg & SWSM2_LOCK) == 0) {
   2146 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2147 			force_clear_smbi = true;
   2148 		} else
   2149 			force_clear_smbi = false;
   2150 		break;
   2151 	case WM_T_82573:
   2152 	case WM_T_82574:
   2153 	case WM_T_82583:
   2154 		force_clear_smbi = true;
   2155 		break;
   2156 	default:
   2157 		force_clear_smbi = false;
   2158 		break;
   2159 	}
   2160 	if (force_clear_smbi) {
   2161 		reg = CSR_READ(sc, WMREG_SWSM);
   2162 		if ((reg & SWSM_SMBI) != 0)
   2163 			aprint_error_dev(sc->sc_dev,
   2164 			    "Please update the Bootagent\n");
   2165 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2166 	}
   2167 
   2168 	/*
    2169 	 * Defer printing the EEPROM type until after verifying the checksum.
   2170 	 * This allows the EEPROM type to be printed correctly in the case
   2171 	 * that no EEPROM is attached.
   2172 	 */
   2173 	/*
   2174 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2175 	 * this for later, so we can fail future reads from the EEPROM.
   2176 	 */
   2177 	if (wm_nvm_validate_checksum(sc)) {
   2178 		/*
   2179 		 * Read twice again because some PCI-e parts fail the
   2180 		 * first check due to the link being in sleep state.
   2181 		 */
   2182 		if (wm_nvm_validate_checksum(sc))
   2183 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2184 	}
   2185 
   2186 	/* Set device properties (macflags) */
   2187 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2188 
   2189 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2190 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2191 	else {
   2192 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2193 		    sc->sc_nvm_wordsize);
   2194 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2195 			aprint_verbose("iNVM");
   2196 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2197 			aprint_verbose("FLASH(HW)");
   2198 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2199 			aprint_verbose("FLASH");
   2200 		else {
   2201 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2202 				eetype = "SPI";
   2203 			else
   2204 				eetype = "MicroWire";
   2205 			aprint_verbose("(%d address bits) %s EEPROM",
   2206 			    sc->sc_nvm_addrbits, eetype);
   2207 		}
   2208 	}
   2209 	wm_nvm_version(sc);
   2210 	aprint_verbose("\n");
   2211 
   2212 	/* Check for I21[01] PLL workaround */
   2213 	if (sc->sc_type == WM_T_I210)
   2214 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2215 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2216 		/* NVM image release 3.25 has a workaround */
   2217 		if ((sc->sc_nvm_ver_major < 3)
   2218 		    || ((sc->sc_nvm_ver_major == 3)
   2219 			&& (sc->sc_nvm_ver_minor < 25))) {
   2220 			aprint_verbose_dev(sc->sc_dev,
   2221 			    "ROM image version %d.%d is older than 3.25\n",
   2222 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2223 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2224 		}
   2225 	}
   2226 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2227 		wm_pll_workaround_i210(sc);
   2228 
   2229 	wm_get_wakeup(sc);
   2230 
   2231 	/* Non-AMT based hardware can now take control from firmware */
   2232 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2233 		wm_get_hw_control(sc);
   2234 
   2235 	/*
   2236 	 * Read the Ethernet address from the EEPROM, if not first found
   2237 	 * in device properties.
   2238 	 */
   2239 	ea = prop_dictionary_get(dict, "mac-address");
   2240 	if (ea != NULL) {
   2241 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2242 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2243 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2244 	} else {
   2245 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2246 			aprint_error_dev(sc->sc_dev,
   2247 			    "unable to read Ethernet address\n");
   2248 			goto out;
   2249 		}
   2250 	}
   2251 
   2252 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2253 	    ether_sprintf(enaddr));
   2254 
   2255 	/*
   2256 	 * Read the config info from the EEPROM, and set up various
   2257 	 * bits in the control registers based on their contents.
   2258 	 */
   2259 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2260 	if (pn != NULL) {
   2261 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2262 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2263 	} else {
   2264 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2265 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2266 			goto out;
   2267 		}
   2268 	}
   2269 
   2270 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2271 	if (pn != NULL) {
   2272 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2273 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2274 	} else {
   2275 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2276 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2277 			goto out;
   2278 		}
   2279 	}
   2280 
   2281 	/* check for WM_F_WOL */
   2282 	switch (sc->sc_type) {
   2283 	case WM_T_82542_2_0:
   2284 	case WM_T_82542_2_1:
   2285 	case WM_T_82543:
   2286 		/* dummy? */
   2287 		eeprom_data = 0;
   2288 		apme_mask = NVM_CFG3_APME;
   2289 		break;
   2290 	case WM_T_82544:
   2291 		apme_mask = NVM_CFG2_82544_APM_EN;
   2292 		eeprom_data = cfg2;
   2293 		break;
   2294 	case WM_T_82546:
   2295 	case WM_T_82546_3:
   2296 	case WM_T_82571:
   2297 	case WM_T_82572:
   2298 	case WM_T_82573:
   2299 	case WM_T_82574:
   2300 	case WM_T_82583:
   2301 	case WM_T_80003:
   2302 	default:
   2303 		apme_mask = NVM_CFG3_APME;
   2304 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2305 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2306 		break;
   2307 	case WM_T_82575:
   2308 	case WM_T_82576:
   2309 	case WM_T_82580:
   2310 	case WM_T_I350:
   2311 	case WM_T_I354: /* XXX ok? */
   2312 	case WM_T_ICH8:
   2313 	case WM_T_ICH9:
   2314 	case WM_T_ICH10:
   2315 	case WM_T_PCH:
   2316 	case WM_T_PCH2:
   2317 	case WM_T_PCH_LPT:
   2318 	case WM_T_PCH_SPT:
   2319 		/* XXX The funcid should be checked on some devices */
   2320 		apme_mask = WUC_APME;
   2321 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2322 		break;
   2323 	}
   2324 
   2325 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2326 	if ((eeprom_data & apme_mask) != 0)
   2327 		sc->sc_flags |= WM_F_WOL;
   2328 #ifdef WM_DEBUG
   2329 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2330 		printf("WOL\n");
   2331 #endif
   2332 
   2333 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2334 		/* Check NVM for autonegotiation */
   2335 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2336 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2337 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2338 		}
   2339 	}
   2340 
   2341 	/*
   2342 	 * XXX need special handling for some multiple port cards
    2343 	 * to disable a particular port.
   2344 	 */
   2345 
   2346 	if (sc->sc_type >= WM_T_82544) {
   2347 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2348 		if (pn != NULL) {
   2349 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2350 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2351 		} else {
   2352 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2353 				aprint_error_dev(sc->sc_dev,
   2354 				    "unable to read SWDPIN\n");
   2355 				goto out;
   2356 			}
   2357 		}
   2358 	}
   2359 
   2360 	if (cfg1 & NVM_CFG1_ILOS)
   2361 		sc->sc_ctrl |= CTRL_ILOS;
   2362 
   2363 	/*
   2364 	 * XXX
    2365 	 * This code isn't correct because pins 2 and 3 are located
    2366 	 * in different positions on newer chips. Check all datasheets.
    2367 	 *
    2368 	 * Until this is resolved, apply it only to chips <= 82580.
   2369 	 */
   2370 	if (sc->sc_type <= WM_T_82580) {
   2371 		if (sc->sc_type >= WM_T_82544) {
   2372 			sc->sc_ctrl |=
   2373 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2374 			    CTRL_SWDPIO_SHIFT;
   2375 			sc->sc_ctrl |=
   2376 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2377 			    CTRL_SWDPINS_SHIFT;
   2378 		} else {
   2379 			sc->sc_ctrl |=
   2380 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2381 			    CTRL_SWDPIO_SHIFT;
   2382 		}
   2383 	}
   2384 
   2385 	/* XXX For other than 82580? */
   2386 	if (sc->sc_type == WM_T_82580) {
   2387 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2388 		if (nvmword & __BIT(13))
   2389 			sc->sc_ctrl |= CTRL_ILOS;
   2390 	}
   2391 
   2392 #if 0
   2393 	if (sc->sc_type >= WM_T_82544) {
   2394 		if (cfg1 & NVM_CFG1_IPS0)
   2395 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2396 		if (cfg1 & NVM_CFG1_IPS1)
   2397 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2398 		sc->sc_ctrl_ext |=
   2399 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2400 		    CTRL_EXT_SWDPIO_SHIFT;
   2401 		sc->sc_ctrl_ext |=
   2402 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2403 		    CTRL_EXT_SWDPINS_SHIFT;
   2404 	} else {
   2405 		sc->sc_ctrl_ext |=
   2406 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2407 		    CTRL_EXT_SWDPIO_SHIFT;
   2408 	}
   2409 #endif
   2410 
   2411 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2412 #if 0
   2413 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2414 #endif
   2415 
   2416 	if (sc->sc_type == WM_T_PCH) {
   2417 		uint16_t val;
   2418 
   2419 		/* Save the NVM K1 bit setting */
   2420 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2421 
   2422 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2423 			sc->sc_nvm_k1_enabled = 1;
   2424 		else
   2425 			sc->sc_nvm_k1_enabled = 0;
   2426 	}
   2427 
   2428 	/*
    2429 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2430 	 * media structures accordingly.
   2431 	 */
   2432 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2433 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2434 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2435 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2436 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2437 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2438 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2439 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2440 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2441 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2442 	    || (sc->sc_type == WM_T_I211)) {
   2443 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2444 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2445 		switch (link_mode) {
   2446 		case CTRL_EXT_LINK_MODE_1000KX:
   2447 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2448 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2449 			break;
   2450 		case CTRL_EXT_LINK_MODE_SGMII:
   2451 			if (wm_sgmii_uses_mdio(sc)) {
   2452 				aprint_verbose_dev(sc->sc_dev,
   2453 				    "SGMII(MDIO)\n");
   2454 				sc->sc_flags |= WM_F_SGMII;
   2455 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2456 				break;
   2457 			}
   2458 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2459 			/*FALLTHROUGH*/
   2460 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2461 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2462 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2463 				if (link_mode
   2464 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2465 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2466 					sc->sc_flags |= WM_F_SGMII;
   2467 				} else {
   2468 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2469 					aprint_verbose_dev(sc->sc_dev,
   2470 					    "SERDES\n");
   2471 				}
   2472 				break;
   2473 			}
   2474 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2475 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2476 
   2477 			/* Change current link mode setting */
   2478 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2479 			switch (sc->sc_mediatype) {
   2480 			case WM_MEDIATYPE_COPPER:
   2481 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2482 				break;
   2483 			case WM_MEDIATYPE_SERDES:
   2484 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2485 				break;
   2486 			default:
   2487 				break;
   2488 			}
   2489 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2490 			break;
   2491 		case CTRL_EXT_LINK_MODE_GMII:
   2492 		default:
   2493 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2494 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2495 			break;
   2496 		}
   2497 
    2498 		/* Enable the on-chip I2C interface only when SGMII uses it */
    2499 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2500 			reg |= CTRL_EXT_I2C_ENA;
    2501 		else
    2502 			reg &= ~CTRL_EXT_I2C_ENA;
   2503 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2504 
   2505 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2506 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2507 		else
   2508 			wm_tbi_mediainit(sc);
   2509 	} else if (sc->sc_type < WM_T_82543 ||
   2510 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2511 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2512 			aprint_error_dev(sc->sc_dev,
   2513 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2514 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2515 		}
   2516 		wm_tbi_mediainit(sc);
   2517 	} else {
   2518 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2519 			aprint_error_dev(sc->sc_dev,
   2520 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2521 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2522 		}
   2523 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2524 	}
   2525 
   2526 	ifp = &sc->sc_ethercom.ec_if;
   2527 	xname = device_xname(sc->sc_dev);
   2528 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2529 	ifp->if_softc = sc;
   2530 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2531 	ifp->if_extflags = IFEF_START_MPSAFE;
   2532 	ifp->if_ioctl = wm_ioctl;
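         	/*
         	 * Multiqueue configurations also hook if_transmit so the
         	 * stack can pick a Tx queue per packet; Tx completions then
         	 * restart output via the deferred start callback.
         	 * Single-queue devices use the plain if_start() path.
         	 */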
   2533 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2534 		ifp->if_start = wm_nq_start;
   2535 		if (sc->sc_nqueues > 1) {
   2536 			ifp->if_transmit = wm_nq_transmit;
   2537 			deferred_start_func = wm_deferred_start;
   2538 		}
   2539 	} else {
   2540 		ifp->if_start = wm_start;
   2541 		if (sc->sc_nqueues > 1) {
   2542 			ifp->if_transmit = wm_transmit;
   2543 			deferred_start_func = wm_deferred_start;
   2544 		}
   2545 	}
   2546 	ifp->if_watchdog = wm_watchdog;
   2547 	ifp->if_init = wm_init;
   2548 	ifp->if_stop = wm_stop;
   2549 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2550 	IFQ_SET_READY(&ifp->if_snd);
   2551 
   2552 	/* Check for jumbo frame */
   2553 	switch (sc->sc_type) {
   2554 	case WM_T_82573:
   2555 		/* XXX limited to 9234 if ASPM is disabled */
   2556 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2557 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2558 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2559 		break;
   2560 	case WM_T_82571:
   2561 	case WM_T_82572:
   2562 	case WM_T_82574:
   2563 	case WM_T_82575:
   2564 	case WM_T_82576:
   2565 	case WM_T_82580:
   2566 	case WM_T_I350:
   2567 	case WM_T_I354: /* XXXX ok? */
   2568 	case WM_T_I210:
   2569 	case WM_T_I211:
   2570 	case WM_T_80003:
   2571 	case WM_T_ICH9:
   2572 	case WM_T_ICH10:
   2573 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2574 	case WM_T_PCH_LPT:
   2575 	case WM_T_PCH_SPT:
   2576 		/* XXX limited to 9234 */
   2577 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2578 		break;
   2579 	case WM_T_PCH:
   2580 		/* XXX limited to 4096 */
   2581 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2582 		break;
   2583 	case WM_T_82542_2_0:
   2584 	case WM_T_82542_2_1:
   2585 	case WM_T_82583:
   2586 	case WM_T_ICH8:
   2587 		/* No support for jumbo frame */
   2588 		break;
   2589 	default:
   2590 		/* ETHER_MAX_LEN_JUMBO */
   2591 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2592 		break;
   2593 	}
   2594 
    2595 	/* If we're an i82543 or greater, we can support VLANs. */
   2596 	if (sc->sc_type >= WM_T_82543)
   2597 		sc->sc_ethercom.ec_capabilities |=
   2598 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2599 
   2600 	/*
    2601 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2602 	 * on i82543 and later.
   2603 	 */
   2604 	if (sc->sc_type >= WM_T_82543) {
   2605 		ifp->if_capabilities |=
   2606 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2607 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2608 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2609 		    IFCAP_CSUM_TCPv6_Tx |
   2610 		    IFCAP_CSUM_UDPv6_Tx;
   2611 	}
   2612 
   2613 	/*
    2614 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2615 	 *
   2616 	 *	82541GI (8086:1076) ... no
   2617 	 *	82572EI (8086:10b9) ... yes
   2618 	 */
   2619 	if (sc->sc_type >= WM_T_82571) {
   2620 		ifp->if_capabilities |=
   2621 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2622 	}
   2623 
   2624 	/*
    2625 	 * If we're an i82544 or greater (except i82547), we can do
   2626 	 * TCP segmentation offload.
   2627 	 */
   2628 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2629 		ifp->if_capabilities |= IFCAP_TSOv4;
   2630 	}
   2631 
   2632 	if (sc->sc_type >= WM_T_82571) {
   2633 		ifp->if_capabilities |= IFCAP_TSOv6;
   2634 	}
   2635 
   2636 #ifdef WM_MPSAFE
   2637 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2638 #else
   2639 	sc->sc_core_lock = NULL;
   2640 #endif
   2641 
   2642 	/* Attach the interface. */
   2643 	if_initialize(ifp);
   2644 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2645 	if_deferred_start_init(ifp, deferred_start_func);
   2646 	ether_ifattach(ifp, enaddr);
   2647 	if_register(ifp);
   2648 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2649 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2650 			  RND_FLAG_DEFAULT);
   2651 
   2652 #ifdef WM_EVENT_COUNTERS
   2653 	/* Attach event counters. */
   2654 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2655 	    NULL, xname, "linkintr");
   2656 
   2657 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2658 	    NULL, xname, "tx_xoff");
   2659 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2660 	    NULL, xname, "tx_xon");
   2661 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2662 	    NULL, xname, "rx_xoff");
   2663 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2664 	    NULL, xname, "rx_xon");
   2665 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2666 	    NULL, xname, "rx_macctl");
   2667 #endif /* WM_EVENT_COUNTERS */
   2668 
   2669 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2670 		pmf_class_network_register(self, ifp);
   2671 	else
   2672 		aprint_error_dev(self, "couldn't establish power handler\n");
   2673 
   2674 	sc->sc_flags |= WM_F_ATTACHED;
   2675  out:
   2676 	return;
   2677 }
   2678 
   2679 /* The detach function (ca_detach) */
   2680 static int
   2681 wm_detach(device_t self, int flags __unused)
   2682 {
   2683 	struct wm_softc *sc = device_private(self);
   2684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2685 	int i;
   2686 
   2687 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2688 		return 0;
   2689 
    2690 	/* Stop the interface.  Callouts are stopped in wm_stop(). */
   2691 	wm_stop(ifp, 1);
   2692 
   2693 	pmf_device_deregister(self);
   2694 
   2695 	/* Tell the firmware about the release */
   2696 	WM_CORE_LOCK(sc);
   2697 	wm_release_manageability(sc);
   2698 	wm_release_hw_control(sc);
   2699 	wm_enable_wakeup(sc);
   2700 	WM_CORE_UNLOCK(sc);
   2701 
   2702 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2703 
   2704 	/* Delete all remaining media. */
   2705 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2706 
   2707 	ether_ifdetach(ifp);
   2708 	if_detach(ifp);
   2709 	if_percpuq_destroy(sc->sc_ipq);
   2710 
   2711 	/* Unload RX dmamaps and free mbufs */
   2712 	for (i = 0; i < sc->sc_nqueues; i++) {
   2713 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2714 		mutex_enter(rxq->rxq_lock);
   2715 		wm_rxdrain(rxq);
   2716 		mutex_exit(rxq->rxq_lock);
   2717 	}
   2718 	/* Must unlock here */
   2719 
   2720 	/* Disestablish the interrupt handler */
   2721 	for (i = 0; i < sc->sc_nintrs; i++) {
   2722 		if (sc->sc_ihs[i] != NULL) {
   2723 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2724 			sc->sc_ihs[i] = NULL;
   2725 		}
   2726 	}
   2727 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2728 
   2729 	wm_free_txrx_queues(sc);
   2730 
   2731 	/* Unmap the registers */
   2732 	if (sc->sc_ss) {
   2733 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2734 		sc->sc_ss = 0;
   2735 	}
   2736 	if (sc->sc_ios) {
   2737 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2738 		sc->sc_ios = 0;
   2739 	}
   2740 	if (sc->sc_flashs) {
   2741 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2742 		sc->sc_flashs = 0;
   2743 	}
   2744 
   2745 	if (sc->sc_core_lock)
   2746 		mutex_obj_free(sc->sc_core_lock);
   2747 	if (sc->sc_ich_phymtx)
   2748 		mutex_obj_free(sc->sc_ich_phymtx);
   2749 	if (sc->sc_ich_nvmmtx)
   2750 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2751 
   2752 	return 0;
   2753 }
   2754 
   2755 static bool
   2756 wm_suspend(device_t self, const pmf_qual_t *qual)
   2757 {
   2758 	struct wm_softc *sc = device_private(self);
   2759 
   2760 	wm_release_manageability(sc);
   2761 	wm_release_hw_control(sc);
   2762 	wm_enable_wakeup(sc);
   2763 
   2764 	return true;
   2765 }
   2766 
   2767 static bool
   2768 wm_resume(device_t self, const pmf_qual_t *qual)
   2769 {
   2770 	struct wm_softc *sc = device_private(self);
   2771 
   2772 	wm_init_manageability(sc);
   2773 
   2774 	return true;
   2775 }
   2776 
   2777 /*
   2778  * wm_watchdog:		[ifnet interface function]
   2779  *
   2780  *	Watchdog timer handler.
   2781  */
   2782 static void
   2783 wm_watchdog(struct ifnet *ifp)
   2784 {
   2785 	int qid;
   2786 	struct wm_softc *sc = ifp->if_softc;
   2787 
   2788 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2789 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2790 
   2791 		wm_watchdog_txq(ifp, txq);
   2792 	}
   2793 
   2794 	/* Reset the interface. */
   2795 	(void) wm_init(ifp);
   2796 
   2797 	/*
    2798 	 * There is still some upper-layer processing that calls
    2799 	 * ifp->if_start(), e.g. ALTQ.
   2800 	 */
   2801 	/* Try to get more packets going. */
   2802 	ifp->if_start(ifp);
   2803 }
   2804 
   2805 static void
   2806 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2807 {
   2808 	struct wm_softc *sc = ifp->if_softc;
   2809 
   2810 	/*
   2811 	 * Since we're using delayed interrupts, sweep up
   2812 	 * before we report an error.
   2813 	 */
   2814 	mutex_enter(txq->txq_lock);
   2815 	wm_txeof(sc, txq);
   2816 	mutex_exit(txq->txq_lock);
   2817 
   2818 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2819 #ifdef WM_DEBUG
   2820 		int i, j;
   2821 		struct wm_txsoft *txs;
   2822 #endif
   2823 		log(LOG_ERR,
   2824 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2825 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2826 		    txq->txq_next);
   2827 		ifp->if_oerrors++;
   2828 #ifdef WM_DEBUG
   2829 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2830 		    i = WM_NEXTTXS(txq, i)) {
   2831 		    txs = &txq->txq_soft[i];
   2832 		    printf("txs %d tx %d -> %d\n",
   2833 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2834 		    for (j = txs->txs_firstdesc; ;
   2835 			j = WM_NEXTTX(txq, j)) {
   2836 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2837 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2838 			printf("\t %#08x%08x\n",
   2839 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2840 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2841 			if (j == txs->txs_lastdesc)
   2842 				break;
   2843 			}
   2844 		}
   2845 #endif
   2846 	}
   2847 }
   2848 
   2849 /*
   2850  * wm_tick:
   2851  *
   2852  *	One second timer, used to check link status, sweep up
   2853  *	completed transmit jobs, etc.
   2854  */
   2855 static void
   2856 wm_tick(void *arg)
   2857 {
   2858 	struct wm_softc *sc = arg;
   2859 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2860 #ifndef WM_MPSAFE
   2861 	int s = splnet();
   2862 #endif
   2863 
   2864 	WM_CORE_LOCK(sc);
   2865 
   2866 	if (sc->sc_core_stopping)
   2867 		goto out;
   2868 
   2869 	if (sc->sc_type >= WM_T_82542_2_1) {
   2870 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2871 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2872 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2873 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2874 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2875 	}
   2876 
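         	/* The statistics registers clear on read; accumulate them here. */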
   2877 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2878 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2879 	    + CSR_READ(sc, WMREG_CRCERRS)
   2880 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2881 	    + CSR_READ(sc, WMREG_SYMERRC)
   2882 	    + CSR_READ(sc, WMREG_RXERRC)
   2883 	    + CSR_READ(sc, WMREG_SEC)
   2884 	    + CSR_READ(sc, WMREG_CEXTERR)
   2885 	    + CSR_READ(sc, WMREG_RLEC);
    2886 	/*
    2887 	 * WMREG_RNBC is incremented when no receive buffers are available
    2888 	 * in host memory. It is not the number of dropped packets, because
    2889 	 * the ethernet controller can still receive packets in that case
    2890 	 * as long as there is space in the PHY's FIFO.
    2891 	 *
    2892 	 * To count WMREG_RNBC, use a dedicated EVCNT instead of
    2893 	 * if_iqdrops.
    2894 	 */
   2895 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2896 
   2897 	if (sc->sc_flags & WM_F_HAS_MII)
   2898 		mii_tick(&sc->sc_mii);
   2899 	else if ((sc->sc_type >= WM_T_82575)
   2900 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2901 		wm_serdes_tick(sc);
   2902 	else
   2903 		wm_tbi_tick(sc);
   2904 
   2905 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2906 out:
   2907 	WM_CORE_UNLOCK(sc);
   2908 #ifndef WM_MPSAFE
   2909 	splx(s);
   2910 #endif
   2911 }
   2912 
   2913 static int
   2914 wm_ifflags_cb(struct ethercom *ec)
   2915 {
   2916 	struct ifnet *ifp = &ec->ec_if;
   2917 	struct wm_softc *sc = ifp->if_softc;
   2918 	int rc = 0;
   2919 
   2920 	WM_CORE_LOCK(sc);
   2921 
   2922 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2923 	sc->sc_if_flags = ifp->if_flags;
   2924 
   2925 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2926 		rc = ENETRESET;
   2927 		goto out;
   2928 	}
   2929 
   2930 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2931 		wm_set_filter(sc);
   2932 
   2933 	wm_set_vlan(sc);
   2934 
   2935 out:
   2936 	WM_CORE_UNLOCK(sc);
   2937 
   2938 	return rc;
   2939 }
   2940 
   2941 /*
   2942  * wm_ioctl:		[ifnet interface function]
   2943  *
   2944  *	Handle control requests from the operator.
   2945  */
   2946 static int
   2947 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2948 {
   2949 	struct wm_softc *sc = ifp->if_softc;
   2950 	struct ifreq *ifr = (struct ifreq *) data;
   2951 	struct ifaddr *ifa = (struct ifaddr *)data;
   2952 	struct sockaddr_dl *sdl;
   2953 	int s, error;
   2954 
   2955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2956 		device_xname(sc->sc_dev), __func__));
   2957 
   2958 #ifndef WM_MPSAFE
   2959 	s = splnet();
   2960 #endif
   2961 	switch (cmd) {
   2962 	case SIOCSIFMEDIA:
   2963 	case SIOCGIFMEDIA:
   2964 		WM_CORE_LOCK(sc);
   2965 		/* Flow control requires full-duplex mode. */
   2966 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2967 		    (ifr->ifr_media & IFM_FDX) == 0)
   2968 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2969 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2970 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2971 				/* We can do both TXPAUSE and RXPAUSE. */
   2972 				ifr->ifr_media |=
   2973 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2974 			}
   2975 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2976 		}
   2977 		WM_CORE_UNLOCK(sc);
   2978 #ifdef WM_MPSAFE
   2979 		s = splnet();
   2980 #endif
   2981 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2982 #ifdef WM_MPSAFE
   2983 		splx(s);
   2984 #endif
   2985 		break;
   2986 	case SIOCINITIFADDR:
   2987 		WM_CORE_LOCK(sc);
   2988 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2989 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2990 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2991 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2992 			/* unicast address is first multicast entry */
   2993 			wm_set_filter(sc);
   2994 			error = 0;
   2995 			WM_CORE_UNLOCK(sc);
   2996 			break;
   2997 		}
   2998 		WM_CORE_UNLOCK(sc);
   2999 		/*FALLTHROUGH*/
   3000 	default:
   3001 #ifdef WM_MPSAFE
   3002 		s = splnet();
   3003 #endif
    3004 		/* ether_ioctl() may call wm_start(), so it must run unlocked */
   3005 		error = ether_ioctl(ifp, cmd, data);
   3006 #ifdef WM_MPSAFE
   3007 		splx(s);
   3008 #endif
   3009 		if (error != ENETRESET)
   3010 			break;
   3011 
   3012 		error = 0;
   3013 
   3014 		if (cmd == SIOCSIFCAP) {
   3015 			error = (*ifp->if_init)(ifp);
   3016 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3017 			;
   3018 		else if (ifp->if_flags & IFF_RUNNING) {
   3019 			/*
   3020 			 * Multicast list has changed; set the hardware filter
   3021 			 * accordingly.
   3022 			 */
   3023 			WM_CORE_LOCK(sc);
   3024 			wm_set_filter(sc);
   3025 			WM_CORE_UNLOCK(sc);
   3026 		}
   3027 		break;
   3028 	}
   3029 
   3030 #ifndef WM_MPSAFE
   3031 	splx(s);
   3032 #endif
   3033 	return error;
   3034 }
   3035 
   3036 /* MAC address related */
   3037 
   3038 /*
    3039  * Get the offset of the MAC address and return it.
    3040  * Return 0 if it is not found or an error occurs.
   3041  */
   3042 static uint16_t
   3043 wm_check_alt_mac_addr(struct wm_softc *sc)
   3044 {
   3045 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3046 	uint16_t offset = NVM_OFF_MACADDR;
   3047 
   3048 	/* Try to read alternative MAC address pointer */
   3049 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3050 		return 0;
   3051 
    3052 	/* Check whether the pointer is valid. */
   3053 	if ((offset == 0x0000) || (offset == 0xffff))
   3054 		return 0;
   3055 
   3056 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3057 	/*
    3058 	 * Check whether the alternative MAC address is valid. Some cards
    3059 	 * have a pointer other than 0xffff but don't actually use an
    3060 	 * alternative MAC address. A valid unicast address never has
    3061 	 * the group bit set, so check that the low bit of the first
    3062 	 * octet is clear.
    3063 	 */
   3064 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3065 		if (((myea[0] & 0xff) & 0x01) == 0)
   3066 			return offset; /* Found */
   3067 
   3068 	/* Not found */
   3069 	return 0;
   3070 }
   3071 
   3072 static int
   3073 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3074 {
   3075 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3076 	uint16_t offset = NVM_OFF_MACADDR;
   3077 	int do_invert = 0;
   3078 
   3079 	switch (sc->sc_type) {
   3080 	case WM_T_82580:
   3081 	case WM_T_I350:
   3082 	case WM_T_I354:
   3083 		/* EEPROM Top Level Partitioning */
   3084 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3085 		break;
   3086 	case WM_T_82571:
   3087 	case WM_T_82575:
   3088 	case WM_T_82576:
   3089 	case WM_T_80003:
   3090 	case WM_T_I210:
   3091 	case WM_T_I211:
   3092 		offset = wm_check_alt_mac_addr(sc);
   3093 		if (offset == 0)
   3094 			if ((sc->sc_funcid & 0x01) == 1)
   3095 				do_invert = 1;
   3096 		break;
   3097 	default:
   3098 		if ((sc->sc_funcid & 0x01) == 1)
   3099 			do_invert = 1;
   3100 		break;
   3101 	}
   3102 
   3103 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3104 		goto bad;
   3105 
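         	/* The address is stored as three little-endian 16-bit NVM words. */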
   3106 	enaddr[0] = myea[0] & 0xff;
   3107 	enaddr[1] = myea[0] >> 8;
   3108 	enaddr[2] = myea[1] & 0xff;
   3109 	enaddr[3] = myea[1] >> 8;
   3110 	enaddr[4] = myea[2] & 0xff;
   3111 	enaddr[5] = myea[2] >> 8;
   3112 
   3113 	/*
   3114 	 * Toggle the LSB of the MAC address on the second port
   3115 	 * of some dual port cards.
   3116 	 */
   3117 	if (do_invert != 0)
   3118 		enaddr[5] ^= 1;
   3119 
   3120 	return 0;
   3121 
   3122  bad:
   3123 	return -1;
   3124 }
   3125 
   3126 /*
   3127  * wm_set_ral:
   3128  *
    3129  *	Set an entry in the receive address list.
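          *
          *	The address is split across a register pair: for
          *	aa:bb:cc:dd:ee:ff, ral_lo is 0xddccbbaa and ral_hi is
          *	0xffee with the RAL_AV (valid) bit set.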
   3130  */
   3131 static void
   3132 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3133 {
   3134 	uint32_t ral_lo, ral_hi;
   3135 
   3136 	if (enaddr != NULL) {
   3137 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3138 		    (enaddr[3] << 24);
   3139 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3140 		ral_hi |= RAL_AV;
   3141 	} else {
   3142 		ral_lo = 0;
   3143 		ral_hi = 0;
   3144 	}
   3145 
   3146 	if (sc->sc_type >= WM_T_82544) {
   3147 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3148 		    ral_lo);
   3149 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3150 		    ral_hi);
   3151 	} else {
   3152 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3153 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3154 	}
   3155 }
   3156 
   3157 /*
   3158  * wm_mchash:
   3159  *
   3160  *	Compute the hash of the multicast address for the 4096-bit
   3161  *	multicast filter.
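          *
          *	E.g. with mchash_type 0, address 01:00:5e:00:00:01 hashes
          *	to (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. bit 16 of
          *	filter word 0.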
   3162  */
   3163 static uint32_t
   3164 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3165 {
   3166 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3167 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3168 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3169 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3170 	uint32_t hash;
   3171 
   3172 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3173 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3174 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3175 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3176 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3177 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3178 		return (hash & 0x3ff);
   3179 	}
   3180 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3181 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3182 
   3183 	return (hash & 0xfff);
   3184 }
   3185 
   3186 /*
   3187  * wm_set_filter:
   3188  *
   3189  *	Set up the receive filter.
   3190  */
   3191 static void
   3192 wm_set_filter(struct wm_softc *sc)
   3193 {
   3194 	struct ethercom *ec = &sc->sc_ethercom;
   3195 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3196 	struct ether_multi *enm;
   3197 	struct ether_multistep step;
   3198 	bus_addr_t mta_reg;
   3199 	uint32_t hash, reg, bit;
   3200 	int i, size, ralmax;
   3201 
   3202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3203 		device_xname(sc->sc_dev), __func__));
   3204 
   3205 	if (sc->sc_type >= WM_T_82544)
   3206 		mta_reg = WMREG_CORDOVA_MTA;
   3207 	else
   3208 		mta_reg = WMREG_MTA;
   3209 
   3210 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3211 
   3212 	if (ifp->if_flags & IFF_BROADCAST)
   3213 		sc->sc_rctl |= RCTL_BAM;
   3214 	if (ifp->if_flags & IFF_PROMISC) {
   3215 		sc->sc_rctl |= RCTL_UPE;
   3216 		goto allmulti;
   3217 	}
   3218 
   3219 	/*
   3220 	 * Set the station address in the first RAL slot, and
   3221 	 * clear the remaining slots.
   3222 	 */
   3223 	if (sc->sc_type == WM_T_ICH8)
    3224 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3225 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3226 	    || (sc->sc_type == WM_T_PCH))
   3227 		size = WM_RAL_TABSIZE_ICH8;
   3228 	else if (sc->sc_type == WM_T_PCH2)
   3229 		size = WM_RAL_TABSIZE_PCH2;
   3230 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3231 		size = WM_RAL_TABSIZE_PCH_LPT;
   3232 	else if (sc->sc_type == WM_T_82575)
   3233 		size = WM_RAL_TABSIZE_82575;
   3234 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3235 		size = WM_RAL_TABSIZE_82576;
   3236 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3237 		size = WM_RAL_TABSIZE_I350;
   3238 	else
   3239 		size = WM_RAL_TABSIZE;
   3240 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3241 
   3242 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3243 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3244 		switch (i) {
   3245 		case 0:
   3246 			/* We can use all entries */
   3247 			ralmax = size;
   3248 			break;
   3249 		case 1:
   3250 			/* Only RAR[0] */
   3251 			ralmax = 1;
   3252 			break;
   3253 		default:
   3254 			/* available SHRA + RAR[0] */
   3255 			ralmax = i + 1;
   3256 		}
   3257 	} else
   3258 		ralmax = size;
   3259 	for (i = 1; i < size; i++) {
   3260 		if (i < ralmax)
   3261 			wm_set_ral(sc, NULL, i);
   3262 	}
   3263 
   3264 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3265 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3266 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3267 	    || (sc->sc_type == WM_T_PCH_SPT))
   3268 		size = WM_ICH8_MC_TABSIZE;
   3269 	else
   3270 		size = WM_MC_TABSIZE;
   3271 	/* Clear out the multicast table. */
   3272 	for (i = 0; i < size; i++)
   3273 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3274 
   3275 	ETHER_LOCK(ec);
   3276 	ETHER_FIRST_MULTI(step, ec, enm);
   3277 	while (enm != NULL) {
   3278 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3279 			ETHER_UNLOCK(ec);
   3280 			/*
   3281 			 * We must listen to a range of multicast addresses.
   3282 			 * For now, just accept all multicasts, rather than
   3283 			 * trying to set only those filter bits needed to match
   3284 			 * the range.  (At this time, the only use of address
   3285 			 * ranges is for IP multicast routing, for which the
   3286 			 * range is big enough to require all bits set.)
   3287 			 */
   3288 			goto allmulti;
   3289 		}
   3290 
   3291 		hash = wm_mchash(sc, enm->enm_addrlo);
   3292 
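         		/* High hash bits select the filter word, low 5 the bit. */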
   3293 		reg = (hash >> 5);
   3294 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3295 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3296 		    || (sc->sc_type == WM_T_PCH2)
   3297 		    || (sc->sc_type == WM_T_PCH_LPT)
   3298 		    || (sc->sc_type == WM_T_PCH_SPT))
   3299 			reg &= 0x1f;
   3300 		else
   3301 			reg &= 0x7f;
   3302 		bit = hash & 0x1f;
   3303 
   3304 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3305 		hash |= 1U << bit;
   3306 
   3307 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3308 			/*
    3309 			 * 82544 Errata 9: Certain registers cannot be written
   3310 			 * with particular alignments in PCI-X bus operation
   3311 			 * (FCAH, MTA and VFTA).
   3312 			 */
   3313 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3314 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3315 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3316 		} else
   3317 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3318 
   3319 		ETHER_NEXT_MULTI(step, enm);
   3320 	}
   3321 	ETHER_UNLOCK(ec);
   3322 
   3323 	ifp->if_flags &= ~IFF_ALLMULTI;
   3324 	goto setit;
   3325 
   3326  allmulti:
   3327 	ifp->if_flags |= IFF_ALLMULTI;
   3328 	sc->sc_rctl |= RCTL_MPE;
   3329 
   3330  setit:
   3331 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3332 }
   3333 
   3334 /* Reset and init related */
   3335 
   3336 static void
   3337 wm_set_vlan(struct wm_softc *sc)
   3338 {
   3339 
   3340 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3341 		device_xname(sc->sc_dev), __func__));
   3342 
   3343 	/* Deal with VLAN enables. */
   3344 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3345 		sc->sc_ctrl |= CTRL_VME;
   3346 	else
   3347 		sc->sc_ctrl &= ~CTRL_VME;
   3348 
   3349 	/* Write the control registers. */
   3350 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3351 }
   3352 
   3353 static void
   3354 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3355 {
   3356 	uint32_t gcr;
   3357 	pcireg_t ctrl2;
   3358 
   3359 	gcr = CSR_READ(sc, WMREG_GCR);
   3360 
    3361 	/* Only take action if the timeout value is at its default of 0 */
   3362 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3363 		goto out;
   3364 
   3365 	if ((gcr & GCR_CAP_VER2) == 0) {
   3366 		gcr |= GCR_CMPL_TMOUT_10MS;
   3367 		goto out;
   3368 	}
   3369 
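         	/* Cap. version 2 devices take the timeout from PCIe config space. */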
   3370 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3371 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3372 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3373 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3374 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3375 
   3376 out:
   3377 	/* Disable completion timeout resend */
   3378 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3379 
   3380 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3381 }
   3382 
   3383 void
   3384 wm_get_auto_rd_done(struct wm_softc *sc)
   3385 {
   3386 	int i;
   3387 
   3388 	/* wait for eeprom to reload */
   3389 	switch (sc->sc_type) {
   3390 	case WM_T_82571:
   3391 	case WM_T_82572:
   3392 	case WM_T_82573:
   3393 	case WM_T_82574:
   3394 	case WM_T_82583:
   3395 	case WM_T_82575:
   3396 	case WM_T_82576:
   3397 	case WM_T_82580:
   3398 	case WM_T_I350:
   3399 	case WM_T_I354:
   3400 	case WM_T_I210:
   3401 	case WM_T_I211:
   3402 	case WM_T_80003:
   3403 	case WM_T_ICH8:
   3404 	case WM_T_ICH9:
   3405 		for (i = 0; i < 10; i++) {
   3406 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3407 				break;
   3408 			delay(1000);
   3409 		}
   3410 		if (i == 10) {
   3411 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3412 			    "complete\n", device_xname(sc->sc_dev));
   3413 		}
   3414 		break;
   3415 	default:
   3416 		break;
   3417 	}
   3418 }
   3419 
   3420 void
   3421 wm_lan_init_done(struct wm_softc *sc)
   3422 {
   3423 	uint32_t reg = 0;
   3424 	int i;
   3425 
   3426 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3427 		device_xname(sc->sc_dev), __func__));
   3428 
   3429 	/* Wait for eeprom to reload */
   3430 	switch (sc->sc_type) {
   3431 	case WM_T_ICH10:
   3432 	case WM_T_PCH:
   3433 	case WM_T_PCH2:
   3434 	case WM_T_PCH_LPT:
   3435 	case WM_T_PCH_SPT:
   3436 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3437 			reg = CSR_READ(sc, WMREG_STATUS);
   3438 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3439 				break;
   3440 			delay(100);
   3441 		}
   3442 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3443 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3444 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3445 		}
   3446 		break;
   3447 	default:
   3448 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3449 		    __func__);
   3450 		break;
   3451 	}
   3452 
   3453 	reg &= ~STATUS_LAN_INIT_DONE;
   3454 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3455 }
   3456 
   3457 void
   3458 wm_get_cfg_done(struct wm_softc *sc)
   3459 {
   3460 	int mask;
   3461 	uint32_t reg;
   3462 	int i;
   3463 
   3464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3465 		device_xname(sc->sc_dev), __func__));
   3466 
   3467 	/* Wait for eeprom to reload */
   3468 	switch (sc->sc_type) {
   3469 	case WM_T_82542_2_0:
   3470 	case WM_T_82542_2_1:
   3471 		/* null */
   3472 		break;
   3473 	case WM_T_82543:
   3474 	case WM_T_82544:
   3475 	case WM_T_82540:
   3476 	case WM_T_82545:
   3477 	case WM_T_82545_3:
   3478 	case WM_T_82546:
   3479 	case WM_T_82546_3:
   3480 	case WM_T_82541:
   3481 	case WM_T_82541_2:
   3482 	case WM_T_82547:
   3483 	case WM_T_82547_2:
   3484 	case WM_T_82573:
   3485 	case WM_T_82574:
   3486 	case WM_T_82583:
   3487 		/* generic */
   3488 		delay(10*1000);
   3489 		break;
   3490 	case WM_T_80003:
   3491 	case WM_T_82571:
   3492 	case WM_T_82572:
   3493 	case WM_T_82575:
   3494 	case WM_T_82576:
   3495 	case WM_T_82580:
   3496 	case WM_T_I350:
   3497 	case WM_T_I354:
   3498 	case WM_T_I210:
   3499 	case WM_T_I211:
   3500 		if (sc->sc_type == WM_T_82571) {
    3501 			/* On 82571, all functions use the port 0 bit */
   3502 			mask = EEMNGCTL_CFGDONE_0;
   3503 		} else
   3504 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3505 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3506 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3507 				break;
   3508 			delay(1000);
   3509 		}
   3510 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3511 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3512 				device_xname(sc->sc_dev), __func__));
   3513 		}
   3514 		break;
   3515 	case WM_T_ICH8:
   3516 	case WM_T_ICH9:
   3517 	case WM_T_ICH10:
   3518 	case WM_T_PCH:
   3519 	case WM_T_PCH2:
   3520 	case WM_T_PCH_LPT:
   3521 	case WM_T_PCH_SPT:
   3522 		delay(10*1000);
   3523 		if (sc->sc_type >= WM_T_ICH10)
   3524 			wm_lan_init_done(sc);
   3525 		else
   3526 			wm_get_auto_rd_done(sc);
   3527 
   3528 		reg = CSR_READ(sc, WMREG_STATUS);
   3529 		if ((reg & STATUS_PHYRA) != 0)
   3530 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3531 		break;
   3532 	default:
   3533 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3534 		    __func__);
   3535 		break;
   3536 	}
   3537 }
   3538 
   3539 /* Init hardware bits */
   3540 void
   3541 wm_initialize_hardware_bits(struct wm_softc *sc)
   3542 {
   3543 	uint32_t tarc0, tarc1, reg;
   3544 
   3545 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3546 		device_xname(sc->sc_dev), __func__));
   3547 
    3548 	/* For 82571 variants, 80003 and ICHs */
   3549 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3550 	    || (sc->sc_type >= WM_T_80003)) {
   3551 
   3552 		/* Transmit Descriptor Control 0 */
   3553 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3554 		reg |= TXDCTL_COUNT_DESC;
   3555 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3556 
   3557 		/* Transmit Descriptor Control 1 */
   3558 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3559 		reg |= TXDCTL_COUNT_DESC;
   3560 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3561 
   3562 		/* TARC0 */
   3563 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3564 		switch (sc->sc_type) {
   3565 		case WM_T_82571:
   3566 		case WM_T_82572:
   3567 		case WM_T_82573:
   3568 		case WM_T_82574:
   3569 		case WM_T_82583:
   3570 		case WM_T_80003:
   3571 			/* Clear bits 30..27 */
   3572 			tarc0 &= ~__BITS(30, 27);
   3573 			break;
   3574 		default:
   3575 			break;
   3576 		}
   3577 
   3578 		switch (sc->sc_type) {
   3579 		case WM_T_82571:
   3580 		case WM_T_82572:
   3581 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3582 
   3583 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3584 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3585 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3586 			/* 8257[12] Errata No.7 */
    3587 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3588 
   3589 			/* TARC1 bit 28 */
   3590 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3591 				tarc1 &= ~__BIT(28);
   3592 			else
   3593 				tarc1 |= __BIT(28);
   3594 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3595 
   3596 			/*
   3597 			 * 8257[12] Errata No.13
    3598 			 * Disable Dynamic Clock Gating.
   3599 			 */
   3600 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3601 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3602 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3603 			break;
   3604 		case WM_T_82573:
   3605 		case WM_T_82574:
   3606 		case WM_T_82583:
   3607 			if ((sc->sc_type == WM_T_82574)
   3608 			    || (sc->sc_type == WM_T_82583))
   3609 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3610 
   3611 			/* Extended Device Control */
   3612 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3613 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3614 			reg |= __BIT(22);	/* Set bit 22 */
   3615 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3616 
   3617 			/* Device Control */
   3618 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3619 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3620 
   3621 			/* PCIe Control Register */
   3622 			/*
   3623 			 * 82573 Errata (unknown).
   3624 			 *
   3625 			 * 82574 Errata 25 and 82583 Errata 12
   3626 			 * "Dropped Rx Packets":
    3627 			 *   NVM Image Version 2.1.4 and newer is not affected.
   3628 			 */
   3629 			reg = CSR_READ(sc, WMREG_GCR);
   3630 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3631 			CSR_WRITE(sc, WMREG_GCR, reg);
   3632 
   3633 			if ((sc->sc_type == WM_T_82574)
   3634 			    || (sc->sc_type == WM_T_82583)) {
   3635 				/*
   3636 				 * Document says this bit must be set for
   3637 				 * proper operation.
   3638 				 */
   3639 				reg = CSR_READ(sc, WMREG_GCR);
   3640 				reg |= __BIT(22);
   3641 				CSR_WRITE(sc, WMREG_GCR, reg);
   3642 
    3643 				/*
    3644 				 * Apply a workaround for a documented
    3645 				 * hardware erratum: unreliable PCIe
    3646 				 * completions can occur, particularly
    3647 				 * with ASPM enabled. Without the fix,
    3648 				 * the issue can cause Tx timeouts.
    3649 				 */
   3651 				reg = CSR_READ(sc, WMREG_GCR2);
   3652 				reg |= __BIT(0);
   3653 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3654 			}
   3655 			break;
   3656 		case WM_T_80003:
   3657 			/* TARC0 */
   3658 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3659 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3660 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3661 
   3662 			/* TARC1 bit 28 */
   3663 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3664 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3665 				tarc1 &= ~__BIT(28);
   3666 			else
   3667 				tarc1 |= __BIT(28);
   3668 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3669 			break;
   3670 		case WM_T_ICH8:
   3671 		case WM_T_ICH9:
   3672 		case WM_T_ICH10:
   3673 		case WM_T_PCH:
   3674 		case WM_T_PCH2:
   3675 		case WM_T_PCH_LPT:
   3676 		case WM_T_PCH_SPT:
   3677 			/* TARC0 */
   3678 			if ((sc->sc_type == WM_T_ICH8)
   3679 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3680 				/* Set TARC0 bits 29 and 28 */
   3681 				tarc0 |= __BITS(29, 28);
   3682 			}
   3683 			/* Set TARC0 bits 23,24,26,27 */
   3684 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3685 
   3686 			/* CTRL_EXT */
   3687 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3688 			reg |= __BIT(22);	/* Set bit 22 */
   3689 			/*
   3690 			 * Enable PHY low-power state when MAC is at D3
   3691 			 * w/o WoL
   3692 			 */
   3693 			if (sc->sc_type >= WM_T_PCH)
   3694 				reg |= CTRL_EXT_PHYPDEN;
   3695 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3696 
   3697 			/* TARC1 */
   3698 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3699 			/* bit 28 */
   3700 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3701 				tarc1 &= ~__BIT(28);
   3702 			else
   3703 				tarc1 |= __BIT(28);
   3704 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3705 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3706 
   3707 			/* Device Status */
   3708 			if (sc->sc_type == WM_T_ICH8) {
   3709 				reg = CSR_READ(sc, WMREG_STATUS);
   3710 				reg &= ~__BIT(31);
   3711 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3712 
   3713 			}
   3714 
   3715 			/* IOSFPC */
   3716 			if (sc->sc_type == WM_T_PCH_SPT) {
   3717 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3718 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3719 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3720 			}
    3721 			/*
    3722 			 * To work around a descriptor data corruption issue
    3723 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3724 			 * filtering capability.
    3725 			 */
   3726 			reg = CSR_READ(sc, WMREG_RFCTL);
   3727 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3728 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3729 			break;
   3730 		default:
   3731 			break;
   3732 		}
   3733 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3734 
   3735 		switch (sc->sc_type) {
   3736 		/*
   3737 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3738 		 * Avoid RSS Hash Value bug.
   3739 		 */
   3740 		case WM_T_82571:
   3741 		case WM_T_82572:
   3742 		case WM_T_82573:
   3743 		case WM_T_80003:
   3744 		case WM_T_ICH8:
   3745 			reg = CSR_READ(sc, WMREG_RFCTL);
   3746 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3747 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3748 			break;
   3749 		case WM_T_82574:
    3750 			/* Use extended Rx descriptors. */
   3751 			reg = CSR_READ(sc, WMREG_RFCTL);
   3752 			reg |= WMREG_RFCTL_EXSTEN;
   3753 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3754 			break;
   3755 		default:
   3756 			break;
   3757 		}
   3758 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3759 		/*
   3760 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3761 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3762 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3763 		 * Correctly by the Device"
   3764 		 *
   3765 		 * I354(C2000) Errata AVR53:
   3766 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3767 		 * Hang"
   3768 		 */
   3769 		reg = CSR_READ(sc, WMREG_RFCTL);
   3770 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3771 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3772 	}
   3773 }
   3774 
   3775 static uint32_t
   3776 wm_rxpbs_adjust_82580(uint32_t val)
   3777 {
   3778 	uint32_t rv = 0;
   3779 
   3780 	if (val < __arraycount(wm_82580_rxpbs_table))
   3781 		rv = wm_82580_rxpbs_table[val];
   3782 
   3783 	return rv;
   3784 }
   3785 
   3786 /*
   3787  * wm_reset_phy:
   3788  *
   3789  *	generic PHY reset function.
   3790  *	Same as e1000_phy_hw_reset_generic()
   3791  */
   3792 static void
   3793 wm_reset_phy(struct wm_softc *sc)
   3794 {
   3795 	uint32_t reg;
   3796 
   3797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3798 		device_xname(sc->sc_dev), __func__));
   3799 	if (wm_phy_resetisblocked(sc))
   3800 		return;
   3801 
   3802 	sc->phy.acquire(sc);
   3803 
   3804 	reg = CSR_READ(sc, WMREG_CTRL);
   3805 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3806 	CSR_WRITE_FLUSH(sc);
   3807 
   3808 	delay(sc->phy.reset_delay_us);
   3809 
   3810 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3811 	CSR_WRITE_FLUSH(sc);
   3812 
   3813 	delay(150);
   3814 
   3815 	sc->phy.release(sc);
   3816 
   3817 	wm_get_cfg_done(sc);
   3818 }
   3819 
   3820 static void
   3821 wm_flush_desc_rings(struct wm_softc *sc)
   3822 {
   3823 	pcireg_t preg;
   3824 	uint32_t reg;
   3825 	int nexttx;
   3826 
   3827 	/* First, disable MULR fix in FEXTNVM11 */
   3828 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3829 	reg |= FEXTNVM11_DIS_MULRFIX;
   3830 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3831 
   3832 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3833 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3834 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3835 		struct wm_txqueue *txq;
   3836 		wiseman_txdesc_t *txd;
   3837 
   3838 		/* TX */
   3839 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3840 		    device_xname(sc->sc_dev), preg, reg);
   3841 		reg = CSR_READ(sc, WMREG_TCTL);
   3842 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3843 
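         		/*
         		 * Queue a single dummy 512-byte descriptor and advance
         		 * the tail pointer so the hardware can complete its
         		 * pending flush request.
         		 */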
   3844 		txq = &sc->sc_queue[0].wmq_txq;
   3845 		nexttx = txq->txq_next;
   3846 		txd = &txq->txq_descs[nexttx];
   3847 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3848 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3849 		txd->wtx_fields.wtxu_status = 0;
   3850 		txd->wtx_fields.wtxu_options = 0;
   3851 		txd->wtx_fields.wtxu_vlan = 0;
   3852 
   3853 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3854 			BUS_SPACE_BARRIER_WRITE);
   3855 
   3856 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3857 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3858 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3859 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3860 		delay(250);
   3861 	}
   3862 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3863 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3864 		uint32_t rctl;
   3865 
   3866 		/* RX */
   3867 		printf("%s: Need RX flush (reg = %08x)\n",
   3868 		    device_xname(sc->sc_dev), preg);
   3869 		rctl = CSR_READ(sc, WMREG_RCTL);
   3870 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3871 		CSR_WRITE_FLUSH(sc);
   3872 		delay(150);
   3873 
   3874 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3875 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3876 		reg &= 0xffffc000;
   3877 		/*
   3878 		 * update thresholds: prefetch threshold to 31, host threshold
   3879 		 * to 1 and make sure the granularity is "descriptors" and not
   3880 		 * "cache lines"
   3881 		 */
   3882 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3883 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3884 
   3885 		/*
   3886 		 * momentarily enable the RX ring for the changes to take
   3887 		 * effect
   3888 		 */
   3889 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3890 		CSR_WRITE_FLUSH(sc);
   3891 		delay(150);
   3892 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3893 	}
   3894 }
   3895 
   3896 /*
   3897  * wm_reset:
   3898  *
   3899  *	Reset the i82542 chip.
   3900  */
   3901 static void
   3902 wm_reset(struct wm_softc *sc)
   3903 {
   3904 	int phy_reset = 0;
   3905 	int i, error = 0;
   3906 	uint32_t reg;
   3907 
   3908 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3909 		device_xname(sc->sc_dev), __func__));
   3910 	KASSERT(sc->sc_type != 0);
   3911 
   3912 	/*
   3913 	 * Allocate on-chip memory according to the MTU size.
   3914 	 * The Packet Buffer Allocation register must be written
   3915 	 * before the chip is reset.
   3916 	 */
   3917 	switch (sc->sc_type) {
   3918 	case WM_T_82547:
   3919 	case WM_T_82547_2:
   3920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3921 		    PBA_22K : PBA_30K;
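         		/*
         		 * The rest of the 40KB packet buffer becomes the Tx
         		 * FIFO, e.g. a 30KB Rx allocation leaves 10KB for Tx.
         		 */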
   3922 		for (i = 0; i < sc->sc_nqueues; i++) {
   3923 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3924 			txq->txq_fifo_head = 0;
   3925 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3926 			txq->txq_fifo_size =
   3927 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3928 			txq->txq_fifo_stall = 0;
   3929 		}
   3930 		break;
   3931 	case WM_T_82571:
   3932 	case WM_T_82572:
    3933 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3934 	case WM_T_80003:
   3935 		sc->sc_pba = PBA_32K;
   3936 		break;
   3937 	case WM_T_82573:
   3938 		sc->sc_pba = PBA_12K;
   3939 		break;
   3940 	case WM_T_82574:
   3941 	case WM_T_82583:
   3942 		sc->sc_pba = PBA_20K;
   3943 		break;
   3944 	case WM_T_82576:
   3945 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3946 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3947 		break;
   3948 	case WM_T_82580:
   3949 	case WM_T_I350:
   3950 	case WM_T_I354:
   3951 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3952 		break;
   3953 	case WM_T_I210:
   3954 	case WM_T_I211:
   3955 		sc->sc_pba = PBA_34K;
   3956 		break;
   3957 	case WM_T_ICH8:
   3958 		/* Workaround for a bit corruption issue in FIFO memory */
   3959 		sc->sc_pba = PBA_8K;
   3960 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3961 		break;
   3962 	case WM_T_ICH9:
   3963 	case WM_T_ICH10:
   3964 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3965 		    PBA_14K : PBA_10K;
   3966 		break;
   3967 	case WM_T_PCH:
   3968 	case WM_T_PCH2:
   3969 	case WM_T_PCH_LPT:
   3970 	case WM_T_PCH_SPT:
   3971 		sc->sc_pba = PBA_26K;
   3972 		break;
   3973 	default:
   3974 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3975 		    PBA_40K : PBA_48K;
   3976 		break;
   3977 	}
   3978 	/*
    3979 	 * Only old or non-multiqueue devices have the PBA register.
   3980 	 * XXX Need special handling for 82575.
   3981 	 */
   3982 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3983 	    || (sc->sc_type == WM_T_82575))
   3984 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3985 
   3986 	/* Prevent the PCI-E bus from sticking */
   3987 	if (sc->sc_flags & WM_F_PCIE) {
   3988 		int timeout = 800;
   3989 
   3990 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3991 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3992 
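         		/* Wait up to 80ms (800 * 100us) for masters to drain. */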
   3993 		while (timeout--) {
   3994 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3995 			    == 0)
   3996 				break;
   3997 			delay(100);
   3998 		}
   3999 	}
   4000 
   4001 	/* Set the completion timeout for interface */
   4002 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4003 	    || (sc->sc_type == WM_T_82580)
   4004 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4005 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4006 		wm_set_pcie_completion_timeout(sc);
   4007 
   4008 	/* Clear interrupt */
   4009 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4010 	if (sc->sc_nintrs > 1) {
   4011 		if (sc->sc_type != WM_T_82574) {
   4012 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4013 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4014 		} else {
   4015 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4016 		}
   4017 	}
   4018 
   4019 	/* Stop the transmit and receive processes. */
   4020 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4021 	sc->sc_rctl &= ~RCTL_EN;
   4022 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4023 	CSR_WRITE_FLUSH(sc);
   4024 
   4025 	/* XXX set_tbi_sbp_82543() */
   4026 
   4027 	delay(10*1000);
   4028 
   4029 	/* Must acquire the MDIO ownership before MAC reset */
   4030 	switch (sc->sc_type) {
   4031 	case WM_T_82573:
   4032 	case WM_T_82574:
   4033 	case WM_T_82583:
   4034 		error = wm_get_hw_semaphore_82573(sc);
   4035 		break;
   4036 	default:
   4037 		break;
   4038 	}
   4039 
   4040 	/*
   4041 	 * 82541 Errata 29? & 82547 Errata 28?
   4042 	 * See also the description about PHY_RST bit in CTRL register
   4043 	 * in 8254x_GBe_SDM.pdf.
   4044 	 */
   4045 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4046 		CSR_WRITE(sc, WMREG_CTRL,
   4047 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4048 		CSR_WRITE_FLUSH(sc);
   4049 		delay(5000);
   4050 	}
   4051 
   4052 	switch (sc->sc_type) {
   4053 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4054 	case WM_T_82541:
   4055 	case WM_T_82541_2:
   4056 	case WM_T_82547:
   4057 	case WM_T_82547_2:
   4058 		/*
   4059 		 * On some chipsets, a reset through a memory-mapped write
   4060 		 * cycle can cause the chip to reset before completing the
   4061 		 * write cycle.  This causes major headache that can be
   4062 		 * avoided by issuing the reset via indirect register writes
   4063 		 * through I/O space.
   4064 		 *
   4065 		 * So, if we successfully mapped the I/O BAR at attach time,
   4066 		 * use that.  Otherwise, try our luck with a memory-mapped
   4067 		 * reset.
   4068 		 */
   4069 		if (sc->sc_flags & WM_F_IOH_VALID)
   4070 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4071 		else
   4072 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4073 		break;
   4074 	case WM_T_82545_3:
   4075 	case WM_T_82546_3:
   4076 		/* Use the shadow control register on these chips. */
   4077 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4078 		break;
   4079 	case WM_T_80003:
   4080 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4081 		sc->phy.acquire(sc);
   4082 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4083 		sc->phy.release(sc);
   4084 		break;
   4085 	case WM_T_ICH8:
   4086 	case WM_T_ICH9:
   4087 	case WM_T_ICH10:
   4088 	case WM_T_PCH:
   4089 	case WM_T_PCH2:
   4090 	case WM_T_PCH_LPT:
   4091 	case WM_T_PCH_SPT:
   4092 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4093 		if (wm_phy_resetisblocked(sc) == false) {
   4094 			/*
   4095 			 * Gate automatic PHY configuration by hardware on
   4096 			 * non-managed 82579
   4097 			 */
   4098 			if ((sc->sc_type == WM_T_PCH2)
   4099 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4100 				== 0))
   4101 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4102 
   4103 			reg |= CTRL_PHY_RESET;
   4104 			phy_reset = 1;
   4105 		} else
   4106 			printf("XXX reset is blocked!!!\n");
   4107 		sc->phy.acquire(sc);
   4108 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4109 		/* Don't insert a completion barrier while resetting */
   4110 		delay(20*1000);
   4111 		mutex_exit(sc->sc_ich_phymtx);
   4112 		break;
   4113 	case WM_T_82580:
   4114 	case WM_T_I350:
   4115 	case WM_T_I354:
   4116 	case WM_T_I210:
   4117 	case WM_T_I211:
   4118 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4119 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4120 			CSR_WRITE_FLUSH(sc);
   4121 		delay(5000);
   4122 		break;
   4123 	case WM_T_82542_2_0:
   4124 	case WM_T_82542_2_1:
   4125 	case WM_T_82543:
   4126 	case WM_T_82540:
   4127 	case WM_T_82545:
   4128 	case WM_T_82546:
   4129 	case WM_T_82571:
   4130 	case WM_T_82572:
   4131 	case WM_T_82573:
   4132 	case WM_T_82574:
   4133 	case WM_T_82575:
   4134 	case WM_T_82576:
   4135 	case WM_T_82583:
   4136 	default:
   4137 		/* Everything else can safely use the documented method. */
   4138 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4139 		break;
   4140 	}
   4141 
   4142 	/* Must release the MDIO ownership after MAC reset */
   4143 	switch (sc->sc_type) {
   4144 	case WM_T_82573:
   4145 	case WM_T_82574:
   4146 	case WM_T_82583:
   4147 		if (error == 0)
   4148 			wm_put_hw_semaphore_82573(sc);
   4149 		break;
   4150 	default:
   4151 		break;
   4152 	}
   4153 
   4154 	if (phy_reset != 0)
   4155 		wm_get_cfg_done(sc);
   4156 
   4157 	/* reload EEPROM */
   4158 	switch (sc->sc_type) {
   4159 	case WM_T_82542_2_0:
   4160 	case WM_T_82542_2_1:
   4161 	case WM_T_82543:
   4162 	case WM_T_82544:
   4163 		delay(10);
   4164 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4165 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4166 		CSR_WRITE_FLUSH(sc);
   4167 		delay(2000);
   4168 		break;
   4169 	case WM_T_82540:
   4170 	case WM_T_82545:
   4171 	case WM_T_82545_3:
   4172 	case WM_T_82546:
   4173 	case WM_T_82546_3:
   4174 		delay(5*1000);
   4175 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4176 		break;
   4177 	case WM_T_82541:
   4178 	case WM_T_82541_2:
   4179 	case WM_T_82547:
   4180 	case WM_T_82547_2:
   4181 		delay(20000);
   4182 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4183 		break;
   4184 	case WM_T_82571:
   4185 	case WM_T_82572:
   4186 	case WM_T_82573:
   4187 	case WM_T_82574:
   4188 	case WM_T_82583:
   4189 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4190 			delay(10);
   4191 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4192 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4193 			CSR_WRITE_FLUSH(sc);
   4194 		}
   4195 		/* check EECD_EE_AUTORD */
   4196 		wm_get_auto_rd_done(sc);
   4197 		/*
   4198 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4199 		 * is set.
   4200 		 */
   4201 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4202 		    || (sc->sc_type == WM_T_82583))
   4203 			delay(25*1000);
   4204 		break;
   4205 	case WM_T_82575:
   4206 	case WM_T_82576:
   4207 	case WM_T_82580:
   4208 	case WM_T_I350:
   4209 	case WM_T_I354:
   4210 	case WM_T_I210:
   4211 	case WM_T_I211:
   4212 	case WM_T_80003:
   4213 		/* check EECD_EE_AUTORD */
   4214 		wm_get_auto_rd_done(sc);
   4215 		break;
   4216 	case WM_T_ICH8:
   4217 	case WM_T_ICH9:
   4218 	case WM_T_ICH10:
   4219 	case WM_T_PCH:
   4220 	case WM_T_PCH2:
   4221 	case WM_T_PCH_LPT:
   4222 	case WM_T_PCH_SPT:
   4223 		break;
   4224 	default:
   4225 		panic("%s: unknown type\n", __func__);
   4226 	}
   4227 
   4228 	/* Check whether EEPROM is present or not */
   4229 	switch (sc->sc_type) {
   4230 	case WM_T_82575:
   4231 	case WM_T_82576:
   4232 	case WM_T_82580:
   4233 	case WM_T_I350:
   4234 	case WM_T_I354:
   4235 	case WM_T_ICH8:
   4236 	case WM_T_ICH9:
   4237 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4238 			/* Not found */
   4239 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4240 			if (sc->sc_type == WM_T_82575)
   4241 				wm_reset_init_script_82575(sc);
   4242 		}
   4243 		break;
   4244 	default:
   4245 		break;
   4246 	}
   4247 
   4248 	if ((sc->sc_type == WM_T_82580)
   4249 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4250 		/* clear global device reset status bit */
   4251 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4252 	}
   4253 
   4254 	/* Clear any pending interrupt events. */
   4255 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4256 	reg = CSR_READ(sc, WMREG_ICR);
   4257 	if (sc->sc_nintrs > 1) {
   4258 		if (sc->sc_type != WM_T_82574) {
   4259 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4260 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4261 		} else
   4262 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4263 	}
   4264 
   4265 	/* reload sc_ctrl */
   4266 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4267 
   4268 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4269 		wm_set_eee_i350(sc);
   4270 
   4271 	/* Clear the host wakeup bit after lcd reset */
   4272 	if (sc->sc_type >= WM_T_PCH) {
   4273 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4274 		    BM_PORT_GEN_CFG);
   4275 		reg &= ~BM_WUC_HOST_WU_BIT;
   4276 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4277 		    BM_PORT_GEN_CFG, reg);
   4278 	}
   4279 
   4280 	/*
   4281 	 * For PCH, this write will make sure that any noise will be detected
   4282 	 * as a CRC error and be dropped rather than show up as a bad packet
   4283 	 * to the DMA engine
   4284 	 */
   4285 	if (sc->sc_type == WM_T_PCH)
   4286 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4287 
   4288 	if (sc->sc_type >= WM_T_82544)
   4289 		CSR_WRITE(sc, WMREG_WUC, 0);
   4290 
   4291 	wm_reset_mdicnfg_82580(sc);
   4292 
   4293 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4294 		wm_pll_workaround_i210(sc);
   4295 }
   4296 
   4297 /*
   4298  * wm_add_rxbuf:
   4299  *
    4300  *	Add a receive buffer to the indicated descriptor.
   4301  */
   4302 static int
   4303 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4304 {
   4305 	struct wm_softc *sc = rxq->rxq_sc;
   4306 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4307 	struct mbuf *m;
   4308 	int error;
   4309 
   4310 	KASSERT(mutex_owned(rxq->rxq_lock));
   4311 
   4312 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4313 	if (m == NULL)
   4314 		return ENOBUFS;
   4315 
   4316 	MCLGET(m, M_DONTWAIT);
   4317 	if ((m->m_flags & M_EXT) == 0) {
   4318 		m_freem(m);
   4319 		return ENOBUFS;
   4320 	}
   4321 
   4322 	if (rxs->rxs_mbuf != NULL)
   4323 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4324 
   4325 	rxs->rxs_mbuf = m;
   4326 
   4327 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4328 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4329 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4330 	if (error) {
   4331 		/* XXX XXX XXX */
   4332 		aprint_error_dev(sc->sc_dev,
   4333 		    "unable to load rx DMA map %d, error = %d\n",
   4334 		    idx, error);
   4335 		panic("wm_add_rxbuf");
   4336 	}
   4337 
   4338 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4339 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4340 
   4341 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4342 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4343 			wm_init_rxdesc(rxq, idx);
   4344 	} else
   4345 		wm_init_rxdesc(rxq, idx);
   4346 
   4347 	return 0;
   4348 }
   4349 
   4350 /*
   4351  * wm_rxdrain:
   4352  *
   4353  *	Drain the receive queue.
   4354  */
   4355 static void
   4356 wm_rxdrain(struct wm_rxqueue *rxq)
   4357 {
   4358 	struct wm_softc *sc = rxq->rxq_sc;
   4359 	struct wm_rxsoft *rxs;
   4360 	int i;
   4361 
   4362 	KASSERT(mutex_owned(rxq->rxq_lock));
   4363 
   4364 	for (i = 0; i < WM_NRXDESC; i++) {
   4365 		rxs = &rxq->rxq_soft[i];
   4366 		if (rxs->rxs_mbuf != NULL) {
   4367 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4368 			m_freem(rxs->rxs_mbuf);
   4369 			rxs->rxs_mbuf = NULL;
   4370 		}
   4371 	}
   4372 }
   4373 
   4374 
   4375 /*
   4376  * XXX copy from FreeBSD's sys/net/rss_config.c
   4377  */
   4378 /*
   4379  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4380  * effectiveness may be limited by algorithm choice and available entropy
   4381  * during the boot.
   4382  *
   4383  * XXXRW: And that we don't randomize it yet!
   4384  *
   4385  * This is the default Microsoft RSS specification key which is also
   4386  * the Chelsio T5 firmware default key.
   4387  */
   4388 #define RSS_KEYSIZE 40
   4389 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4390 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4391 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4392 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4393 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4394 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4395 };
   4396 
   4397 /*
   4398  * Caller must pass an array of size sizeof(rss_key).
   4399  *
    4400  * XXX
    4401  * As if_ixgbe may also use this function, it should not be
    4402  * if_wm specific.
   4403  */
   4404 static void
   4405 wm_rss_getkey(uint8_t *key)
   4406 {
   4407 
   4408 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4409 }
   4410 
   4411 /*
    4412  * Set up registers for RSS.
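          *
          * Each RETA entry is filled round-robin (entry i maps to queue
          * i % sc_nqueues), so with e.g. 4 queues the table repeats the
          * pattern 0,1,2,3.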
   4413  *
    4414  * XXX VMDq is not yet supported.
   4415  */
   4416 static void
   4417 wm_init_rss(struct wm_softc *sc)
   4418 {
   4419 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4420 	int i;
   4421 
   4422 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4423 
   4424 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4425 		int qid, reta_ent;
   4426 
   4427 		qid  = i % sc->sc_nqueues;
    4428 		switch (sc->sc_type) {
   4429 		case WM_T_82574:
   4430 			reta_ent = __SHIFTIN(qid,
   4431 			    RETA_ENT_QINDEX_MASK_82574);
   4432 			break;
   4433 		case WM_T_82575:
   4434 			reta_ent = __SHIFTIN(qid,
   4435 			    RETA_ENT_QINDEX1_MASK_82575);
   4436 			break;
   4437 		default:
   4438 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4439 			break;
   4440 		}
   4441 
   4442 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4443 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4444 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4445 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4446 	}
   4447 
   4448 	wm_rss_getkey((uint8_t *)rss_key);
   4449 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4450 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4451 
   4452 	if (sc->sc_type == WM_T_82574)
   4453 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4454 	else
   4455 		mrqc = MRQC_ENABLE_RSS_MQ;
   4456 
   4457 	/*
   4458 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4459 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4460 	 */
   4461 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4462 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4463 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4464 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4465 
   4466 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4467 }
   4468 
   4469 /*
    4470  * Adjust the number of TX and RX queues the system actually uses.
    4471  *
    4472  * The result is limited by the following parameters:
    4473  *     - The number of hardware queues
   4474  *     - The number of MSI-X vectors (= "nvectors" argument)
   4475  *     - ncpu
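          *
          * E.g. an 82576 (16 hardware queue pairs) attached with five
          * MSI-X vectors on a four-CPU system gets min(16, 5 - 1, ncpu)
          * = 4 queue pairs; the remaining vector serves the link
          * interrupt.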
   4476  */
   4477 static void
   4478 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4479 {
   4480 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4481 
   4482 	if (nvectors < 2) {
   4483 		sc->sc_nqueues = 1;
   4484 		return;
   4485 	}
   4486 
    4487 	switch (sc->sc_type) {
   4488 	case WM_T_82572:
   4489 		hw_ntxqueues = 2;
   4490 		hw_nrxqueues = 2;
   4491 		break;
   4492 	case WM_T_82574:
   4493 		hw_ntxqueues = 2;
   4494 		hw_nrxqueues = 2;
   4495 		break;
   4496 	case WM_T_82575:
   4497 		hw_ntxqueues = 4;
   4498 		hw_nrxqueues = 4;
   4499 		break;
   4500 	case WM_T_82576:
   4501 		hw_ntxqueues = 16;
   4502 		hw_nrxqueues = 16;
   4503 		break;
   4504 	case WM_T_82580:
   4505 	case WM_T_I350:
   4506 	case WM_T_I354:
   4507 		hw_ntxqueues = 8;
   4508 		hw_nrxqueues = 8;
   4509 		break;
   4510 	case WM_T_I210:
   4511 		hw_ntxqueues = 4;
   4512 		hw_nrxqueues = 4;
   4513 		break;
   4514 	case WM_T_I211:
   4515 		hw_ntxqueues = 2;
   4516 		hw_nrxqueues = 2;
   4517 		break;
   4518 		/*
    4519 		 * The ethernet controllers below do not support MSI-X,
    4520 		 * so this driver does not use multiqueue on them:
   4521 		 *     - WM_T_80003
   4522 		 *     - WM_T_ICH8
   4523 		 *     - WM_T_ICH9
   4524 		 *     - WM_T_ICH10
   4525 		 *     - WM_T_PCH
   4526 		 *     - WM_T_PCH2
   4527 		 *     - WM_T_PCH_LPT
   4528 		 */
   4529 	default:
   4530 		hw_ntxqueues = 1;
   4531 		hw_nrxqueues = 1;
   4532 		break;
   4533 	}
   4534 
   4535 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4536 
   4537 	/*
    4538 	 * More queues than MSI-X vectors cannot improve scaling, so limit
    4539 	 * the number of queues actually used.
   4540 	 */
   4541 	if (nvectors < hw_nqueues + 1) {
   4542 		sc->sc_nqueues = nvectors - 1;
   4543 	} else {
   4544 		sc->sc_nqueues = hw_nqueues;
   4545 	}
   4546 
   4547 	/*
    4548 	 * Likewise, more queues than CPUs cannot improve scaling, so
    4549 	 * limit the number of queues to ncpu.
   4550 	 */
   4551 	if (ncpu < sc->sc_nqueues)
   4552 		sc->sc_nqueues = ncpu;
   4553 }
   4554 
   4555 /*
   4556  * Both single interrupt MSI and INTx can use this function.
   4557  */
   4558 static int
   4559 wm_setup_legacy(struct wm_softc *sc)
   4560 {
   4561 	pci_chipset_tag_t pc = sc->sc_pc;
   4562 	const char *intrstr = NULL;
   4563 	char intrbuf[PCI_INTRSTR_LEN];
   4564 	int error;
   4565 
   4566 	error = wm_alloc_txrx_queues(sc);
   4567 	if (error) {
   4568 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4569 		    error);
   4570 		return ENOMEM;
   4571 	}
   4572 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4573 	    sizeof(intrbuf));
   4574 #ifdef WM_MPSAFE
   4575 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4576 #endif
   4577 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4578 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4579 	if (sc->sc_ihs[0] == NULL) {
    4580 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4581 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4582 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4583 		return ENOMEM;
   4584 	}
   4585 
   4586 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4587 	sc->sc_nintrs = 1;
   4588 	return 0;
   4589 }
   4590 
   4591 static int
   4592 wm_setup_msix(struct wm_softc *sc)
   4593 {
   4594 	void *vih;
   4595 	kcpuset_t *affinity;
   4596 	int qidx, error, intr_idx, txrx_established;
   4597 	pci_chipset_tag_t pc = sc->sc_pc;
   4598 	const char *intrstr = NULL;
   4599 	char intrbuf[PCI_INTRSTR_LEN];
   4600 	char intr_xname[INTRDEVNAMEBUF];
   4601 
   4602 	if (sc->sc_nqueues < ncpu) {
   4603 		/*
    4604 		 * To avoid interference with other devices' interrupts,
    4605 		 * Tx/Rx interrupt affinity starts at CPU#1.
   4606 		 */
   4607 		sc->sc_affinity_offset = 1;
   4608 	} else {
   4609 		/*
   4610 		 * In this case, this device uses all CPUs. For readability,
   4611 		 * we unify the affinity cpu_index with the MSI-X vector number.
   4612 		 */
   4613 		sc->sc_affinity_offset = 0;
   4614 	}
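        	/*
        	 * A hypothetical example of the resulting round-robin
        	 * mapping: with sc_affinity_offset = 1 and ncpu = 4, queue
        	 * interrupts 0, 1 and 2 are bound to CPU#1, CPU#2 and CPU#3
        	 * via (sc_affinity_offset + intr_idx) % ncpu below, keeping
        	 * CPU#0 free for other devices' interrupts.
        	 */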
   4615 
   4616 	error = wm_alloc_txrx_queues(sc);
   4617 	if (error) {
   4618 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4619 		    error);
   4620 		return ENOMEM;
   4621 	}
   4622 
   4623 	kcpuset_create(&affinity, false);
   4624 	intr_idx = 0;
   4625 
   4626 	/*
   4627 	 * TX and RX
   4628 	 */
   4629 	txrx_established = 0;
   4630 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4631 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4632 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4633 
   4634 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4635 		    sizeof(intrbuf));
   4636 #ifdef WM_MPSAFE
   4637 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4638 		    PCI_INTR_MPSAFE, true);
   4639 #endif
   4640 		memset(intr_xname, 0, sizeof(intr_xname));
   4641 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4642 		    device_xname(sc->sc_dev), qidx);
   4643 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4644 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4645 		if (vih == NULL) {
   4646 			aprint_error_dev(sc->sc_dev,
   4647 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4648 			    intrstr ? " at " : "",
   4649 			    intrstr ? intrstr : "");
   4650 
   4651 			goto fail;
   4652 		}
   4653 		kcpuset_zero(affinity);
   4654 		/* Round-robin affinity */
   4655 		kcpuset_set(affinity, affinity_to);
   4656 		error = interrupt_distribute(vih, affinity, NULL);
   4657 		if (error == 0) {
   4658 			aprint_normal_dev(sc->sc_dev,
   4659 			    "for TX and RX interrupting at %s affinity to %u\n",
   4660 			    intrstr, affinity_to);
   4661 		} else {
   4662 			aprint_normal_dev(sc->sc_dev,
   4663 			    "for TX and RX interrupting at %s\n", intrstr);
   4664 		}
   4665 		sc->sc_ihs[intr_idx] = vih;
   4666 		wmq->wmq_id = qidx;
   4667 		wmq->wmq_intr_idx = intr_idx;
   4668 
   4669 		txrx_established++;
   4670 		intr_idx++;
   4671 	}
   4672 
   4673 	/*
   4674 	 * LINK
   4675 	 */
   4676 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4677 	    sizeof(intrbuf));
   4678 #ifdef WM_MPSAFE
   4679 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4680 #endif
   4681 	memset(intr_xname, 0, sizeof(intr_xname));
   4682 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4683 	    device_xname(sc->sc_dev));
   4684 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4685 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4686 	if (vih == NULL) {
   4687 		aprint_error_dev(sc->sc_dev,
   4688 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4689 		    intrstr ? " at " : "",
   4690 		    intrstr ? intrstr : "");
   4691 
   4692 		goto fail;
   4693 	}
   4694 	/* keep default affinity to LINK interrupt */
   4695 	aprint_normal_dev(sc->sc_dev,
   4696 	    "for LINK interrupting at %s\n", intrstr);
   4697 	sc->sc_ihs[intr_idx] = vih;
   4698 	sc->sc_link_intr_idx = intr_idx;
   4699 
   4700 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4701 	kcpuset_destroy(affinity);
   4702 	return 0;
   4703 
   4704  fail:
   4705 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4706 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4707 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4708 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4709 	}
   4710 
   4711 	kcpuset_destroy(affinity);
   4712 	return ENOMEM;
   4713 }
   4714 
   4715 static void
   4716 wm_turnon(struct wm_softc *sc)
   4717 {
   4718 	int i;
   4719 
   4720 	KASSERT(WM_CORE_LOCKED(sc));
   4721 
   4722 	for (i = 0; i < sc->sc_nqueues; i++) {
   4723 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4724 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4725 
   4726 		mutex_enter(txq->txq_lock);
   4727 		txq->txq_stopping = false;
   4728 		mutex_exit(txq->txq_lock);
   4729 
   4730 		mutex_enter(rxq->rxq_lock);
   4731 		rxq->rxq_stopping = false;
   4732 		mutex_exit(rxq->rxq_lock);
   4733 	}
   4734 
   4735 	sc->sc_core_stopping = false;
   4736 }
   4737 
   4738 static void
   4739 wm_turnoff(struct wm_softc *sc)
   4740 {
   4741 	int i;
   4742 
   4743 	KASSERT(WM_CORE_LOCKED(sc));
   4744 
   4745 	sc->sc_core_stopping = true;
   4746 
   4747 	for (i = 0; i < sc->sc_nqueues; i++) {
   4748 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4749 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4750 
   4751 		mutex_enter(rxq->rxq_lock);
   4752 		rxq->rxq_stopping = true;
   4753 		mutex_exit(rxq->rxq_lock);
   4754 
   4755 		mutex_enter(txq->txq_lock);
   4756 		txq->txq_stopping = true;
   4757 		mutex_exit(txq->txq_lock);
   4758 	}
   4759 }
   4760 
   4761 /*
   4762  * wm_init:		[ifnet interface function]
   4763  *
   4764  *	Initialize the interface.
   4765  */
   4766 static int
   4767 wm_init(struct ifnet *ifp)
   4768 {
   4769 	struct wm_softc *sc = ifp->if_softc;
   4770 	int ret;
   4771 
   4772 	WM_CORE_LOCK(sc);
   4773 	ret = wm_init_locked(ifp);
   4774 	WM_CORE_UNLOCK(sc);
   4775 
   4776 	return ret;
   4777 }
   4778 
   4779 static int
   4780 wm_init_locked(struct ifnet *ifp)
   4781 {
   4782 	struct wm_softc *sc = ifp->if_softc;
   4783 	int i, j, trynum, error = 0;
   4784 	uint32_t reg;
   4785 
   4786 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4787 		device_xname(sc->sc_dev), __func__));
   4788 	KASSERT(WM_CORE_LOCKED(sc));
   4789 
   4790 	/*
   4791 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   4792 	 * There is a small but measurable benefit to avoiding the adjustment
   4793 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4794 	 * on such platforms.  One possibility is that the DMA itself is
   4795 	 * slightly more efficient if the front of the entire packet (instead
   4796 	 * of the front of the headers) is aligned.
   4797 	 *
   4798 	 * Note we must always set align_tweak to 0 if we are using
   4799 	 * jumbo frames.
   4800 	 */
   4801 #ifdef __NO_STRICT_ALIGNMENT
   4802 	sc->sc_align_tweak = 0;
   4803 #else
   4804 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4805 		sc->sc_align_tweak = 0;
   4806 	else
   4807 		sc->sc_align_tweak = 2;
   4808 #endif /* __NO_STRICT_ALIGNMENT */
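        	/*
        	 * E.g. with sc_align_tweak = 2, the receive buffer is offset
        	 * by two bytes, so the 14-byte Ethernet header ends on a
        	 * 4-byte boundary and the IP header that follows it is
        	 * 4-byte aligned.
        	 */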
   4809 
   4810 	/* Cancel any pending I/O. */
   4811 	wm_stop_locked(ifp, 0);
   4812 
   4813 	/* update statistics before reset */
   4814 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4815 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4816 
   4817 	/* PCH_SPT hardware workaround */
   4818 	if (sc->sc_type == WM_T_PCH_SPT)
   4819 		wm_flush_desc_rings(sc);
   4820 
   4821 	/* Reset the chip to a known state. */
   4822 	wm_reset(sc);
   4823 
   4824 	/* AMT based hardware can now take control from firmware */
   4825 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4826 		wm_get_hw_control(sc);
   4827 
   4828 	/* Init hardware bits */
   4829 	wm_initialize_hardware_bits(sc);
   4830 
   4831 	/* Reset the PHY. */
   4832 	if (sc->sc_flags & WM_F_HAS_MII)
   4833 		wm_gmii_reset(sc);
   4834 
   4835 	/* Calculate (E)ITR value */
   4836 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4837 		sc->sc_itr = 450;	/* For EITR */
   4838 	} else if (sc->sc_type >= WM_T_82543) {
   4839 		/*
   4840 		 * Set up the interrupt throttling register (units of 256ns)
   4841 		 * Note that a footnote in Intel's documentation says this
   4842 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4843 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4844 		 * that that is also true for the 1024ns units of the other
   4845 		 * interrupt-related timer registers -- so, really, we ought
   4846 		 * to divide this value by 4 when the link speed is low.
   4847 		 *
   4848 		 * XXX implement this division at link speed change!
   4849 		 */
   4850 
   4851 		/*
   4852 		 * For N interrupts/sec, set this value to:
   4853 		 * 1000000000 / (N * 256).  Note that we set the
   4854 		 * absolute and packet timer values to this value
   4855 		 * divided by 4 to get "simple timer" behavior.
   4856 		 */
   4857 
   4858 		sc->sc_itr = 1500;		/* 2604 ints/sec */
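        		/*
        		 * Checking the arithmetic of the comment above:
        		 * 1000000000 / (1500 * 256) ~= 2604, i.e. sc_itr = 1500
        		 * throttles to roughly 2604 interrupts/sec, one every
        		 * 1500 * 256ns = 384us.
        		 */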
   4859 	}
   4860 
   4861 	error = wm_init_txrx_queues(sc);
   4862 	if (error)
   4863 		goto out;
   4864 
   4865 	/*
   4866 	 * Clear out the VLAN table -- we don't use it (yet).
   4867 	 */
   4868 	CSR_WRITE(sc, WMREG_VET, 0);
   4869 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4870 		trynum = 10; /* Due to hw errata */
   4871 	else
   4872 		trynum = 1;
   4873 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4874 		for (j = 0; j < trynum; j++)
   4875 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4876 
   4877 	/*
   4878 	 * Set up flow-control parameters.
   4879 	 *
   4880 	 * XXX Values could probably stand some tuning.
   4881 	 */
   4882 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4883 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4884 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4885 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4886 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4887 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4888 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4889 	}
   4890 
   4891 	sc->sc_fcrtl = FCRTL_DFLT;
   4892 	if (sc->sc_type < WM_T_82543) {
   4893 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4894 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4895 	} else {
   4896 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4897 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4898 	}
   4899 
   4900 	if (sc->sc_type == WM_T_80003)
   4901 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4902 	else
   4903 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4904 
   4905 	/* Writes the control register. */
   4906 	wm_set_vlan(sc);
   4907 
   4908 	if (sc->sc_flags & WM_F_HAS_MII) {
   4909 		int val;
   4910 
   4911 		switch (sc->sc_type) {
   4912 		case WM_T_80003:
   4913 		case WM_T_ICH8:
   4914 		case WM_T_ICH9:
   4915 		case WM_T_ICH10:
   4916 		case WM_T_PCH:
   4917 		case WM_T_PCH2:
   4918 		case WM_T_PCH_LPT:
   4919 		case WM_T_PCH_SPT:
   4920 			/*
   4921 			 * Set the mac to wait the maximum time between each
   4922 			 * iteration and increase the max iterations when
   4923 			 * polling the phy; this fixes erroneous timeouts at
   4924 			 * 10Mbps.
   4925 			 */
   4926 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4927 			    0xFFFF);
   4928 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4929 			val |= 0x3F;
   4930 			wm_kmrn_writereg(sc,
   4931 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4932 			break;
   4933 		default:
   4934 			break;
   4935 		}
   4936 
   4937 		if (sc->sc_type == WM_T_80003) {
   4938 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4939 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4940 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4941 
   4942 			/* Bypass RX and TX FIFO's */
   4943 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4944 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4945 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4946 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4947 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4948 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4949 		}
   4950 	}
   4951 #if 0
   4952 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4953 #endif
   4954 
   4955 	/* Set up checksum offload parameters. */
   4956 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4957 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4958 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4959 		reg |= RXCSUM_IPOFL;
   4960 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4961 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4962 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4963 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4964 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4965 
   4966 	/* Set up MSI-X */
   4967 	if (sc->sc_nintrs > 1) {
   4968 		uint32_t ivar;
   4969 		struct wm_queue *wmq;
   4970 		int qid, qintr_idx;
   4971 
   4972 		if (sc->sc_type == WM_T_82575) {
   4973 			/* Interrupt control */
   4974 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4975 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4976 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4977 
   4978 			/* TX and RX */
   4979 			for (i = 0; i < sc->sc_nqueues; i++) {
   4980 				wmq = &sc->sc_queue[i];
   4981 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4982 				    EITR_TX_QUEUE(wmq->wmq_id)
   4983 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4984 			}
   4985 			/* Link status */
   4986 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4987 			    EITR_OTHER);
   4988 		} else if (sc->sc_type == WM_T_82574) {
   4989 			/* Interrupt control */
   4990 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4991 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4992 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4993 
   4994 			ivar = 0;
   4995 			/* TX and RX */
   4996 			for (i = 0; i < sc->sc_nqueues; i++) {
   4997 				wmq = &sc->sc_queue[i];
   4998 				qid = wmq->wmq_id;
   4999 				qintr_idx = wmq->wmq_intr_idx;
   5000 
   5001 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5002 				    IVAR_TX_MASK_Q_82574(qid));
   5003 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5004 				    IVAR_RX_MASK_Q_82574(qid));
   5005 			}
   5006 			/* Link status */
   5007 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5008 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5009 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5010 		} else {
   5011 			/* Interrupt control */
   5012 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5013 			    | GPIE_EIAME | GPIE_PBA);
   5014 
   5015 			switch (sc->sc_type) {
   5016 			case WM_T_82580:
   5017 			case WM_T_I350:
   5018 			case WM_T_I354:
   5019 			case WM_T_I210:
   5020 			case WM_T_I211:
   5021 				/* TX and RX */
   5022 				for (i = 0; i < sc->sc_nqueues; i++) {
   5023 					wmq = &sc->sc_queue[i];
   5024 					qid = wmq->wmq_id;
   5025 					qintr_idx = wmq->wmq_intr_idx;
   5026 
   5027 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5028 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5029 					ivar |= __SHIFTIN((qintr_idx
   5030 						| IVAR_VALID),
   5031 					    IVAR_TX_MASK_Q(qid));
   5032 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5033 					ivar |= __SHIFTIN((qintr_idx
   5034 						| IVAR_VALID),
   5035 					    IVAR_RX_MASK_Q(qid));
   5036 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5037 				}
   5038 				break;
   5039 			case WM_T_82576:
   5040 				/* TX and RX */
   5041 				for (i = 0; i < sc->sc_nqueues; i++) {
   5042 					wmq = &sc->sc_queue[i];
   5043 					qid = wmq->wmq_id;
   5044 					qintr_idx = wmq->wmq_intr_idx;
   5045 
   5046 					ivar = CSR_READ(sc,
   5047 					    WMREG_IVAR_Q_82576(qid));
   5048 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5049 					ivar |= __SHIFTIN((qintr_idx
   5050 						| IVAR_VALID),
   5051 					    IVAR_TX_MASK_Q_82576(qid));
   5052 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5053 					ivar |= __SHIFTIN((qintr_idx
   5054 						| IVAR_VALID),
   5055 					    IVAR_RX_MASK_Q_82576(qid));
   5056 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5057 					    ivar);
   5058 				}
   5059 				break;
   5060 			default:
   5061 				break;
   5062 			}
   5063 
   5064 			/* Link status */
   5065 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5066 			    IVAR_MISC_OTHER);
   5067 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5068 		}
   5069 
   5070 		if (sc->sc_nqueues > 1) {
   5071 			wm_init_rss(sc);
   5072 
   5073 			/*
   5074 			 * NOTE: Receive Full-Packet Checksum Offload
   5075 			 * is mutually exclusive with Multiqueue. However,
   5076 			 * this is not the same as TCP/IP checksums, which
   5077 			 * still work.
   5078 			 */
   5079 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5080 			reg |= RXCSUM_PCSD;
   5081 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5082 		}
   5083 	}
   5084 
   5085 	/* Set up the interrupt registers. */
   5086 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5087 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5088 	    ICR_RXO | ICR_RXT0;
   5089 	if (sc->sc_nintrs > 1) {
   5090 		uint32_t mask;
   5091 		struct wm_queue *wmq;
   5092 
   5093 		switch (sc->sc_type) {
   5094 		case WM_T_82574:
   5095 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5096 			    WMREG_EIAC_82574_MSIX_MASK);
   5097 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5098 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5099 			break;
   5100 		default:
   5101 			if (sc->sc_type == WM_T_82575) {
   5102 				mask = 0;
   5103 				for (i = 0; i < sc->sc_nqueues; i++) {
   5104 					wmq = &sc->sc_queue[i];
   5105 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5106 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5107 				}
   5108 				mask |= EITR_OTHER;
   5109 			} else {
   5110 				mask = 0;
   5111 				for (i = 0; i < sc->sc_nqueues; i++) {
   5112 					wmq = &sc->sc_queue[i];
   5113 					mask |= 1 << wmq->wmq_intr_idx;
   5114 				}
   5115 				mask |= 1 << sc->sc_link_intr_idx;
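        				/*
        				 * Illustrative example: with two
        				 * queues on MSI-X vectors 0 and 1 and
        				 * the link interrupt on vector 2, mask
        				 * ends up as 0x7, one bit per vector.
        				 */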
   5116 			}
   5117 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5118 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5119 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5120 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5121 			break;
   5122 		}
   5123 	} else
   5124 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5125 
   5126 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5127 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5128 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5129 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5130 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5131 		reg |= KABGTXD_BGSQLBIAS;
   5132 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5133 	}
   5134 
   5135 	/* Set up the inter-packet gap. */
   5136 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5137 
   5138 	if (sc->sc_type >= WM_T_82543) {
   5139 		/*
   5140 		 * XXX The 82574 has both ITR and EITR. Set EITR when we use
   5141 		 * the multiqueue function with MSI-X.
   5142 		 */
   5143 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5144 			int qidx;
   5145 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5146 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5147 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5148 				    sc->sc_itr);
   5149 			}
   5150 			/*
   5151 			 * Link interrupts occur much less frequently than
   5152 			 * TX and RX interrupts, so we don't tune the
   5153 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   5154 			 * FreeBSD's if_igb does.
   5155 			 */
   5156 		} else
   5157 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5158 	}
   5159 
   5160 	/* Set the VLAN ethernetype. */
   5161 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5162 
   5163 	/*
   5164 	 * Set up the transmit control register; we start out with
   5165 	 * a collision distance suitable for FDX, but update it whe
   5166 	 * a collision distance suitable for FDX, but update it when
   5167 	 */
   5168 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5169 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5170 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5171 	if (sc->sc_type >= WM_T_82571)
   5172 		sc->sc_tctl |= TCTL_MULR;
   5173 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5174 
   5175 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5176 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5177 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5178 	}
   5179 
   5180 	if (sc->sc_type == WM_T_80003) {
   5181 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5182 		reg &= ~TCTL_EXT_GCEX_MASK;
   5183 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5184 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5185 	}
   5186 
   5187 	/* Set the media. */
   5188 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5189 		goto out;
   5190 
   5191 	/* Configure for OS presence */
   5192 	wm_init_manageability(sc);
   5193 
   5194 	/*
   5195 	 * Set up the receive control register; we actually program
   5196 	 * the register when we set the receive filter.  Use multicast
   5197 	 * address offset type 0.
   5198 	 *
   5199 	 * Only the i82544 has the ability to strip the incoming
   5200 	 * CRC, so we don't enable that feature.
   5201 	 */
   5202 	sc->sc_mchash_type = 0;
   5203 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5204 	    | RCTL_MO(sc->sc_mchash_type);
   5205 
   5206 	/*
   5207 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5208 	 */
   5209 	if (sc->sc_type == WM_T_82574)
   5210 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5211 
   5212 	/*
   5213 	 * The I350 has a bug where it always strips the CRC whether
   5214 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5215 	 */
   5216 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5217 	    || (sc->sc_type == WM_T_I210))
   5218 		sc->sc_rctl |= RCTL_SECRC;
   5219 
   5220 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5221 	    && (ifp->if_mtu > ETHERMTU)) {
   5222 		sc->sc_rctl |= RCTL_LPE;
   5223 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5224 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5225 	}
   5226 
   5227 	if (MCLBYTES == 2048) {
   5228 		sc->sc_rctl |= RCTL_2k;
   5229 	} else {
   5230 		if (sc->sc_type >= WM_T_82543) {
   5231 			switch (MCLBYTES) {
   5232 			case 4096:
   5233 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5234 				break;
   5235 			case 8192:
   5236 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5237 				break;
   5238 			case 16384:
   5239 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5240 				break;
   5241 			default:
   5242 				panic("wm_init: MCLBYTES %d unsupported",
   5243 				    MCLBYTES);
   5244 				break;
   5245 			}
   5246 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5247 	}
   5248 
   5249 	/* Set the receive filter. */
   5250 	wm_set_filter(sc);
   5251 
   5252 	/* Enable ECC */
   5253 	switch (sc->sc_type) {
   5254 	case WM_T_82571:
   5255 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5256 		reg |= PBA_ECC_CORR_EN;
   5257 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5258 		break;
   5259 	case WM_T_PCH_LPT:
   5260 	case WM_T_PCH_SPT:
   5261 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5262 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5263 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5264 
   5265 		sc->sc_ctrl |= CTRL_MEHE;
   5266 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5267 		break;
   5268 	default:
   5269 		break;
   5270 	}
   5271 
   5272 	/* On 575 and later set RDT only if RX enabled */
   5273 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5274 		int qidx;
   5275 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5276 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5277 			for (i = 0; i < WM_NRXDESC; i++) {
   5278 				mutex_enter(rxq->rxq_lock);
   5279 				wm_init_rxdesc(rxq, i);
   5280 				mutex_exit(rxq->rxq_lock);
   5281 
   5282 			}
   5283 		}
   5284 	}
   5285 
   5286 	wm_turnon(sc);
   5287 
   5288 	/* Start the one second link check clock. */
   5289 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5290 
   5291 	/* ...all done! */
   5292 	ifp->if_flags |= IFF_RUNNING;
   5293 	ifp->if_flags &= ~IFF_OACTIVE;
   5294 
   5295  out:
   5296 	sc->sc_if_flags = ifp->if_flags;
   5297 	if (error)
   5298 		log(LOG_ERR, "%s: interface not running\n",
   5299 		    device_xname(sc->sc_dev));
   5300 	return error;
   5301 }
   5302 
   5303 /*
   5304  * wm_stop:		[ifnet interface function]
   5305  *
   5306  *	Stop transmission on the interface.
   5307  */
   5308 static void
   5309 wm_stop(struct ifnet *ifp, int disable)
   5310 {
   5311 	struct wm_softc *sc = ifp->if_softc;
   5312 
   5313 	WM_CORE_LOCK(sc);
   5314 	wm_stop_locked(ifp, disable);
   5315 	WM_CORE_UNLOCK(sc);
   5316 }
   5317 
   5318 static void
   5319 wm_stop_locked(struct ifnet *ifp, int disable)
   5320 {
   5321 	struct wm_softc *sc = ifp->if_softc;
   5322 	struct wm_txsoft *txs;
   5323 	int i, qidx;
   5324 
   5325 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5326 		device_xname(sc->sc_dev), __func__));
   5327 	KASSERT(WM_CORE_LOCKED(sc));
   5328 
   5329 	wm_turnoff(sc);
   5330 
   5331 	/* Stop the one second clock. */
   5332 	callout_stop(&sc->sc_tick_ch);
   5333 
   5334 	/* Stop the 82547 Tx FIFO stall check timer. */
   5335 	if (sc->sc_type == WM_T_82547)
   5336 		callout_stop(&sc->sc_txfifo_ch);
   5337 
   5338 	if (sc->sc_flags & WM_F_HAS_MII) {
   5339 		/* Down the MII. */
   5340 		mii_down(&sc->sc_mii);
   5341 	} else {
   5342 #if 0
   5343 		/* Should we clear PHY's status properly? */
   5344 		wm_reset(sc);
   5345 #endif
   5346 	}
   5347 
   5348 	/* Stop the transmit and receive processes. */
   5349 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5350 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5351 	sc->sc_rctl &= ~RCTL_EN;
   5352 
   5353 	/*
   5354 	 * Clear the interrupt mask to ensure the device cannot assert its
   5355 	 * interrupt line.
   5356 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5357 	 * service any currently pending or shared interrupt.
   5358 	 */
   5359 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5360 	sc->sc_icr = 0;
   5361 	if (sc->sc_nintrs > 1) {
   5362 		if (sc->sc_type != WM_T_82574) {
   5363 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5364 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5365 		} else
   5366 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5367 	}
   5368 
   5369 	/* Release any queued transmit buffers. */
   5370 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5371 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5372 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5373 		mutex_enter(txq->txq_lock);
   5374 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5375 			txs = &txq->txq_soft[i];
   5376 			if (txs->txs_mbuf != NULL) {
   5377 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5378 				m_freem(txs->txs_mbuf);
   5379 				txs->txs_mbuf = NULL;
   5380 			}
   5381 		}
   5382 		mutex_exit(txq->txq_lock);
   5383 	}
   5384 
   5385 	/* Mark the interface as down and cancel the watchdog timer. */
   5386 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5387 	ifp->if_timer = 0;
   5388 
   5389 	if (disable) {
   5390 		for (i = 0; i < sc->sc_nqueues; i++) {
   5391 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5392 			mutex_enter(rxq->rxq_lock);
   5393 			wm_rxdrain(rxq);
   5394 			mutex_exit(rxq->rxq_lock);
   5395 		}
   5396 	}
   5397 
   5398 #if 0 /* notyet */
   5399 	if (sc->sc_type >= WM_T_82544)
   5400 		CSR_WRITE(sc, WMREG_WUC, 0);
   5401 #endif
   5402 }
   5403 
   5404 static void
   5405 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5406 {
   5407 	struct mbuf *m;
   5408 	int i;
   5409 
   5410 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5411 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5412 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5413 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5414 		    m->m_data, m->m_len, m->m_flags);
   5415 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5416 	    i, i == 1 ? "" : "s");
   5417 }
   5418 
   5419 /*
   5420  * wm_82547_txfifo_stall:
   5421  *
   5422  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5423  *	reset the FIFO pointers, and restart packet transmission.
   5424  */
   5425 static void
   5426 wm_82547_txfifo_stall(void *arg)
   5427 {
   5428 	struct wm_softc *sc = arg;
   5429 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5430 
   5431 	mutex_enter(txq->txq_lock);
   5432 
   5433 	if (txq->txq_stopping)
   5434 		goto out;
   5435 
   5436 	if (txq->txq_fifo_stall) {
   5437 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5438 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5439 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5440 			/*
   5441 			 * Packets have drained.  Stop transmitter, reset
   5442 			 * FIFO pointers, restart transmitter, and kick
   5443 			 * the packet queue.
   5444 			 */
   5445 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5446 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5447 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5448 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5449 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5450 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5451 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5452 			CSR_WRITE_FLUSH(sc);
   5453 
   5454 			txq->txq_fifo_head = 0;
   5455 			txq->txq_fifo_stall = 0;
   5456 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5457 		} else {
   5458 			/*
   5459 			 * Still waiting for packets to drain; try again in
   5460 			 * another tick.
   5461 			 */
   5462 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5463 		}
   5464 	}
   5465 
   5466 out:
   5467 	mutex_exit(txq->txq_lock);
   5468 }
   5469 
   5470 /*
   5471  * wm_82547_txfifo_bugchk:
   5472  *
   5473  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5474  *	prevent enqueueing a packet that would wrap around the end
   5475  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5476  *
   5477  *	We do this by checking the amount of space before the end
   5478  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5479  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5480  *	the internal FIFO pointers to the beginning, and restart
   5481  *	transmission on the interface.
   5482  */
   5483 #define	WM_FIFO_HDR		0x10
   5484 #define	WM_82547_PAD_LEN	0x3e0
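        /*
         * An illustrative example with made-up numbers: suppose only
         * space = 64 bytes remain before the end of the FIFO.  A 1514-byte
         * frame gives len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536,
         * and 1536 >= WM_82547_PAD_LEN (0x3e0 = 992) + 64 = 1056, so the
         * packet would wrap and we stall the FIFO until it drains.
         */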
   5485 static int
   5486 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5487 {
   5488 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5489 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5490 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5491 
   5492 	/* Just return if already stalled. */
   5493 	if (txq->txq_fifo_stall)
   5494 		return 1;
   5495 
   5496 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5497 		/* Stall only occurs in half-duplex mode. */
   5498 		goto send_packet;
   5499 	}
   5500 
   5501 	if (len >= WM_82547_PAD_LEN + space) {
   5502 		txq->txq_fifo_stall = 1;
   5503 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5504 		return 1;
   5505 	}
   5506 
   5507  send_packet:
   5508 	txq->txq_fifo_head += len;
   5509 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5510 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5511 
   5512 	return 0;
   5513 }
   5514 
   5515 static int
   5516 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5517 {
   5518 	int error;
   5519 
   5520 	/*
   5521 	 * Allocate the control data structures, and create and load the
   5522 	 * DMA map for it.
   5523 	 *
   5524 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5525 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5526 	 * both sets within the same 4G segment.
   5527 	 */
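        	/*
        	 * The (bus_size_t)0x100000000ULL boundary argument passed to
        	 * bus_dmamem_alloc() below is what enforces this: the
        	 * allocated memory may not cross a 4GB boundary.
        	 */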
   5528 	if (sc->sc_type < WM_T_82544)
   5529 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5530 	else
   5531 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5532 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5533 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5534 	else
   5535 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5536 
   5537 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5538 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5539 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5540 		aprint_error_dev(sc->sc_dev,
   5541 		    "unable to allocate TX control data, error = %d\n",
   5542 		    error);
   5543 		goto fail_0;
   5544 	}
   5545 
   5546 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5547 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5548 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5549 		aprint_error_dev(sc->sc_dev,
   5550 		    "unable to map TX control data, error = %d\n", error);
   5551 		goto fail_1;
   5552 	}
   5553 
   5554 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5555 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5556 		aprint_error_dev(sc->sc_dev,
   5557 		    "unable to create TX control data DMA map, error = %d\n",
   5558 		    error);
   5559 		goto fail_2;
   5560 	}
   5561 
   5562 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5563 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5564 		aprint_error_dev(sc->sc_dev,
   5565 		    "unable to load TX control data DMA map, error = %d\n",
   5566 		    error);
   5567 		goto fail_3;
   5568 	}
   5569 
   5570 	return 0;
   5571 
   5572  fail_3:
   5573 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5574  fail_2:
   5575 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5576 	    WM_TXDESCS_SIZE(txq));
   5577  fail_1:
   5578 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5579  fail_0:
   5580 	return error;
   5581 }
   5582 
   5583 static void
   5584 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5585 {
   5586 
   5587 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5588 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5589 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5590 	    WM_TXDESCS_SIZE(txq));
   5591 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5592 }
   5593 
   5594 static int
   5595 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5596 {
   5597 	int error;
   5598 	size_t rxq_descs_size;
   5599 
   5600 	/*
   5601 	 * Allocate the control data structures, and create and load the
   5602 	 * DMA map for it.
   5603 	 *
   5604 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5605 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5606 	 * both sets within the same 4G segment.
   5607 	 */
   5608 	rxq->rxq_ndesc = WM_NRXDESC;
   5609 	if (sc->sc_type == WM_T_82574)
   5610 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5611 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5612 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5613 	else
   5614 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5615 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5616 
   5617 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5618 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5619 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5620 		aprint_error_dev(sc->sc_dev,
   5621 		    "unable to allocate RX control data, error = %d\n",
   5622 		    error);
   5623 		goto fail_0;
   5624 	}
   5625 
   5626 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5627 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5628 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5629 		aprint_error_dev(sc->sc_dev,
   5630 		    "unable to map RX control data, error = %d\n", error);
   5631 		goto fail_1;
   5632 	}
   5633 
   5634 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5635 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5636 		aprint_error_dev(sc->sc_dev,
   5637 		    "unable to create RX control data DMA map, error = %d\n",
   5638 		    error);
   5639 		goto fail_2;
   5640 	}
   5641 
   5642 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5643 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5644 		aprint_error_dev(sc->sc_dev,
   5645 		    "unable to load RX control data DMA map, error = %d\n",
   5646 		    error);
   5647 		goto fail_3;
   5648 	}
   5649 
   5650 	return 0;
   5651 
   5652  fail_3:
   5653 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5654  fail_2:
   5655 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5656 	    rxq_descs_size);
   5657  fail_1:
   5658 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5659  fail_0:
   5660 	return error;
   5661 }
   5662 
   5663 static void
   5664 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5665 {
   5666 
   5667 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5668 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5669 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5670 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5671 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5672 }
   5673 
   5674 
   5675 static int
   5676 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5677 {
   5678 	int i, error;
   5679 
   5680 	/* Create the transmit buffer DMA maps. */
   5681 	WM_TXQUEUELEN(txq) =
   5682 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5683 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5684 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5685 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5686 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5687 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5688 			aprint_error_dev(sc->sc_dev,
   5689 			    "unable to create Tx DMA map %d, error = %d\n",
   5690 			    i, error);
   5691 			goto fail;
   5692 		}
   5693 	}
   5694 
   5695 	return 0;
   5696 
   5697  fail:
   5698 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5699 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5700 			bus_dmamap_destroy(sc->sc_dmat,
   5701 			    txq->txq_soft[i].txs_dmamap);
   5702 	}
   5703 	return error;
   5704 }
   5705 
   5706 static void
   5707 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5708 {
   5709 	int i;
   5710 
   5711 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5712 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5713 			bus_dmamap_destroy(sc->sc_dmat,
   5714 			    txq->txq_soft[i].txs_dmamap);
   5715 	}
   5716 }
   5717 
   5718 static int
   5719 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5720 {
   5721 	int i, error;
   5722 
   5723 	/* Create the receive buffer DMA maps. */
   5724 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5725 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5726 			    MCLBYTES, 0, 0,
   5727 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5728 			aprint_error_dev(sc->sc_dev,
   5729 			    "unable to create Rx DMA map %d error = %d\n",
   5730 			    i, error);
   5731 			goto fail;
   5732 		}
   5733 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5734 	}
   5735 
   5736 	return 0;
   5737 
   5738  fail:
   5739 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5740 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5741 			bus_dmamap_destroy(sc->sc_dmat,
   5742 			    rxq->rxq_soft[i].rxs_dmamap);
   5743 	}
   5744 	return error;
   5745 }
   5746 
   5747 static void
   5748 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5749 {
   5750 	int i;
   5751 
   5752 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5753 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5754 			bus_dmamap_destroy(sc->sc_dmat,
   5755 			    rxq->rxq_soft[i].rxs_dmamap);
   5756 	}
   5757 }
   5758 
   5759 /*
   5760  * wm_alloc_txrx_queues:
   5761  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5762  */
   5763 static int
   5764 wm_alloc_txrx_queues(struct wm_softc *sc)
   5765 {
   5766 	int i, error, tx_done, rx_done;
   5767 
   5768 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5769 	    KM_SLEEP);
   5770 	if (sc->sc_queue == NULL) {
   5771 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5772 		error = ENOMEM;
   5773 		goto fail_0;
   5774 	}
   5775 
   5776 	/*
   5777 	 * For transmission
   5778 	 */
   5779 	error = 0;
   5780 	tx_done = 0;
   5781 	for (i = 0; i < sc->sc_nqueues; i++) {
   5782 #ifdef WM_EVENT_COUNTERS
   5783 		int j;
   5784 		const char *xname;
   5785 #endif
   5786 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5787 		txq->txq_sc = sc;
   5788 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5789 
   5790 		error = wm_alloc_tx_descs(sc, txq);
   5791 		if (error)
   5792 			break;
   5793 		error = wm_alloc_tx_buffer(sc, txq);
   5794 		if (error) {
   5795 			wm_free_tx_descs(sc, txq);
   5796 			break;
   5797 		}
   5798 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5799 		if (txq->txq_interq == NULL) {
   5800 			wm_free_tx_descs(sc, txq);
   5801 			wm_free_tx_buffer(sc, txq);
   5802 			error = ENOMEM;
   5803 			break;
   5804 		}
   5805 
   5806 #ifdef WM_EVENT_COUNTERS
   5807 		xname = device_xname(sc->sc_dev);
   5808 
   5809 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5810 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5811 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5812 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5813 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5814 
   5815 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5816 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5817 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5818 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5819 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5820 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5821 
   5822 		for (j = 0; j < WM_NTXSEGS; j++) {
   5823 			snprintf(txq->txq_txseg_evcnt_names[j],
   5824 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5825 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5826 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5827 		}
   5828 
   5829 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5830 
   5831 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5832 #endif /* WM_EVENT_COUNTERS */
   5833 
   5834 		tx_done++;
   5835 	}
   5836 	if (error)
   5837 		goto fail_1;
   5838 
   5839 	/*
   5840 	 * For receive
   5841 	 */
   5842 	error = 0;
   5843 	rx_done = 0;
   5844 	for (i = 0; i < sc->sc_nqueues; i++) {
   5845 #ifdef WM_EVENT_COUNTERS
   5846 		const char *xname;
   5847 #endif
   5848 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5849 		rxq->rxq_sc = sc;
   5850 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5851 
   5852 		error = wm_alloc_rx_descs(sc, rxq);
   5853 		if (error)
   5854 			break;
   5855 
   5856 		error = wm_alloc_rx_buffer(sc, rxq);
   5857 		if (error) {
   5858 			wm_free_rx_descs(sc, rxq);
   5859 			break;
   5860 		}
   5861 
   5862 #ifdef WM_EVENT_COUNTERS
   5863 		xname = device_xname(sc->sc_dev);
   5864 
   5865 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5866 
   5867 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5868 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5869 #endif /* WM_EVENT_COUNTERS */
   5870 
   5871 		rx_done++;
   5872 	}
   5873 	if (error)
   5874 		goto fail_2;
   5875 
   5876 	return 0;
   5877 
   5878  fail_2:
   5879 	for (i = 0; i < rx_done; i++) {
   5880 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5881 		wm_free_rx_buffer(sc, rxq);
   5882 		wm_free_rx_descs(sc, rxq);
   5883 		if (rxq->rxq_lock)
   5884 			mutex_obj_free(rxq->rxq_lock);
   5885 	}
   5886  fail_1:
   5887 	for (i = 0; i < tx_done; i++) {
   5888 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5889 		pcq_destroy(txq->txq_interq);
   5890 		wm_free_tx_buffer(sc, txq);
   5891 		wm_free_tx_descs(sc, txq);
   5892 		if (txq->txq_lock)
   5893 			mutex_obj_free(txq->txq_lock);
   5894 	}
   5895 
   5896 	kmem_free(sc->sc_queue,
   5897 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5898  fail_0:
   5899 	return error;
   5900 }
   5901 
   5902 /*
   5903  * wm_free_txrx_queues:
   5904  *	Free {tx,rx}descs and {tx,rx} buffers
   5905  */
   5906 static void
   5907 wm_free_txrx_queues(struct wm_softc *sc)
   5908 {
   5909 	int i;
   5910 
   5911 	for (i = 0; i < sc->sc_nqueues; i++) {
   5912 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5913 		wm_free_rx_buffer(sc, rxq);
   5914 		wm_free_rx_descs(sc, rxq);
   5915 		if (rxq->rxq_lock)
   5916 			mutex_obj_free(rxq->rxq_lock);
   5917 	}
   5918 
   5919 	for (i = 0; i < sc->sc_nqueues; i++) {
   5920 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5921 		struct mbuf *m;
   5922 
   5923 		/* drain txq_interq */
   5924 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   5925 			m_freem(m);
   5926 		pcq_destroy(txq->txq_interq);
   5927 
   5928 		wm_free_tx_buffer(sc, txq);
   5929 		wm_free_tx_descs(sc, txq);
   5930 		if (txq->txq_lock)
   5931 			mutex_obj_free(txq->txq_lock);
   5932 	}
   5933 
   5934 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5935 }
   5936 
   5937 static void
   5938 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5939 {
   5940 
   5941 	KASSERT(mutex_owned(txq->txq_lock));
   5942 
   5943 	/* Initialize the transmit descriptor ring. */
   5944 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5945 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5946 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5947 	txq->txq_free = WM_NTXDESC(txq);
   5948 	txq->txq_next = 0;
   5949 }
   5950 
   5951 static void
   5952 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5953     struct wm_txqueue *txq)
   5954 {
   5955 
   5956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5957 		device_xname(sc->sc_dev), __func__));
   5958 	KASSERT(mutex_owned(txq->txq_lock));
   5959 
   5960 	if (sc->sc_type < WM_T_82543) {
   5961 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5962 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5963 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5964 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5965 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5966 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5967 	} else {
   5968 		int qid = wmq->wmq_id;
   5969 
   5970 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5971 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5972 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5973 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5974 
   5975 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5976 			/*
   5977 			 * Don't write TDT before TCTL.EN is set.
   5978 			 * See the documentation.
   5979 			 */
   5980 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5981 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5982 			    | TXDCTL_WTHRESH(0));
   5983 		else {
   5984 			/* ITR / 4 */
   5985 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5986 			if (sc->sc_type >= WM_T_82540) {
   5987 				/* should be same */
   5988 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5989 			}
   5990 
   5991 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5992 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5993 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5994 		}
   5995 	}
   5996 }
   5997 
   5998 static void
   5999 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6000 {
   6001 	int i;
   6002 
   6003 	KASSERT(mutex_owned(txq->txq_lock));
   6004 
   6005 	/* Initialize the transmit job descriptors. */
   6006 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6007 		txq->txq_soft[i].txs_mbuf = NULL;
   6008 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6009 	txq->txq_snext = 0;
   6010 	txq->txq_sdirty = 0;
   6011 }
   6012 
   6013 static void
   6014 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6015     struct wm_txqueue *txq)
   6016 {
   6017 
   6018 	KASSERT(mutex_owned(txq->txq_lock));
   6019 
   6020 	/*
   6021 	 * Set up some register offsets that are different between
   6022 	 * the i82542 and the i82543 and later chips.
   6023 	 */
   6024 	if (sc->sc_type < WM_T_82543)
   6025 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6026 	else
   6027 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6028 
   6029 	wm_init_tx_descs(sc, txq);
   6030 	wm_init_tx_regs(sc, wmq, txq);
   6031 	wm_init_tx_buffer(sc, txq);
   6032 }
   6033 
   6034 static void
   6035 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6036     struct wm_rxqueue *rxq)
   6037 {
   6038 
   6039 	KASSERT(mutex_owned(rxq->rxq_lock));
   6040 
   6041 	/*
   6042 	 * Initialize the receive descriptor and receive job
   6043 	 * descriptor rings.
   6044 	 */
   6045 	if (sc->sc_type < WM_T_82543) {
   6046 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6047 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6048 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6049 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6050 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6051 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6052 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6053 
   6054 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6055 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6056 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6057 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6058 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6059 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6060 	} else {
   6061 		int qid = wmq->wmq_id;
   6062 
   6063 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6064 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6065 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6066 
   6067 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6068 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6069 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   6070 
   6071 			/* Currently, we support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6072 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6073 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
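        			/*
        			 * E.g. assuming SRRCTL_BSIZEPKT_SHIFT is 10 (1KB
        			 * units), the usual MCLBYTES of 2048 programs
        			 * 2048 >> 10 = 2 into the packet buffer size
        			 * field, i.e. 2KB receive buffers.
        			 */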
   6074 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6075 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6076 			    | RXDCTL_WTHRESH(1));
   6077 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6078 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6079 		} else {
   6080 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6081 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6082 			/* ITR / 4 */
   6083 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6084 			/* MUST be same */
   6085 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6086 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6087 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6088 		}
   6089 	}
   6090 }
   6091 
   6092 static int
   6093 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6094 {
   6095 	struct wm_rxsoft *rxs;
   6096 	int error, i;
   6097 
   6098 	KASSERT(mutex_owned(rxq->rxq_lock));
   6099 
   6100 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6101 		rxs = &rxq->rxq_soft[i];
   6102 		if (rxs->rxs_mbuf == NULL) {
   6103 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6104 				log(LOG_ERR, "%s: unable to allocate or map "
   6105 				    "rx buffer %d, error = %d\n",
   6106 				    device_xname(sc->sc_dev), i, error);
   6107 				/*
   6108 				 * XXX Should attempt to run with fewer receive
   6109 				 * XXX buffers instead of just failing.
   6110 				 */
   6111 				wm_rxdrain(rxq);
   6112 				return ENOMEM;
   6113 			}
   6114 		} else {
   6115 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6116 				wm_init_rxdesc(rxq, i);
   6117 			/*
   6118 			 * For 82575 and newer devices, the RX descriptors
   6119 			 * must be initialized after the setting of RCTL.EN in
   6120 			 * wm_set_filter().
   6121 			 */
   6122 		}
   6123 	}
   6124 	rxq->rxq_ptr = 0;
   6125 	rxq->rxq_discard = 0;
   6126 	WM_RXCHAIN_RESET(rxq);
   6127 
   6128 	return 0;
   6129 }
   6130 
   6131 static int
   6132 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6133     struct wm_rxqueue *rxq)
   6134 {
   6135 
   6136 	KASSERT(mutex_owned(rxq->rxq_lock));
   6137 
   6138 	/*
   6139 	 * Set up some register offsets that are different between
   6140 	 * the i82542 and the i82543 and later chips.
   6141 	 */
   6142 	if (sc->sc_type < WM_T_82543)
   6143 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6144 	else
   6145 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6146 
   6147 	wm_init_rx_regs(sc, wmq, rxq);
   6148 	return wm_init_rx_buffer(sc, rxq);
   6149 }
   6150 
   6151 /*
   6152  * wm_init_txrx_queues:
   6153  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6154  */
   6155 static int
   6156 wm_init_txrx_queues(struct wm_softc *sc)
   6157 {
   6158 	int i, error = 0;
   6159 
   6160 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6161 		device_xname(sc->sc_dev), __func__));
   6162 
   6163 	for (i = 0; i < sc->sc_nqueues; i++) {
   6164 		struct wm_queue *wmq = &sc->sc_queue[i];
   6165 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6166 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6167 
   6168 		mutex_enter(txq->txq_lock);
   6169 		wm_init_tx_queue(sc, wmq, txq);
   6170 		mutex_exit(txq->txq_lock);
   6171 
   6172 		mutex_enter(rxq->rxq_lock);
   6173 		error = wm_init_rx_queue(sc, wmq, rxq);
   6174 		mutex_exit(rxq->rxq_lock);
   6175 		if (error)
   6176 			break;
   6177 	}
   6178 
   6179 	return error;
   6180 }
   6181 
   6182 /*
   6183  * wm_tx_offload:
   6184  *
   6185  *	Set up TCP/IP checksumming parameters for the
   6186  *	specified packet.
   6187  */
   6188 static int
   6189 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6190     uint8_t *fieldsp)
   6191 {
   6192 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6193 	struct mbuf *m0 = txs->txs_mbuf;
   6194 	struct livengood_tcpip_ctxdesc *t;
   6195 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6196 	uint32_t ipcse;
   6197 	struct ether_header *eh;
   6198 	int offset, iphl;
   6199 	uint8_t fields;
   6200 
   6201 	/*
   6202 	 * XXX It would be nice if the mbuf pkthdr had offset
   6203 	 * fields for the protocol headers.
   6204 	 */
   6205 
   6206 	eh = mtod(m0, struct ether_header *);
   6207 	switch (htons(eh->ether_type)) {
   6208 	case ETHERTYPE_IP:
   6209 	case ETHERTYPE_IPV6:
   6210 		offset = ETHER_HDR_LEN;
   6211 		break;
   6212 
   6213 	case ETHERTYPE_VLAN:
   6214 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6215 		break;
   6216 
   6217 	default:
   6218 		/*
   6219 		 * Don't support this protocol or encapsulation.
   6220 		 */
   6221 		*fieldsp = 0;
   6222 		*cmdp = 0;
   6223 		return 0;
   6224 	}
   6225 
   6226 	if ((m0->m_pkthdr.csum_flags &
   6227 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6228 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6229 	} else {
   6230 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6231 	}
   6232 	ipcse = offset + iphl - 1;
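        	/*
        	 * For example, for a standard untagged IPv4 packet with no
        	 * IP options, offset = ETHER_HDR_LEN = 14 and iphl = 20, so
        	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte of
        	 * the IP header.
        	 */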
   6233 
   6234 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6235 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6236 	seg = 0;
   6237 	fields = 0;
   6238 
   6239 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6240 		int hlen = offset + iphl;
   6241 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6242 
   6243 		if (__predict_false(m0->m_len <
   6244 				    (hlen + sizeof(struct tcphdr)))) {
   6245 			/*
   6246 			 * TCP/IP headers are not in the first mbuf; we need
   6247 			 * to do this the slow and painful way.  Let's just
   6248 			 * hope this doesn't happen very often.
   6249 			 */
   6250 			struct tcphdr th;
   6251 
   6252 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6253 
   6254 			m_copydata(m0, hlen, sizeof(th), &th);
   6255 			if (v4) {
   6256 				struct ip ip;
   6257 
   6258 				m_copydata(m0, offset, sizeof(ip), &ip);
   6259 				ip.ip_len = 0;
   6260 				m_copyback(m0,
   6261 				    offset + offsetof(struct ip, ip_len),
   6262 				    sizeof(ip.ip_len), &ip.ip_len);
   6263 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6264 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6265 			} else {
   6266 				struct ip6_hdr ip6;
   6267 
   6268 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6269 				ip6.ip6_plen = 0;
   6270 				m_copyback(m0,
   6271 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6272 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6273 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6274 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6275 			}
   6276 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6277 			    sizeof(th.th_sum), &th.th_sum);
   6278 
   6279 			hlen += th.th_off << 2;
   6280 		} else {
   6281 			/*
   6282 			 * TCP/IP headers are in the first mbuf; we can do
   6283 			 * this the easy way.
   6284 			 */
   6285 			struct tcphdr *th;
   6286 
   6287 			if (v4) {
   6288 				struct ip *ip =
   6289 				    (void *)(mtod(m0, char *) + offset);
   6290 				th = (void *)(mtod(m0, char *) + hlen);
   6291 
   6292 				ip->ip_len = 0;
   6293 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6294 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6295 			} else {
   6296 				struct ip6_hdr *ip6 =
   6297 				    (void *)(mtod(m0, char *) + offset);
   6298 				th = (void *)(mtod(m0, char *) + hlen);
   6299 
   6300 				ip6->ip6_plen = 0;
   6301 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6302 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6303 			}
   6304 			hlen += th->th_off << 2;
   6305 		}
   6306 
   6307 		if (v4) {
   6308 			WM_Q_EVCNT_INCR(txq, txtso);
   6309 			cmdlen |= WTX_TCPIP_CMD_IP;
   6310 		} else {
   6311 			WM_Q_EVCNT_INCR(txq, txtso6);
   6312 			ipcse = 0;
   6313 		}
   6314 		cmd |= WTX_TCPIP_CMD_TSE;
   6315 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6316 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6317 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6318 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6319 	}
   6320 
   6321 	/*
   6322 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6323 	 * offload feature, if we load the context descriptor, we
   6324 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6325 	 */
   6326 
   6327 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6328 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6329 	    WTX_TCPIP_IPCSE(ipcse);
   6330 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6331 		WM_Q_EVCNT_INCR(txq, txipsum);
   6332 		fields |= WTX_IXSM;
   6333 	}
   6334 
   6335 	offset += iphl;
   6336 
   6337 	if (m0->m_pkthdr.csum_flags &
   6338 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6339 		WM_Q_EVCNT_INCR(txq, txtusum);
   6340 		fields |= WTX_TXSM;
   6341 		tucs = WTX_TCPIP_TUCSS(offset) |
   6342 		    WTX_TCPIP_TUCSO(offset +
   6343 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6344 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6345 	} else if ((m0->m_pkthdr.csum_flags &
   6346 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6347 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6348 		fields |= WTX_TXSM;
   6349 		tucs = WTX_TCPIP_TUCSS(offset) |
   6350 		    WTX_TCPIP_TUCSO(offset +
   6351 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6352 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6353 	} else {
   6354 		/* Just initialize it to a valid TCP context. */
   6355 		tucs = WTX_TCPIP_TUCSS(offset) |
   6356 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6357 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6358 	}
   6359 
   6360 	/* Fill in the context descriptor. */
   6361 	t = (struct livengood_tcpip_ctxdesc *)
   6362 	    &txq->txq_descs[txq->txq_next];
   6363 	t->tcpip_ipcs = htole32(ipcs);
   6364 	t->tcpip_tucs = htole32(tucs);
   6365 	t->tcpip_cmdlen = htole32(cmdlen);
   6366 	t->tcpip_seg = htole32(seg);
   6367 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6368 
   6369 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6370 	txs->txs_ndesc++;
   6371 
   6372 	*cmdp = cmd;
   6373 	*fieldsp = fields;
   6374 
   6375 	return 0;
   6376 }
   6377 
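         /*
          * wm_select_txqueue:
          *
          *	Select a Tx queue for the given packet, spreading senders
          *	across the available queues by CPU index.
          */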
   6378 static inline int
   6379 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6380 {
   6381 	struct wm_softc *sc = ifp->if_softc;
   6382 	u_int cpuid = cpu_index(curcpu());
   6383 
   6384 	/*
    6385 	 * Currently, a simple distribution strategy.
    6386 	 * TODO:
    6387 	 * Distribute by flowid (RSS hash value).
   6388 	 */
   6389 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6390 }
   6391 
   6392 /*
   6393  * wm_start:		[ifnet interface function]
   6394  *
   6395  *	Start packet transmission on the interface.
   6396  */
   6397 static void
   6398 wm_start(struct ifnet *ifp)
   6399 {
   6400 	struct wm_softc *sc = ifp->if_softc;
   6401 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6402 
   6403 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6404 
   6405 	/*
   6406 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6407 	 */
   6408 
   6409 	mutex_enter(txq->txq_lock);
   6410 	if (!txq->txq_stopping)
   6411 		wm_start_locked(ifp);
   6412 	mutex_exit(txq->txq_lock);
   6413 }
   6414 
   6415 static void
   6416 wm_start_locked(struct ifnet *ifp)
   6417 {
   6418 	struct wm_softc *sc = ifp->if_softc;
   6419 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6420 
   6421 	wm_send_common_locked(ifp, txq, false);
   6422 }
   6423 
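         /*
          * wm_transmit:	[ifnet interface function]
          *
          *	Enqueue a packet on the Tx queue selected by
          *	wm_select_txqueue() and, if the queue lock is uncontended,
          *	start transmission immediately.
          */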
   6424 static int
   6425 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6426 {
   6427 	int qid;
   6428 	struct wm_softc *sc = ifp->if_softc;
   6429 	struct wm_txqueue *txq;
   6430 
   6431 	qid = wm_select_txqueue(ifp, m);
   6432 	txq = &sc->sc_queue[qid].wmq_txq;
   6433 
   6434 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6435 		m_freem(m);
   6436 		WM_Q_EVCNT_INCR(txq, txdrop);
   6437 		return ENOBUFS;
   6438 	}
   6439 
   6440 	/*
   6441 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6442 	 */
   6443 	ifp->if_obytes += m->m_pkthdr.len;
   6444 	if (m->m_flags & M_MCAST)
   6445 		ifp->if_omcasts++;
   6446 
   6447 	if (mutex_tryenter(txq->txq_lock)) {
   6448 		if (!txq->txq_stopping)
   6449 			wm_transmit_locked(ifp, txq);
   6450 		mutex_exit(txq->txq_lock);
   6451 	}
   6452 
   6453 	return 0;
   6454 }
   6455 
   6456 static void
   6457 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6458 {
   6459 
   6460 	wm_send_common_locked(ifp, txq, true);
   6461 }
   6462 
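         /*
          * wm_send_common_locked:
          *
          *	Common transmit path for wm_start() and wm_transmit():
          *	pull packets from if_snd or the per-queue pcq and map
          *	them onto transmit descriptors.  Called with the Tx
          *	queue lock held.
          */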
   6463 static void
   6464 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6465     bool is_transmit)
   6466 {
   6467 	struct wm_softc *sc = ifp->if_softc;
   6468 	struct mbuf *m0;
   6469 	struct m_tag *mtag;
   6470 	struct wm_txsoft *txs;
   6471 	bus_dmamap_t dmamap;
   6472 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6473 	bus_addr_t curaddr;
   6474 	bus_size_t seglen, curlen;
   6475 	uint32_t cksumcmd;
   6476 	uint8_t cksumfields;
   6477 
   6478 	KASSERT(mutex_owned(txq->txq_lock));
   6479 
   6480 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6481 		return;
   6482 
   6483 	/* Remember the previous number of free descriptors. */
   6484 	ofree = txq->txq_free;
   6485 
   6486 	/*
   6487 	 * Loop through the send queue, setting up transmit descriptors
   6488 	 * until we drain the queue, or use up all available transmit
   6489 	 * descriptors.
   6490 	 */
   6491 	for (;;) {
   6492 		m0 = NULL;
   6493 
   6494 		/* Get a work queue entry. */
   6495 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6496 			wm_txeof(sc, txq);
   6497 			if (txq->txq_sfree == 0) {
   6498 				DPRINTF(WM_DEBUG_TX,
   6499 				    ("%s: TX: no free job descriptors\n",
   6500 					device_xname(sc->sc_dev)));
   6501 				WM_Q_EVCNT_INCR(txq, txsstall);
   6502 				break;
   6503 			}
   6504 		}
   6505 
   6506 		/* Grab a packet off the queue. */
   6507 		if (is_transmit)
   6508 			m0 = pcq_get(txq->txq_interq);
   6509 		else
   6510 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6511 		if (m0 == NULL)
   6512 			break;
   6513 
   6514 		DPRINTF(WM_DEBUG_TX,
   6515 		    ("%s: TX: have packet to transmit: %p\n",
   6516 		    device_xname(sc->sc_dev), m0));
   6517 
   6518 		txs = &txq->txq_soft[txq->txq_snext];
   6519 		dmamap = txs->txs_dmamap;
   6520 
   6521 		use_tso = (m0->m_pkthdr.csum_flags &
   6522 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6523 
   6524 		/*
   6525 		 * So says the Linux driver:
   6526 		 * The controller does a simple calculation to make sure
   6527 		 * there is enough room in the FIFO before initiating the
   6528 		 * DMA for each buffer.  The calc is:
   6529 		 *	4 = ceil(buffer len / MSS)
   6530 		 * To make sure we don't overrun the FIFO, adjust the max
   6531 		 * buffer len if the MSS drops.
   6532 		 */
   6533 		dmamap->dm_maxsegsz =
   6534 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6535 		    ? m0->m_pkthdr.segsz << 2
   6536 		    : WTX_MAX_LEN;
   6537 
   6538 		/*
   6539 		 * Load the DMA map.  If this fails, the packet either
   6540 		 * didn't fit in the allotted number of segments, or we
   6541 		 * were short on resources.  For the too-many-segments
   6542 		 * case, we simply report an error and drop the packet,
   6543 		 * since we can't sanely copy a jumbo packet to a single
   6544 		 * buffer.
   6545 		 */
   6546 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6547 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6548 		if (error) {
   6549 			if (error == EFBIG) {
   6550 				WM_Q_EVCNT_INCR(txq, txdrop);
   6551 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6552 				    "DMA segments, dropping...\n",
   6553 				    device_xname(sc->sc_dev));
   6554 				wm_dump_mbuf_chain(sc, m0);
   6555 				m_freem(m0);
   6556 				continue;
   6557 			}
    6558 			/* Short on resources, just stop for now. */
   6559 			DPRINTF(WM_DEBUG_TX,
   6560 			    ("%s: TX: dmamap load failed: %d\n",
   6561 			    device_xname(sc->sc_dev), error));
   6562 			break;
   6563 		}
   6564 
   6565 		segs_needed = dmamap->dm_nsegs;
   6566 		if (use_tso) {
   6567 			/* For sentinel descriptor; see below. */
   6568 			segs_needed++;
   6569 		}
   6570 
   6571 		/*
   6572 		 * Ensure we have enough descriptors free to describe
   6573 		 * the packet.  Note, we always reserve one descriptor
   6574 		 * at the end of the ring due to the semantics of the
   6575 		 * TDT register, plus one more in the event we need
   6576 		 * to load offload context.
   6577 		 */
   6578 		if (segs_needed > txq->txq_free - 2) {
   6579 			/*
   6580 			 * Not enough free descriptors to transmit this
   6581 			 * packet.  We haven't committed anything yet,
   6582 			 * so just unload the DMA map, put the packet
    6583 			 * back on the queue, and punt.  Notify the upper
   6584 			 * layer that there are no more slots left.
   6585 			 */
   6586 			DPRINTF(WM_DEBUG_TX,
   6587 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6588 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6589 			    segs_needed, txq->txq_free - 1));
   6590 			ifp->if_flags |= IFF_OACTIVE;
   6591 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6592 			WM_Q_EVCNT_INCR(txq, txdstall);
   6593 			break;
   6594 		}
   6595 
   6596 		/*
   6597 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6598 		 * once we know we can transmit the packet, since we
   6599 		 * do some internal FIFO space accounting here.
   6600 		 */
   6601 		if (sc->sc_type == WM_T_82547 &&
   6602 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6603 			DPRINTF(WM_DEBUG_TX,
   6604 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6605 			    device_xname(sc->sc_dev)));
   6606 			ifp->if_flags |= IFF_OACTIVE;
   6607 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6608 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6609 			break;
   6610 		}
   6611 
   6612 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6613 
   6614 		DPRINTF(WM_DEBUG_TX,
   6615 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6616 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6617 
   6618 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6619 
   6620 		/*
   6621 		 * Store a pointer to the packet so that we can free it
   6622 		 * later.
   6623 		 *
   6624 		 * Initially, we consider the number of descriptors the
    6625 		 * packet uses to be the number of DMA segments.  This may be
   6626 		 * incremented by 1 if we do checksum offload (a descriptor
   6627 		 * is used to set the checksum context).
   6628 		 */
   6629 		txs->txs_mbuf = m0;
   6630 		txs->txs_firstdesc = txq->txq_next;
   6631 		txs->txs_ndesc = segs_needed;
   6632 
   6633 		/* Set up offload parameters for this packet. */
   6634 		if (m0->m_pkthdr.csum_flags &
   6635 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6636 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6637 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6638 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6639 					  &cksumfields) != 0) {
   6640 				/* Error message already displayed. */
   6641 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6642 				continue;
   6643 			}
   6644 		} else {
   6645 			cksumcmd = 0;
   6646 			cksumfields = 0;
   6647 		}
   6648 
   6649 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6650 
   6651 		/* Sync the DMA map. */
   6652 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6653 		    BUS_DMASYNC_PREWRITE);
   6654 
   6655 		/* Initialize the transmit descriptor. */
   6656 		for (nexttx = txq->txq_next, seg = 0;
   6657 		     seg < dmamap->dm_nsegs; seg++) {
   6658 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6659 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6660 			     seglen != 0;
   6661 			     curaddr += curlen, seglen -= curlen,
   6662 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6663 				curlen = seglen;
   6664 
   6665 				/*
   6666 				 * So says the Linux driver:
   6667 				 * Work around for premature descriptor
   6668 				 * write-backs in TSO mode.  Append a
   6669 				 * 4-byte sentinel descriptor.
   6670 				 */
   6671 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6672 				    curlen > 8)
   6673 					curlen -= 4;
   6674 
   6675 				wm_set_dma_addr(
   6676 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6677 				txq->txq_descs[nexttx].wtx_cmdlen
   6678 				    = htole32(cksumcmd | curlen);
   6679 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6680 				    = 0;
   6681 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6682 				    = cksumfields;
    6683 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6684 				lasttx = nexttx;
   6685 
   6686 				DPRINTF(WM_DEBUG_TX,
   6687 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6688 				     "len %#04zx\n",
   6689 				    device_xname(sc->sc_dev), nexttx,
   6690 				    (uint64_t)curaddr, curlen));
   6691 			}
   6692 		}
   6693 
   6694 		KASSERT(lasttx != -1);
   6695 
   6696 		/*
   6697 		 * Set up the command byte on the last descriptor of
   6698 		 * the packet.  If we're in the interrupt delay window,
   6699 		 * delay the interrupt.
   6700 		 */
   6701 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6702 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6703 
   6704 		/*
   6705 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6706 		 * up the descriptor to encapsulate the packet for us.
   6707 		 *
   6708 		 * This is only valid on the last descriptor of the packet.
   6709 		 */
   6710 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6711 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6712 			    htole32(WTX_CMD_VLE);
   6713 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6714 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6715 		}
   6716 
   6717 		txs->txs_lastdesc = lasttx;
   6718 
   6719 		DPRINTF(WM_DEBUG_TX,
   6720 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6721 		    device_xname(sc->sc_dev),
   6722 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6723 
   6724 		/* Sync the descriptors we're using. */
   6725 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6726 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6727 
   6728 		/* Give the packet to the chip. */
   6729 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6730 
   6731 		DPRINTF(WM_DEBUG_TX,
   6732 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6733 
   6734 		DPRINTF(WM_DEBUG_TX,
   6735 		    ("%s: TX: finished transmitting packet, job %d\n",
   6736 		    device_xname(sc->sc_dev), txq->txq_snext));
   6737 
   6738 		/* Advance the tx pointer. */
   6739 		txq->txq_free -= txs->txs_ndesc;
   6740 		txq->txq_next = nexttx;
   6741 
   6742 		txq->txq_sfree--;
   6743 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6744 
   6745 		/* Pass the packet to any BPF listeners. */
   6746 		bpf_mtap(ifp, m0);
   6747 	}
   6748 
   6749 	if (m0 != NULL) {
   6750 		ifp->if_flags |= IFF_OACTIVE;
   6751 		WM_Q_EVCNT_INCR(txq, txdrop);
   6752 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6753 			__func__));
   6754 		m_freem(m0);
   6755 	}
   6756 
   6757 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6758 		/* No more slots; notify upper layer. */
   6759 		ifp->if_flags |= IFF_OACTIVE;
   6760 	}
   6761 
   6762 	if (txq->txq_free != ofree) {
   6763 		/* Set a watchdog timer in case the chip flakes out. */
   6764 		ifp->if_timer = 5;
   6765 	}
   6766 }
   6767 
   6768 /*
   6769  * wm_nq_tx_offload:
   6770  *
   6771  *	Set up TCP/IP checksumming parameters for the
   6772  *	specified packet, for NEWQUEUE devices
   6773  */
   6774 static int
   6775 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6776     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6777 {
   6778 	struct mbuf *m0 = txs->txs_mbuf;
   6779 	struct m_tag *mtag;
   6780 	uint32_t vl_len, mssidx, cmdc;
   6781 	struct ether_header *eh;
   6782 	int offset, iphl;
   6783 
   6784 	/*
   6785 	 * XXX It would be nice if the mbuf pkthdr had offset
   6786 	 * fields for the protocol headers.
   6787 	 */
   6788 	*cmdlenp = 0;
   6789 	*fieldsp = 0;
   6790 
   6791 	eh = mtod(m0, struct ether_header *);
   6792 	switch (htons(eh->ether_type)) {
   6793 	case ETHERTYPE_IP:
   6794 	case ETHERTYPE_IPV6:
   6795 		offset = ETHER_HDR_LEN;
   6796 		break;
   6797 
   6798 	case ETHERTYPE_VLAN:
   6799 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6800 		break;
   6801 
   6802 	default:
   6803 		/* Don't support this protocol or encapsulation. */
   6804 		*do_csum = false;
   6805 		return 0;
   6806 	}
   6807 	*do_csum = true;
   6808 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6809 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6810 
   6811 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6812 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6813 
   6814 	if ((m0->m_pkthdr.csum_flags &
   6815 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6816 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6817 	} else {
   6818 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6819 	}
   6820 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6821 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6822 
   6823 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6824 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6825 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6826 		*cmdlenp |= NQTX_CMD_VLE;
   6827 	}
   6828 
   6829 	mssidx = 0;
   6830 
   6831 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6832 		int hlen = offset + iphl;
   6833 		int tcp_hlen;
   6834 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6835 
   6836 		if (__predict_false(m0->m_len <
   6837 				    (hlen + sizeof(struct tcphdr)))) {
   6838 			/*
   6839 			 * TCP/IP headers are not in the first mbuf; we need
   6840 			 * to do this the slow and painful way.  Let's just
   6841 			 * hope this doesn't happen very often.
   6842 			 */
   6843 			struct tcphdr th;
   6844 
   6845 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6846 
   6847 			m_copydata(m0, hlen, sizeof(th), &th);
   6848 			if (v4) {
   6849 				struct ip ip;
   6850 
   6851 				m_copydata(m0, offset, sizeof(ip), &ip);
   6852 				ip.ip_len = 0;
   6853 				m_copyback(m0,
   6854 				    offset + offsetof(struct ip, ip_len),
   6855 				    sizeof(ip.ip_len), &ip.ip_len);
   6856 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6857 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6858 			} else {
   6859 				struct ip6_hdr ip6;
   6860 
   6861 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6862 				ip6.ip6_plen = 0;
   6863 				m_copyback(m0,
   6864 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6865 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6866 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6867 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6868 			}
   6869 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6870 			    sizeof(th.th_sum), &th.th_sum);
   6871 
   6872 			tcp_hlen = th.th_off << 2;
   6873 		} else {
   6874 			/*
   6875 			 * TCP/IP headers are in the first mbuf; we can do
   6876 			 * this the easy way.
   6877 			 */
   6878 			struct tcphdr *th;
   6879 
   6880 			if (v4) {
   6881 				struct ip *ip =
   6882 				    (void *)(mtod(m0, char *) + offset);
   6883 				th = (void *)(mtod(m0, char *) + hlen);
   6884 
   6885 				ip->ip_len = 0;
   6886 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6887 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6888 			} else {
   6889 				struct ip6_hdr *ip6 =
   6890 				    (void *)(mtod(m0, char *) + offset);
   6891 				th = (void *)(mtod(m0, char *) + hlen);
   6892 
   6893 				ip6->ip6_plen = 0;
   6894 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6895 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6896 			}
   6897 			tcp_hlen = th->th_off << 2;
   6898 		}
   6899 		hlen += tcp_hlen;
   6900 		*cmdlenp |= NQTX_CMD_TSE;
   6901 
   6902 		if (v4) {
   6903 			WM_Q_EVCNT_INCR(txq, txtso);
   6904 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6905 		} else {
   6906 			WM_Q_EVCNT_INCR(txq, txtso6);
   6907 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6908 		}
   6909 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6910 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6911 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6912 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6913 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6914 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6915 	} else {
   6916 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6917 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6918 	}
   6919 
   6920 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6921 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6922 		cmdc |= NQTXC_CMD_IP4;
   6923 	}
   6924 
   6925 	if (m0->m_pkthdr.csum_flags &
   6926 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6927 		WM_Q_EVCNT_INCR(txq, txtusum);
   6928 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6929 			cmdc |= NQTXC_CMD_TCP;
   6930 		} else {
   6931 			cmdc |= NQTXC_CMD_UDP;
   6932 		}
   6933 		cmdc |= NQTXC_CMD_IP4;
   6934 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6935 	}
   6936 	if (m0->m_pkthdr.csum_flags &
   6937 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6938 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6939 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6940 			cmdc |= NQTXC_CMD_TCP;
   6941 		} else {
   6942 			cmdc |= NQTXC_CMD_UDP;
   6943 		}
   6944 		cmdc |= NQTXC_CMD_IP6;
   6945 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6946 	}
   6947 
   6948 	/* Fill in the context descriptor. */
   6949 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6950 	    htole32(vl_len);
   6951 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6952 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6953 	    htole32(cmdc);
   6954 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6955 	    htole32(mssidx);
   6956 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6957 	DPRINTF(WM_DEBUG_TX,
   6958 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6959 	    txq->txq_next, 0, vl_len));
   6960 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6961 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6962 	txs->txs_ndesc++;
   6963 	return 0;
   6964 }
   6965 
   6966 /*
   6967  * wm_nq_start:		[ifnet interface function]
   6968  *
   6969  *	Start packet transmission on the interface for NEWQUEUE devices
   6970  */
   6971 static void
   6972 wm_nq_start(struct ifnet *ifp)
   6973 {
   6974 	struct wm_softc *sc = ifp->if_softc;
   6975 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6976 
   6977 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6978 
   6979 	/*
   6980 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6981 	 */
   6982 
   6983 	mutex_enter(txq->txq_lock);
   6984 	if (!txq->txq_stopping)
   6985 		wm_nq_start_locked(ifp);
   6986 	mutex_exit(txq->txq_lock);
   6987 }
   6988 
   6989 static void
   6990 wm_nq_start_locked(struct ifnet *ifp)
   6991 {
   6992 	struct wm_softc *sc = ifp->if_softc;
   6993 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6994 
   6995 	wm_nq_send_common_locked(ifp, txq, false);
   6996 }
   6997 
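         /*
          * wm_nq_transmit:	[ifnet interface function]
          *
          *	Enqueue a packet on the Tx queue selected by
          *	wm_select_txqueue() for NEWQUEUE devices and, if the
          *	queue lock is uncontended, start transmission immediately.
          */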
   6998 static int
   6999 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7000 {
   7001 	int qid;
   7002 	struct wm_softc *sc = ifp->if_softc;
   7003 	struct wm_txqueue *txq;
   7004 
   7005 	qid = wm_select_txqueue(ifp, m);
   7006 	txq = &sc->sc_queue[qid].wmq_txq;
   7007 
   7008 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7009 		m_freem(m);
   7010 		WM_Q_EVCNT_INCR(txq, txdrop);
   7011 		return ENOBUFS;
   7012 	}
   7013 
   7014 	/*
   7015 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7016 	 */
   7017 	ifp->if_obytes += m->m_pkthdr.len;
   7018 	if (m->m_flags & M_MCAST)
   7019 		ifp->if_omcasts++;
   7020 
   7021 	/*
    7022 	 * There are two situations in which this mutex_tryenter() can
    7023 	 * fail at run time:
    7024 	 *     (1) contention with the interrupt handler
    7025 	 *         (wm_txrxintr_msix())
    7026 	 *     (2) contention with the deferred if_start softint
    7027 	 *         (wm_deferred_start())
    7028 	 * In either case, the last packet enqueued to txq->txq_interq is
    7029 	 * dequeued later by wm_deferred_start(), so it does not get stuck.
   7030 	 */
   7031 	if (mutex_tryenter(txq->txq_lock)) {
   7032 		if (!txq->txq_stopping)
   7033 			wm_nq_transmit_locked(ifp, txq);
   7034 		mutex_exit(txq->txq_lock);
   7035 	}
   7036 
   7037 	return 0;
   7038 }
   7039 
   7040 static void
   7041 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7042 {
   7043 
   7044 	wm_nq_send_common_locked(ifp, txq, true);
   7045 }
   7046 
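         /*
          * wm_nq_send_common_locked:
          *
          *	Common transmit path for wm_nq_start() and wm_nq_transmit():
          *	pull packets from if_snd or the per-queue pcq and map them
          *	onto legacy or advanced transmit descriptors.  Called with
          *	the Tx queue lock held.
          */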
   7047 static void
   7048 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7049     bool is_transmit)
   7050 {
   7051 	struct wm_softc *sc = ifp->if_softc;
   7052 	struct mbuf *m0;
   7053 	struct m_tag *mtag;
   7054 	struct wm_txsoft *txs;
   7055 	bus_dmamap_t dmamap;
   7056 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7057 	bool do_csum, sent;
   7058 
   7059 	KASSERT(mutex_owned(txq->txq_lock));
   7060 
   7061 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   7062 		return;
   7063 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7064 		return;
   7065 
   7066 	sent = false;
   7067 
   7068 	/*
   7069 	 * Loop through the send queue, setting up transmit descriptors
   7070 	 * until we drain the queue, or use up all available transmit
   7071 	 * descriptors.
   7072 	 */
   7073 	for (;;) {
   7074 		m0 = NULL;
   7075 
   7076 		/* Get a work queue entry. */
   7077 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7078 			wm_txeof(sc, txq);
   7079 			if (txq->txq_sfree == 0) {
   7080 				DPRINTF(WM_DEBUG_TX,
   7081 				    ("%s: TX: no free job descriptors\n",
   7082 					device_xname(sc->sc_dev)));
   7083 				WM_Q_EVCNT_INCR(txq, txsstall);
   7084 				break;
   7085 			}
   7086 		}
   7087 
   7088 		/* Grab a packet off the queue. */
   7089 		if (is_transmit)
   7090 			m0 = pcq_get(txq->txq_interq);
   7091 		else
   7092 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7093 		if (m0 == NULL)
   7094 			break;
   7095 
   7096 		DPRINTF(WM_DEBUG_TX,
   7097 		    ("%s: TX: have packet to transmit: %p\n",
   7098 		    device_xname(sc->sc_dev), m0));
   7099 
   7100 		txs = &txq->txq_soft[txq->txq_snext];
   7101 		dmamap = txs->txs_dmamap;
   7102 
   7103 		/*
   7104 		 * Load the DMA map.  If this fails, the packet either
   7105 		 * didn't fit in the allotted number of segments, or we
   7106 		 * were short on resources.  For the too-many-segments
   7107 		 * case, we simply report an error and drop the packet,
   7108 		 * since we can't sanely copy a jumbo packet to a single
   7109 		 * buffer.
   7110 		 */
   7111 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7112 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7113 		if (error) {
   7114 			if (error == EFBIG) {
   7115 				WM_Q_EVCNT_INCR(txq, txdrop);
   7116 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7117 				    "DMA segments, dropping...\n",
   7118 				    device_xname(sc->sc_dev));
   7119 				wm_dump_mbuf_chain(sc, m0);
   7120 				m_freem(m0);
   7121 				continue;
   7122 			}
   7123 			/* Short on resources, just stop for now. */
   7124 			DPRINTF(WM_DEBUG_TX,
   7125 			    ("%s: TX: dmamap load failed: %d\n",
   7126 			    device_xname(sc->sc_dev), error));
   7127 			break;
   7128 		}
   7129 
   7130 		segs_needed = dmamap->dm_nsegs;
   7131 
   7132 		/*
   7133 		 * Ensure we have enough descriptors free to describe
   7134 		 * the packet.  Note, we always reserve one descriptor
   7135 		 * at the end of the ring due to the semantics of the
   7136 		 * TDT register, plus one more in the event we need
   7137 		 * to load offload context.
   7138 		 */
   7139 		if (segs_needed > txq->txq_free - 2) {
   7140 			/*
   7141 			 * Not enough free descriptors to transmit this
   7142 			 * packet.  We haven't committed anything yet,
   7143 			 * so just unload the DMA map, put the packet
    7144 			 * back on the queue, and punt.  Notify the upper
   7145 			 * layer that there are no more slots left.
   7146 			 */
   7147 			DPRINTF(WM_DEBUG_TX,
   7148 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7149 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7150 			    segs_needed, txq->txq_free - 1));
   7151 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7152 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7153 			WM_Q_EVCNT_INCR(txq, txdstall);
   7154 			break;
   7155 		}
   7156 
   7157 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7158 
   7159 		DPRINTF(WM_DEBUG_TX,
   7160 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7161 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7162 
   7163 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7164 
   7165 		/*
   7166 		 * Store a pointer to the packet so that we can free it
   7167 		 * later.
   7168 		 *
   7169 		 * Initially, we consider the number of descriptors the
    7170 		 * packet uses to be the number of DMA segments.  This may be
   7171 		 * incremented by 1 if we do checksum offload (a descriptor
   7172 		 * is used to set the checksum context).
   7173 		 */
   7174 		txs->txs_mbuf = m0;
   7175 		txs->txs_firstdesc = txq->txq_next;
   7176 		txs->txs_ndesc = segs_needed;
   7177 
   7178 		/* Set up offload parameters for this packet. */
   7179 		uint32_t cmdlen, fields, dcmdlen;
   7180 		if (m0->m_pkthdr.csum_flags &
   7181 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7182 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7183 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7184 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7185 			    &do_csum) != 0) {
   7186 				/* Error message already displayed. */
   7187 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7188 				continue;
   7189 			}
   7190 		} else {
   7191 			do_csum = false;
   7192 			cmdlen = 0;
   7193 			fields = 0;
   7194 		}
   7195 
   7196 		/* Sync the DMA map. */
   7197 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7198 		    BUS_DMASYNC_PREWRITE);
   7199 
   7200 		/* Initialize the first transmit descriptor. */
   7201 		nexttx = txq->txq_next;
   7202 		if (!do_csum) {
    7203 			/* Set up a legacy descriptor. */
   7204 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7205 			    dmamap->dm_segs[0].ds_addr);
   7206 			txq->txq_descs[nexttx].wtx_cmdlen =
   7207 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7208 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7209 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7210 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7211 			    NULL) {
   7212 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7213 				    htole32(WTX_CMD_VLE);
   7214 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7215 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7216 			} else {
    7217 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   7218 			}
   7219 			dcmdlen = 0;
   7220 		} else {
    7221 			/* Set up an advanced data descriptor. */
   7222 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7223 			    htole64(dmamap->dm_segs[0].ds_addr);
   7224 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7225 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7226 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7227 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7228 			    htole32(fields);
   7229 			DPRINTF(WM_DEBUG_TX,
   7230 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7231 			    device_xname(sc->sc_dev), nexttx,
   7232 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7233 			DPRINTF(WM_DEBUG_TX,
   7234 			    ("\t 0x%08x%08x\n", fields,
   7235 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7236 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7237 		}
   7238 
   7239 		lasttx = nexttx;
   7240 		nexttx = WM_NEXTTX(txq, nexttx);
   7241 		/*
    7242 		 * Fill in the next descriptors.  The legacy and advanced
    7243 		 * formats are the same here.
   7244 		 */
   7245 		for (seg = 1; seg < dmamap->dm_nsegs;
   7246 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7247 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7248 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7249 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7250 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7251 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7252 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7253 			lasttx = nexttx;
   7254 
   7255 			DPRINTF(WM_DEBUG_TX,
   7256 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7257 			     "len %#04zx\n",
   7258 			    device_xname(sc->sc_dev), nexttx,
   7259 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7260 			    dmamap->dm_segs[seg].ds_len));
   7261 		}
   7262 
   7263 		KASSERT(lasttx != -1);
   7264 
   7265 		/*
   7266 		 * Set up the command byte on the last descriptor of
   7267 		 * the packet.  If we're in the interrupt delay window,
   7268 		 * delay the interrupt.
   7269 		 */
   7270 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7271 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7272 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7273 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7274 
   7275 		txs->txs_lastdesc = lasttx;
   7276 
   7277 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7278 		    device_xname(sc->sc_dev),
   7279 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7280 
   7281 		/* Sync the descriptors we're using. */
   7282 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7283 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7284 
   7285 		/* Give the packet to the chip. */
   7286 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7287 		sent = true;
   7288 
   7289 		DPRINTF(WM_DEBUG_TX,
   7290 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7291 
   7292 		DPRINTF(WM_DEBUG_TX,
   7293 		    ("%s: TX: finished transmitting packet, job %d\n",
   7294 		    device_xname(sc->sc_dev), txq->txq_snext));
   7295 
   7296 		/* Advance the tx pointer. */
   7297 		txq->txq_free -= txs->txs_ndesc;
   7298 		txq->txq_next = nexttx;
   7299 
   7300 		txq->txq_sfree--;
   7301 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7302 
   7303 		/* Pass the packet to any BPF listeners. */
   7304 		bpf_mtap(ifp, m0);
   7305 	}
   7306 
   7307 	if (m0 != NULL) {
   7308 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7309 		WM_Q_EVCNT_INCR(txq, txdrop);
   7310 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7311 			__func__));
   7312 		m_freem(m0);
   7313 	}
   7314 
   7315 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7316 		/* No more slots; notify upper layer. */
   7317 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7318 	}
   7319 
   7320 	if (sent) {
   7321 		/* Set a watchdog timer in case the chip flakes out. */
   7322 		ifp->if_timer = 5;
   7323 	}
   7324 }
   7325 
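         /*
          * wm_deferred_start:
          *
          *	Deferred if_start softint handler; retry transmission on
          *	each Tx queue, skipping queues whose locks are contended.
          */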
   7326 static void
   7327 wm_deferred_start(struct ifnet *ifp)
   7328 {
   7329 	struct wm_softc *sc = ifp->if_softc;
   7330 	int qid = 0;
   7331 
   7332 	/*
    7333 	 * Try to transmit on all Tx queues.  It might be better to pass
    7334 	 * a specific txq somehow and transmit only on that queue.
   7335 	 */
   7336 restart:
   7337 	WM_CORE_LOCK(sc);
   7338 	if (sc->sc_core_stopping)
   7339 		goto out;
   7340 
   7341 	for (; qid < sc->sc_nqueues; qid++) {
   7342 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7343 
   7344 		if (!mutex_tryenter(txq->txq_lock))
   7345 			continue;
   7346 
   7347 		if (txq->txq_stopping) {
   7348 			mutex_exit(txq->txq_lock);
   7349 			continue;
   7350 		}
   7351 		WM_CORE_UNLOCK(sc);
   7352 
   7353 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7354 			/* XXX need for ALTQ */
   7355 			if (qid == 0)
   7356 				wm_nq_start_locked(ifp);
   7357 			wm_nq_transmit_locked(ifp, txq);
   7358 		} else {
   7359 			/* XXX need for ALTQ */
   7360 			if (qid == 0)
   7361 				wm_start_locked(ifp);
   7362 			wm_transmit_locked(ifp, txq);
   7363 		}
   7364 		mutex_exit(txq->txq_lock);
   7365 
   7366 		qid++;
   7367 		goto restart;
   7368 	}
   7369 out:
   7370 	WM_CORE_UNLOCK(sc);
   7371 }
   7372 
   7373 /* Interrupt */
   7374 
   7375 /*
   7376  * wm_txeof:
   7377  *
   7378  *	Helper; handle transmit interrupts.
   7379  */
   7380 static int
   7381 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7382 {
   7383 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7384 	struct wm_txsoft *txs;
   7385 	bool processed = false;
   7386 	int count = 0;
   7387 	int i;
   7388 	uint8_t status;
   7389 
   7390 	KASSERT(mutex_owned(txq->txq_lock));
   7391 
   7392 	if (txq->txq_stopping)
   7393 		return 0;
   7394 
   7395 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7396 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7397 	else
   7398 		ifp->if_flags &= ~IFF_OACTIVE;
   7399 
   7400 	/*
   7401 	 * Go through the Tx list and free mbufs for those
   7402 	 * frames which have been transmitted.
   7403 	 */
   7404 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7405 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7406 		txs = &txq->txq_soft[i];
   7407 
   7408 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7409 			device_xname(sc->sc_dev), i));
   7410 
   7411 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7412 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7413 
   7414 		status =
   7415 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7416 		if ((status & WTX_ST_DD) == 0) {
   7417 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7418 			    BUS_DMASYNC_PREREAD);
   7419 			break;
   7420 		}
   7421 
   7422 		processed = true;
   7423 		count++;
   7424 		DPRINTF(WM_DEBUG_TX,
   7425 		    ("%s: TX: job %d done: descs %d..%d\n",
   7426 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7427 		    txs->txs_lastdesc));
   7428 
   7429 		/*
   7430 		 * XXX We should probably be using the statistics
   7431 		 * XXX registers, but I don't know if they exist
   7432 		 * XXX on chips before the i82544.
   7433 		 */
   7434 
   7435 #ifdef WM_EVENT_COUNTERS
   7436 		if (status & WTX_ST_TU)
   7437 			WM_Q_EVCNT_INCR(txq, tu);
   7438 #endif /* WM_EVENT_COUNTERS */
   7439 
   7440 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7441 			ifp->if_oerrors++;
   7442 			if (status & WTX_ST_LC)
   7443 				log(LOG_WARNING, "%s: late collision\n",
   7444 				    device_xname(sc->sc_dev));
   7445 			else if (status & WTX_ST_EC) {
   7446 				ifp->if_collisions += 16;
   7447 				log(LOG_WARNING, "%s: excessive collisions\n",
   7448 				    device_xname(sc->sc_dev));
   7449 			}
   7450 		} else
   7451 			ifp->if_opackets++;
   7452 
   7453 		txq->txq_free += txs->txs_ndesc;
   7454 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7455 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7456 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7457 		m_freem(txs->txs_mbuf);
   7458 		txs->txs_mbuf = NULL;
   7459 	}
   7460 
   7461 	/* Update the dirty transmit buffer pointer. */
   7462 	txq->txq_sdirty = i;
   7463 	DPRINTF(WM_DEBUG_TX,
   7464 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7465 
   7466 	if (count != 0)
   7467 		rnd_add_uint32(&sc->rnd_source, count);
   7468 
   7469 	/*
   7470 	 * If there are no more pending transmissions, cancel the watchdog
   7471 	 * timer.
   7472 	 */
   7473 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7474 		ifp->if_timer = 0;
   7475 
   7476 	return processed;
   7477 }
   7478 
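         /*
          * The following inline accessors hide the layout differences
          * between the three Rx descriptor formats: the 82574 extended
          * format, the NEWQUEUE advanced format and the legacy format.
          */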
   7479 static inline uint32_t
   7480 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7481 {
   7482 	struct wm_softc *sc = rxq->rxq_sc;
   7483 
   7484 	if (sc->sc_type == WM_T_82574)
   7485 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7486 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7487 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7488 	else
   7489 		return rxq->rxq_descs[idx].wrx_status;
   7490 }
   7491 
   7492 static inline uint32_t
   7493 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7494 {
   7495 	struct wm_softc *sc = rxq->rxq_sc;
   7496 
   7497 	if (sc->sc_type == WM_T_82574)
   7498 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7499 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7500 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7501 	else
   7502 		return rxq->rxq_descs[idx].wrx_errors;
   7503 }
   7504 
   7505 static inline uint16_t
   7506 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7507 {
   7508 	struct wm_softc *sc = rxq->rxq_sc;
   7509 
   7510 	if (sc->sc_type == WM_T_82574)
   7511 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7512 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7513 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7514 	else
   7515 		return rxq->rxq_descs[idx].wrx_special;
   7516 }
   7517 
   7518 static inline int
   7519 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7520 {
   7521 	struct wm_softc *sc = rxq->rxq_sc;
   7522 
   7523 	if (sc->sc_type == WM_T_82574)
   7524 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7525 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7526 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7527 	else
   7528 		return rxq->rxq_descs[idx].wrx_len;
   7529 }
   7530 
   7531 #ifdef WM_DEBUG
   7532 static inline uint32_t
   7533 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7534 {
   7535 	struct wm_softc *sc = rxq->rxq_sc;
   7536 
   7537 	if (sc->sc_type == WM_T_82574)
   7538 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7539 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7540 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7541 	else
   7542 		return 0;
   7543 }
   7544 
   7545 static inline uint8_t
   7546 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7547 {
   7548 	struct wm_softc *sc = rxq->rxq_sc;
   7549 
   7550 	if (sc->sc_type == WM_T_82574)
   7551 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7552 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7553 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7554 	else
   7555 		return 0;
   7556 }
   7557 #endif /* WM_DEBUG */
   7558 
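         /*
          * Test a status or error bit, using whichever of the legacy,
          * extended (82574) or advanced (NEWQUEUE) bit definitions
          * matches the chip.
          */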
   7559 static inline bool
   7560 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7561     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7562 {
   7563 
   7564 	if (sc->sc_type == WM_T_82574)
   7565 		return (status & ext_bit) != 0;
   7566 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7567 		return (status & nq_bit) != 0;
   7568 	else
   7569 		return (status & legacy_bit) != 0;
   7570 }
   7571 
   7572 static inline bool
   7573 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7574     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7575 {
   7576 
   7577 	if (sc->sc_type == WM_T_82574)
   7578 		return (error & ext_bit) != 0;
   7579 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7580 		return (error & nq_bit) != 0;
   7581 	else
   7582 		return (error & legacy_bit) != 0;
   7583 }
   7584 
   7585 static inline bool
   7586 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7587 {
   7588 
   7589 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7590 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7591 		return true;
   7592 	else
   7593 		return false;
   7594 }
   7595 
   7596 static inline bool
   7597 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7598 {
   7599 	struct wm_softc *sc = rxq->rxq_sc;
   7600 
   7601 	/* XXXX missing error bit for newqueue? */
   7602 	if (wm_rxdesc_is_set_error(sc, errors,
    7603 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
    7604 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ |
         		EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   7605 		NQRXC_ERROR_RXE)) {
   7606 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7607 			log(LOG_WARNING, "%s: symbol error\n",
   7608 			    device_xname(sc->sc_dev));
   7609 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7610 			log(LOG_WARNING, "%s: receive sequence error\n",
   7611 			    device_xname(sc->sc_dev));
   7612 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7613 			log(LOG_WARNING, "%s: CRC error\n",
   7614 			    device_xname(sc->sc_dev));
   7615 		return true;
   7616 	}
   7617 
   7618 	return false;
   7619 }
   7620 
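         /*
          * Check the descriptor-done (DD) bit.  If the descriptor is not
          * yet done, restore its buffer address (the extended and advanced
          * formats are written back by the chip) and sync it for the
          * device again.
          */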
   7621 static inline bool
   7622 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7623 {
   7624 	struct wm_softc *sc = rxq->rxq_sc;
   7625 
   7626 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7627 		NQRXC_STATUS_DD)) {
   7628 		/* We have processed all of the receive descriptors. */
   7629 		struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   7630 
   7631 		if (sc->sc_type == WM_T_82574) {
   7632 			rxq->rxq_ext_descs[idx].erx_data.erxd_addr =
   7633 				htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
   7634 				    + sc->sc_align_tweak);
   7635 		} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7636 			rxq->rxq_nq_descs[idx].nqrx_data.nrxd_paddr =
   7637 				htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
   7638 				    + sc->sc_align_tweak);
   7639 		}
   7640 
   7641 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7642 		return false;
   7643 	}
   7644 
   7645 	return true;
   7646 }
   7647 
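         /*
          * If the descriptor carries a VLAN tag, attach the tag to the
          * mbuf.  Returns false if that fails; the caller must then drop
          * the packet.
          */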
   7648 static inline bool
   7649 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7650     struct mbuf *m)
   7651 {
   7652 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7653 
   7654 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7655 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7656 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7657 	}
   7658 
   7659 	return true;
   7660 }
   7661 
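         /*
          * Set mbuf checksum flags from the descriptor's checksum status
          * and error bits, unless the chip reports that checksums were
          * not evaluated (IXSM).
          */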
   7662 static inline void
   7663 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7664     uint32_t errors, struct mbuf *m)
   7665 {
   7666 	struct wm_softc *sc = rxq->rxq_sc;
   7667 
   7668 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7669 		if (wm_rxdesc_is_set_status(sc, status,
   7670 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7671 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7672 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7673 			if (wm_rxdesc_is_set_error(sc, errors,
   7674 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7675 				m->m_pkthdr.csum_flags |=
   7676 					M_CSUM_IPv4_BAD;
   7677 		}
   7678 		if (wm_rxdesc_is_set_status(sc, status,
   7679 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7680 			/*
   7681 			 * Note: we don't know if this was TCP or UDP,
   7682 			 * so we just set both bits, and expect the
   7683 			 * upper layers to deal.
   7684 			 */
   7685 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7686 			m->m_pkthdr.csum_flags |=
   7687 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7688 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7689 			if (wm_rxdesc_is_set_error(sc, errors,
   7690 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7691 				m->m_pkthdr.csum_flags |=
   7692 					M_CSUM_TCP_UDP_BAD;
   7693 		}
   7694 	}
   7695 }
   7696 
   7697 /*
   7698  * wm_rxeof:
   7699  *
   7700  *	Helper; handle receive interrupts.
   7701  */
   7702 static void
   7703 wm_rxeof(struct wm_rxqueue *rxq)
   7704 {
   7705 	struct wm_softc *sc = rxq->rxq_sc;
   7706 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7707 	struct wm_rxsoft *rxs;
   7708 	struct mbuf *m;
   7709 	int i, len;
   7710 	int count = 0;
   7711 	uint32_t status, errors;
   7712 	uint16_t vlantag;
   7713 
   7714 	KASSERT(mutex_owned(rxq->rxq_lock));
   7715 
   7716 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7717 		rxs = &rxq->rxq_soft[i];
   7718 
   7719 		DPRINTF(WM_DEBUG_RX,
   7720 		    ("%s: RX: checking descriptor %d\n",
   7721 		    device_xname(sc->sc_dev), i));
    7722 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7723 
   7724 		status = wm_rxdesc_get_status(rxq, i);
   7725 		errors = wm_rxdesc_get_errors(rxq, i);
   7726 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7727 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7728 #ifdef WM_DEBUG
   7729 		uint32_t rsshash = wm_rxdesc_get_rsshash(rxq, i);
   7730 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7731 #endif
   7732 
   7733 		if (!wm_rxdesc_dd(rxq, i, status))
   7734 			break;
   7735 
   7736 		count++;
   7737 		if (__predict_false(rxq->rxq_discard)) {
   7738 			DPRINTF(WM_DEBUG_RX,
   7739 			    ("%s: RX: discarding contents of descriptor %d\n",
   7740 			    device_xname(sc->sc_dev), i));
   7741 			wm_init_rxdesc(rxq, i);
   7742 			if (wm_rxdesc_is_eop(rxq, status)) {
   7743 				/* Reset our state. */
   7744 				DPRINTF(WM_DEBUG_RX,
   7745 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7746 				    device_xname(sc->sc_dev)));
   7747 				rxq->rxq_discard = 0;
   7748 			}
   7749 			continue;
   7750 		}
   7751 
   7752 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7753 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7754 
   7755 		m = rxs->rxs_mbuf;
   7756 
   7757 		/*
   7758 		 * Add a new receive buffer to the ring, unless of
   7759 		 * course the length is zero. Treat the latter as a
   7760 		 * failed mapping.
   7761 		 */
   7762 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7763 			/*
   7764 			 * Failed, throw away what we've done so
   7765 			 * far, and discard the rest of the packet.
   7766 			 */
   7767 			ifp->if_ierrors++;
   7768 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7769 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7770 			wm_init_rxdesc(rxq, i);
   7771 			if (!wm_rxdesc_is_eop(rxq, status))
   7772 				rxq->rxq_discard = 1;
   7773 			if (rxq->rxq_head != NULL)
   7774 				m_freem(rxq->rxq_head);
   7775 			WM_RXCHAIN_RESET(rxq);
   7776 			DPRINTF(WM_DEBUG_RX,
   7777 			    ("%s: RX: Rx buffer allocation failed, "
   7778 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7779 			    rxq->rxq_discard ? " (discard)" : ""));
   7780 			continue;
   7781 		}
   7782 
   7783 		m->m_len = len;
   7784 		rxq->rxq_len += len;
   7785 		DPRINTF(WM_DEBUG_RX,
   7786 		    ("%s: RX: buffer at %p len %d\n",
   7787 		    device_xname(sc->sc_dev), m->m_data, len));
   7788 
   7789 		/* If this is not the end of the packet, keep looking. */
   7790 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7791 			WM_RXCHAIN_LINK(rxq, m);
   7792 			DPRINTF(WM_DEBUG_RX,
   7793 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7794 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7795 			continue;
   7796 		}
   7797 
   7798 		/*
    7799 		 * Okay, we have the entire packet now.  The chip is
    7800 		 * configured to include the FCS except on I350, I354 and
    7801 		 * I21[01] (not all chips can be configured to strip it),
    7802 		 * so we need to trim it.  We may need to adjust the length
    7803 		 * of the previous mbuf in the chain if the current mbuf is
    7804 		 * too short.
    7805 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    7806 		 * is always set on I350, so we don't trim the FCS there.
   7807 		 */
   7808 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7809 		    && (sc->sc_type != WM_T_I210)
   7810 		    && (sc->sc_type != WM_T_I211)) {
   7811 			if (m->m_len < ETHER_CRC_LEN) {
   7812 				rxq->rxq_tail->m_len
   7813 				    -= (ETHER_CRC_LEN - m->m_len);
   7814 				m->m_len = 0;
   7815 			} else
   7816 				m->m_len -= ETHER_CRC_LEN;
   7817 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7818 		} else
   7819 			len = rxq->rxq_len;
   7820 
   7821 		WM_RXCHAIN_LINK(rxq, m);
   7822 
   7823 		*rxq->rxq_tailp = NULL;
   7824 		m = rxq->rxq_head;
   7825 
   7826 		WM_RXCHAIN_RESET(rxq);
   7827 
   7828 		DPRINTF(WM_DEBUG_RX,
   7829 		    ("%s: RX: have entire packet, len -> %d\n",
   7830 		    device_xname(sc->sc_dev), len));
   7831 
   7832 		/* If an error occurred, update stats and drop the packet. */
   7833 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7834 			m_freem(m);
   7835 			continue;
   7836 		}
   7837 
   7838 		/* No errors.  Receive the packet. */
   7839 		m_set_rcvif(m, ifp);
   7840 		m->m_pkthdr.len = len;
    7841 		/*
    7842 		 * TODO:
    7843 		 * We should save the rsshash and rsstype in this mbuf.
    7844 		 */
    7845 		DPRINTF(WM_DEBUG_RX,
    7846 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
    7847 			device_xname(sc->sc_dev), rsstype, rsshash));
   7848 
   7849 		/*
   7850 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7851 		 * for us.  Associate the tag with the packet.
   7852 		 */
   7853 		/* XXXX should check for i350 and i354 */
   7854 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7855 			continue;
   7856 
   7857 		/* Set up checksum info for this packet. */
   7858 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   7859 
   7860 		mutex_exit(rxq->rxq_lock);
   7861 
   7862 		/* Pass it on. */
   7863 		if_percpuq_enqueue(sc->sc_ipq, m);
   7864 
   7865 		mutex_enter(rxq->rxq_lock);
   7866 
   7867 		if (rxq->rxq_stopping)
   7868 			break;
   7869 	}
   7870 
   7871 	/* Update the receive pointer. */
   7872 	rxq->rxq_ptr = i;
   7873 	if (count != 0)
   7874 		rnd_add_uint32(&sc->rnd_source, count);
   7875 
   7876 	DPRINTF(WM_DEBUG_RX,
   7877 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7878 }
   7879 
   7880 /*
   7881  * wm_linkintr_gmii:
   7882  *
   7883  *	Helper; handle link interrupts for GMII.
   7884  */
   7885 static void
   7886 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7887 {
   7888 
   7889 	KASSERT(WM_CORE_LOCKED(sc));
   7890 
   7891 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7892 		__func__));
   7893 
   7894 	if (icr & ICR_LSC) {
   7895 		uint32_t reg;
   7896 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7897 
   7898 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7899 			wm_gig_downshift_workaround_ich8lan(sc);
   7900 
   7901 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7902 			device_xname(sc->sc_dev)));
   7903 		mii_pollstat(&sc->sc_mii);
   7904 		if (sc->sc_type == WM_T_82543) {
   7905 			int miistatus, active;
   7906 
   7907 			/*
   7908 			 * With 82543, we need to force speed and
   7909 			 * duplex on the MAC equal to what the PHY
   7910 			 * speed and duplex configuration is.
   7911 			 */
   7912 			miistatus = sc->sc_mii.mii_media_status;
   7913 
   7914 			if (miistatus & IFM_ACTIVE) {
   7915 				active = sc->sc_mii.mii_media_active;
   7916 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7917 				switch (IFM_SUBTYPE(active)) {
   7918 				case IFM_10_T:
   7919 					sc->sc_ctrl |= CTRL_SPEED_10;
   7920 					break;
   7921 				case IFM_100_TX:
   7922 					sc->sc_ctrl |= CTRL_SPEED_100;
   7923 					break;
   7924 				case IFM_1000_T:
   7925 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7926 					break;
   7927 				default:
   7928 					/*
   7929 					 * fiber?
    7930 					 * Should not enter here.
   7931 					 */
   7932 					printf("unknown media (%x)\n", active);
   7933 					break;
   7934 				}
   7935 				if (active & IFM_FDX)
   7936 					sc->sc_ctrl |= CTRL_FD;
   7937 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7938 			}
   7939 		} else if ((sc->sc_type == WM_T_ICH8)
   7940 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7941 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7942 		} else if (sc->sc_type == WM_T_PCH) {
   7943 			wm_k1_gig_workaround_hv(sc,
   7944 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7945 		}
   7946 
   7947 		if ((sc->sc_phytype == WMPHY_82578)
   7948 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7949 			== IFM_1000_T)) {
   7950 
   7951 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7952 				delay(200*1000); /* XXX too big */
   7953 
   7954 				/* Link stall fix for link up */
   7955 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7956 				    HV_MUX_DATA_CTRL,
   7957 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7958 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7959 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7960 				    HV_MUX_DATA_CTRL,
   7961 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7962 			}
   7963 		}
    7964 		/*
    7965 		 * I217 packet loss issue:
    7966 		 * Ensure that the FEXTNVM4 Beacon Duration is set
    7967 		 * correctly on power up.  Set the Beacon Duration for
    7968 		 * I217 to 8 usec.
    7969 		 */
   7970 		if ((sc->sc_type == WM_T_PCH_LPT)
   7971 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7972 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7973 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7974 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7975 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7976 		}
   7977 
    7978 		/* XXX Work around the I218 hang issue: the equivalent of */
    7979 		/* e1000_k1_workaround_lpt_lp() is not implemented yet. */
   7980 
   7981 		if ((sc->sc_type == WM_T_PCH_LPT)
   7982 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7983 			/*
   7984 			 * Set platform power management values for Latency
   7985 			 * Tolerance Reporting (LTR)
   7986 			 */
   7987 			wm_platform_pm_pch_lpt(sc,
   7988 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7989 				    != 0));
   7990 		}
   7991 
   7992 		/* FEXTNVM6 K1-off workaround */
   7993 		if (sc->sc_type == WM_T_PCH_SPT) {
   7994 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7995 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7996 			    & FEXTNVM6_K1_OFF_ENABLE)
   7997 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7998 			else
   7999 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8000 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8001 		}
   8002 	} else if (icr & ICR_RXSEQ) {
    8003 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8004 			device_xname(sc->sc_dev)));
   8005 	}
   8006 }
   8007 
   8008 /*
   8009  * wm_linkintr_tbi:
   8010  *
   8011  *	Helper; handle link interrupts for TBI mode.
   8012  */
   8013 static void
   8014 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8015 {
   8016 	uint32_t status;
   8017 
   8018 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8019 		__func__));
   8020 
   8021 	status = CSR_READ(sc, WMREG_STATUS);
   8022 	if (icr & ICR_LSC) {
   8023 		if (status & STATUS_LU) {
   8024 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8025 			    device_xname(sc->sc_dev),
   8026 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8027 			/*
    8028 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    8029 			 * automatically, so re-read sc->sc_ctrl from the chip
   8030 			 */
   8031 
   8032 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8033 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8034 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8035 			if (status & STATUS_FD)
   8036 				sc->sc_tctl |=
   8037 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8038 			else
   8039 				sc->sc_tctl |=
   8040 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8041 			if (sc->sc_ctrl & CTRL_TFCE)
   8042 				sc->sc_fcrtl |= FCRTL_XONE;
   8043 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8044 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8045 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8046 				      sc->sc_fcrtl);
   8047 			sc->sc_tbi_linkup = 1;
   8048 		} else {
   8049 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8050 			    device_xname(sc->sc_dev)));
   8051 			sc->sc_tbi_linkup = 0;
   8052 		}
   8053 		/* Update LED */
   8054 		wm_tbi_serdes_set_linkled(sc);
   8055 	} else if (icr & ICR_RXSEQ) {
   8056 		DPRINTF(WM_DEBUG_LINK,
   8057 		    ("%s: LINK: Receive sequence error\n",
   8058 		    device_xname(sc->sc_dev)));
   8059 	}
   8060 }
   8061 
   8062 /*
   8063  * wm_linkintr_serdes:
   8064  *
    8065  *	Helper; handle link interrupts for SERDES mode.
   8066  */
   8067 static void
   8068 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8069 {
   8070 	struct mii_data *mii = &sc->sc_mii;
   8071 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8072 	uint32_t pcs_adv, pcs_lpab, reg;
   8073 
   8074 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8075 		__func__));
   8076 
   8077 	if (icr & ICR_LSC) {
   8078 		/* Check PCS */
   8079 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8080 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8081 			mii->mii_media_status |= IFM_ACTIVE;
   8082 			sc->sc_tbi_linkup = 1;
   8083 		} else {
   8084 			mii->mii_media_status |= IFM_NONE;
   8085 			sc->sc_tbi_linkup = 0;
   8086 			wm_tbi_serdes_set_linkled(sc);
   8087 			return;
   8088 		}
   8089 		mii->mii_media_active |= IFM_1000_SX;
   8090 		if ((reg & PCS_LSTS_FDX) != 0)
   8091 			mii->mii_media_active |= IFM_FDX;
   8092 		else
   8093 			mii->mii_media_active |= IFM_HDX;
   8094 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8095 			/* Check flow */
   8096 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8097 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8098 				DPRINTF(WM_DEBUG_LINK,
   8099 				    ("XXX LINKOK but not ACOMP\n"));
   8100 				return;
   8101 			}
   8102 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8103 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8104 			DPRINTF(WM_DEBUG_LINK,
   8105 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8106 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8107 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8108 				mii->mii_media_active |= IFM_FLOW
   8109 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8110 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8111 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8112 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8113 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8114 				mii->mii_media_active |= IFM_FLOW
   8115 				    | IFM_ETH_TXPAUSE;
   8116 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8117 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8118 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8119 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8120 				mii->mii_media_active |= IFM_FLOW
   8121 				    | IFM_ETH_RXPAUSE;
   8122 		}
   8123 		/* Update LED */
   8124 		wm_tbi_serdes_set_linkled(sc);
   8125 	} else {
   8126 		DPRINTF(WM_DEBUG_LINK,
   8127 		    ("%s: LINK: Receive sequence error\n",
   8128 		    device_xname(sc->sc_dev)));
   8129 	}
   8130 }
   8131 
   8132 /*
   8133  * wm_linkintr:
   8134  *
   8135  *	Helper; handle link interrupts.
   8136  */
   8137 static void
   8138 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8139 {
   8140 
   8141 	KASSERT(WM_CORE_LOCKED(sc));
   8142 
   8143 	if (sc->sc_flags & WM_F_HAS_MII)
   8144 		wm_linkintr_gmii(sc, icr);
   8145 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8146 	    && (sc->sc_type >= WM_T_82575))
   8147 		wm_linkintr_serdes(sc, icr);
   8148 	else
   8149 		wm_linkintr_tbi(sc, icr);
   8150 }
   8151 
   8152 /*
   8153  * wm_intr_legacy:
   8154  *
   8155  *	Interrupt service routine for INTx and MSI.
   8156  */
   8157 static int
   8158 wm_intr_legacy(void *arg)
   8159 {
   8160 	struct wm_softc *sc = arg;
   8161 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8162 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   8163 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8164 	uint32_t icr, rndval = 0;
   8165 	int handled = 0;
   8166 
   8167 	DPRINTF(WM_DEBUG_TX,
   8168 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8169 	while (1 /* CONSTCOND */) {
   8170 		icr = CSR_READ(sc, WMREG_ICR);
   8171 		if ((icr & sc->sc_icr) == 0)
   8172 			break;
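         		/* Remember the first ICR value as entropy for rnd(9) */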
   8173 		if (rndval == 0)
   8174 			rndval = icr;
   8175 
   8176 		mutex_enter(rxq->rxq_lock);
   8177 
   8178 		if (rxq->rxq_stopping) {
   8179 			mutex_exit(rxq->rxq_lock);
   8180 			break;
   8181 		}
   8182 
   8183 		handled = 1;
   8184 
   8185 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8186 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8187 			DPRINTF(WM_DEBUG_RX,
   8188 			    ("%s: RX: got Rx intr 0x%08x\n",
   8189 			    device_xname(sc->sc_dev),
   8190 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8191 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8192 		}
   8193 #endif
   8194 		wm_rxeof(rxq);
   8195 
   8196 		mutex_exit(rxq->rxq_lock);
   8197 		mutex_enter(txq->txq_lock);
   8198 
   8199 		if (txq->txq_stopping) {
   8200 			mutex_exit(txq->txq_lock);
   8201 			break;
   8202 		}
   8203 
   8204 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8205 		if (icr & ICR_TXDW) {
   8206 			DPRINTF(WM_DEBUG_TX,
   8207 			    ("%s: TX: got TXDW interrupt\n",
   8208 			    device_xname(sc->sc_dev)));
   8209 			WM_Q_EVCNT_INCR(txq, txdw);
   8210 		}
   8211 #endif
   8212 		wm_txeof(sc, txq);
   8213 
   8214 		mutex_exit(txq->txq_lock);
   8215 		WM_CORE_LOCK(sc);
   8216 
   8217 		if (sc->sc_core_stopping) {
   8218 			WM_CORE_UNLOCK(sc);
   8219 			break;
   8220 		}
   8221 
   8222 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8223 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8224 			wm_linkintr(sc, icr);
   8225 		}
   8226 
   8227 		WM_CORE_UNLOCK(sc);
   8228 
   8229 		if (icr & ICR_RXO) {
   8230 #if defined(WM_DEBUG)
   8231 			log(LOG_WARNING, "%s: Receive overrun\n",
   8232 			    device_xname(sc->sc_dev));
   8233 #endif /* defined(WM_DEBUG) */
   8234 		}
   8235 	}
   8236 
   8237 	rnd_add_uint32(&sc->rnd_source, rndval);
   8238 
   8239 	if (handled) {
   8240 		/* Try to get more packets going. */
   8241 		if_schedule_deferred_start(ifp);
   8242 	}
   8243 
   8244 	return handled;
   8245 }
   8246 
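         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx queue pair of an
          *	MSI-X vector.  The queue's interrupt is masked on entry and
          *	re-enabled just before returning, so the handler is not
          *	re-entered for its own queue while it runs.
          */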
   8247 static int
   8248 wm_txrxintr_msix(void *arg)
   8249 {
   8250 	struct wm_queue *wmq = arg;
   8251 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8252 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8253 	struct wm_softc *sc = txq->txq_sc;
   8254 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8255 
   8256 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8257 
   8258 	DPRINTF(WM_DEBUG_TX,
   8259 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8260 
   8261 	if (sc->sc_type == WM_T_82574)
   8262 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8263 	else if (sc->sc_type == WM_T_82575)
   8264 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8265 	else
   8266 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8267 
   8268 	mutex_enter(txq->txq_lock);
   8269 
   8270 	if (txq->txq_stopping) {
   8271 		mutex_exit(txq->txq_lock);
   8272 		return 0;
   8273 	}
   8274 
   8275 	WM_Q_EVCNT_INCR(txq, txdw);
   8276 	wm_txeof(sc, txq);
   8277 
   8278 	/* Try to get more packets going. */
   8279 	if (pcq_peek(txq->txq_interq) != NULL)
   8280 		if_schedule_deferred_start(ifp);
   8281 	/*
    8282 	 * Some upper-layer processing (e.g. ALTQ) still calls
    8283 	 * ifp->if_start() directly, so also kick queue 0 here.
   8284 	 */
   8285 	if (wmq->wmq_id == 0)
   8286 		if_schedule_deferred_start(ifp);
   8287 
   8288 	mutex_exit(txq->txq_lock);
   8289 
   8290 	DPRINTF(WM_DEBUG_RX,
   8291 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8292 	mutex_enter(rxq->rxq_lock);
   8293 
   8294 	if (rxq->rxq_stopping) {
   8295 		mutex_exit(rxq->rxq_lock);
   8296 		return 0;
   8297 	}
   8298 
   8299 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8300 	wm_rxeof(rxq);
   8301 	mutex_exit(rxq->rxq_lock);
   8302 
   8303 	if (sc->sc_type == WM_T_82574)
   8304 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8305 	else if (sc->sc_type == WM_T_82575)
   8306 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8307 	else
   8308 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8309 
   8310 	return 1;
   8311 }
   8312 
   8313 /*
   8314  * wm_linkintr_msix:
   8315  *
   8316  *	Interrupt service routine for link status change for MSI-X.
   8317  */
   8318 static int
   8319 wm_linkintr_msix(void *arg)
   8320 {
   8321 	struct wm_softc *sc = arg;
   8322 	uint32_t reg;
   8323 
   8324 	DPRINTF(WM_DEBUG_LINK,
   8325 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8326 
   8327 	reg = CSR_READ(sc, WMREG_ICR);
   8328 	WM_CORE_LOCK(sc);
   8329 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8330 		goto out;
   8331 
   8332 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8333 	wm_linkintr(sc, ICR_LSC);
   8334 
   8335 out:
   8336 	WM_CORE_UNLOCK(sc);
   8337 
   8338 	if (sc->sc_type == WM_T_82574)
   8339 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8340 	else if (sc->sc_type == WM_T_82575)
   8341 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8342 	else
   8343 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8344 
   8345 	return 1;
   8346 }
   8347 
   8348 /*
   8349  * Media related.
   8350  * GMII, SGMII, TBI (and SERDES)
   8351  */
   8352 
   8353 /* Common */
   8354 
   8355 /*
   8356  * wm_tbi_serdes_set_linkled:
   8357  *
   8358  *	Update the link LED on TBI and SERDES devices.
   8359  */
   8360 static void
   8361 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8362 {
   8363 
   8364 	if (sc->sc_tbi_linkup)
   8365 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8366 	else
   8367 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8368 
    8369 	/* The LED pin is active-low on 82540 and newer devices */
   8370 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8371 
   8372 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8373 }
   8374 
   8375 /* GMII related */
   8376 
   8377 /*
   8378  * wm_gmii_reset:
   8379  *
   8380  *	Reset the PHY.
   8381  */
   8382 static void
   8383 wm_gmii_reset(struct wm_softc *sc)
   8384 {
   8385 	uint32_t reg;
   8386 	int rv;
   8387 
   8388 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8389 		device_xname(sc->sc_dev), __func__));
   8390 
   8391 	rv = sc->phy.acquire(sc);
   8392 	if (rv != 0) {
   8393 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8394 		    __func__);
   8395 		return;
   8396 	}
   8397 
   8398 	switch (sc->sc_type) {
   8399 	case WM_T_82542_2_0:
   8400 	case WM_T_82542_2_1:
   8401 		/* null */
   8402 		break;
   8403 	case WM_T_82543:
   8404 		/*
   8405 		 * With 82543, we need to force speed and duplex on the MAC
   8406 		 * equal to what the PHY speed and duplex configuration is.
    8407 		 * In addition, we need to toggle the PHY's hardware reset
    8408 		 * pin to take it out of reset.
   8409 		 */
   8410 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8412 
   8413 		/* The PHY reset pin is active-low. */
   8414 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8415 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8416 		    CTRL_EXT_SWDPIN(4));
   8417 		reg |= CTRL_EXT_SWDPIO(4);
   8418 
   8419 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8420 		CSR_WRITE_FLUSH(sc);
   8421 		delay(10*1000);
   8422 
   8423 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8424 		CSR_WRITE_FLUSH(sc);
   8425 		delay(150);
   8426 #if 0
   8427 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8428 #endif
   8429 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8430 		break;
   8431 	case WM_T_82544:	/* reset 10000us */
   8432 	case WM_T_82540:
   8433 	case WM_T_82545:
   8434 	case WM_T_82545_3:
   8435 	case WM_T_82546:
   8436 	case WM_T_82546_3:
   8437 	case WM_T_82541:
   8438 	case WM_T_82541_2:
   8439 	case WM_T_82547:
   8440 	case WM_T_82547_2:
   8441 	case WM_T_82571:	/* reset 100us */
   8442 	case WM_T_82572:
   8443 	case WM_T_82573:
   8444 	case WM_T_82574:
   8445 	case WM_T_82575:
   8446 	case WM_T_82576:
   8447 	case WM_T_82580:
   8448 	case WM_T_I350:
   8449 	case WM_T_I354:
   8450 	case WM_T_I210:
   8451 	case WM_T_I211:
   8452 	case WM_T_82583:
   8453 	case WM_T_80003:
   8454 		/* generic reset */
   8455 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8456 		CSR_WRITE_FLUSH(sc);
   8457 		delay(20000);
   8458 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8459 		CSR_WRITE_FLUSH(sc);
   8460 		delay(20000);
   8461 
   8462 		if ((sc->sc_type == WM_T_82541)
   8463 		    || (sc->sc_type == WM_T_82541_2)
   8464 		    || (sc->sc_type == WM_T_82547)
   8465 		    || (sc->sc_type == WM_T_82547_2)) {
    8466 			/* Workarounds for IGP are done in igp_reset() */
   8467 			/* XXX add code to set LED after phy reset */
   8468 		}
   8469 		break;
   8470 	case WM_T_ICH8:
   8471 	case WM_T_ICH9:
   8472 	case WM_T_ICH10:
   8473 	case WM_T_PCH:
   8474 	case WM_T_PCH2:
   8475 	case WM_T_PCH_LPT:
   8476 	case WM_T_PCH_SPT:
   8477 		/* generic reset */
   8478 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8479 		CSR_WRITE_FLUSH(sc);
   8480 		delay(100);
   8481 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8482 		CSR_WRITE_FLUSH(sc);
   8483 		delay(150);
   8484 		break;
   8485 	default:
   8486 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8487 		    __func__);
   8488 		break;
   8489 	}
   8490 
   8491 	sc->phy.release(sc);
   8492 
   8493 	/* get_cfg_done */
   8494 	wm_get_cfg_done(sc);
   8495 
   8496 	/* extra setup */
   8497 	switch (sc->sc_type) {
   8498 	case WM_T_82542_2_0:
   8499 	case WM_T_82542_2_1:
   8500 	case WM_T_82543:
   8501 	case WM_T_82544:
   8502 	case WM_T_82540:
   8503 	case WM_T_82545:
   8504 	case WM_T_82545_3:
   8505 	case WM_T_82546:
   8506 	case WM_T_82546_3:
   8507 	case WM_T_82541_2:
   8508 	case WM_T_82547_2:
   8509 	case WM_T_82571:
   8510 	case WM_T_82572:
   8511 	case WM_T_82573:
   8512 	case WM_T_82575:
   8513 	case WM_T_82576:
   8514 	case WM_T_82580:
   8515 	case WM_T_I350:
   8516 	case WM_T_I354:
   8517 	case WM_T_I210:
   8518 	case WM_T_I211:
   8519 	case WM_T_80003:
   8520 		/* null */
   8521 		break;
   8522 	case WM_T_82574:
   8523 	case WM_T_82583:
   8524 		wm_lplu_d0_disable(sc);
   8525 		break;
   8526 	case WM_T_82541:
   8527 	case WM_T_82547:
   8528 		/* XXX Configure actively LED after PHY reset */
   8529 		break;
   8530 	case WM_T_ICH8:
   8531 	case WM_T_ICH9:
   8532 	case WM_T_ICH10:
   8533 	case WM_T_PCH:
   8534 	case WM_T_PCH2:
   8535 	case WM_T_PCH_LPT:
   8536 	case WM_T_PCH_SPT:
    8537 		/* Allow time for h/w to get to a quiescent state after reset */
   8538 		delay(10*1000);
   8539 
   8540 		if (sc->sc_type == WM_T_PCH)
   8541 			wm_hv_phy_workaround_ich8lan(sc);
   8542 
   8543 		if (sc->sc_type == WM_T_PCH2)
   8544 			wm_lv_phy_workaround_ich8lan(sc);
   8545 
   8546 		/* Clear the host wakeup bit after lcd reset */
   8547 		if (sc->sc_type >= WM_T_PCH) {
   8548 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8549 			    BM_PORT_GEN_CFG);
   8550 			reg &= ~BM_WUC_HOST_WU_BIT;
   8551 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8552 			    BM_PORT_GEN_CFG, reg);
   8553 		}
   8554 
   8555 		/*
    8556 		 * XXX Configure the LCD with the extended configuration
    8557 		 * region in NVM.
   8558 		 */
   8559 
   8560 		/* Disable D0 LPLU. */
   8561 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8562 			wm_lplu_d0_disable_pch(sc);
   8563 		else
   8564 			wm_lplu_d0_disable(sc);	/* ICH* */
   8565 		break;
   8566 	default:
   8567 		panic("%s: unknown type\n", __func__);
   8568 		break;
   8569 	}
   8570 }
   8571 
   8572 /*
   8573  * wm_get_phy_id_82575:
   8574  *
   8575  * Return PHY ID. Return -1 if it failed.
   8576  */
   8577 static int
   8578 wm_get_phy_id_82575(struct wm_softc *sc)
   8579 {
   8580 	uint32_t reg;
   8581 	int phyid = -1;
   8582 
   8583 	/* XXX */
   8584 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8585 		return -1;
   8586 
   8587 	if (wm_sgmii_uses_mdio(sc)) {
   8588 		switch (sc->sc_type) {
   8589 		case WM_T_82575:
   8590 		case WM_T_82576:
   8591 			reg = CSR_READ(sc, WMREG_MDIC);
   8592 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8593 			break;
   8594 		case WM_T_82580:
   8595 		case WM_T_I350:
   8596 		case WM_T_I354:
   8597 		case WM_T_I210:
   8598 		case WM_T_I211:
   8599 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8600 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8601 			break;
   8602 		default:
   8603 			return -1;
   8604 		}
   8605 	}
   8606 
   8607 	return phyid;
   8608 }
   8609 
   8610 
   8611 /*
   8612  * wm_gmii_mediainit:
   8613  *
   8614  *	Initialize media for use on 1000BASE-T devices.
   8615  */
   8616 static void
   8617 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8618 {
   8619 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8620 	struct mii_data *mii = &sc->sc_mii;
   8621 	uint32_t reg;
   8622 
   8623 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8624 		device_xname(sc->sc_dev), __func__));
   8625 
   8626 	/* We have GMII. */
   8627 	sc->sc_flags |= WM_F_HAS_MII;
   8628 
   8629 	if (sc->sc_type == WM_T_80003)
   8630 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8631 	else
   8632 		sc->sc_tipg = TIPG_1000T_DFLT;
   8633 
   8634 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8635 	if ((sc->sc_type == WM_T_82580)
   8636 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8637 	    || (sc->sc_type == WM_T_I211)) {
   8638 		reg = CSR_READ(sc, WMREG_PHPM);
   8639 		reg &= ~PHPM_GO_LINK_D;
   8640 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8641 	}
   8642 
   8643 	/*
   8644 	 * Let the chip set speed/duplex on its own based on
   8645 	 * signals from the PHY.
   8646 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8647 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8648 	 */
   8649 	sc->sc_ctrl |= CTRL_SLU;
   8650 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8651 
   8652 	/* Initialize our media structures and probe the GMII. */
   8653 	mii->mii_ifp = ifp;
   8654 
   8655 	/*
   8656 	 * Determine the PHY access method.
   8657 	 *
   8658 	 *  For SGMII, use SGMII specific method.
   8659 	 *
   8660 	 *  For some devices, we can determine the PHY access method
   8661 	 * from sc_type.
   8662 	 *
   8663 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8664 	 * access method from sc_type alone, so use the PCI product ID
    8665 	 * for some devices.
    8666 	 * For other ICH8 variants, try igp's method first.  If the PHY
    8667 	 * can't be detected that way, fall back to bm's method.
   8668 	 */
   8669 	switch (prodid) {
   8670 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8671 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8672 		/* 82577 */
   8673 		sc->sc_phytype = WMPHY_82577;
   8674 		break;
   8675 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8676 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8677 		/* 82578 */
   8678 		sc->sc_phytype = WMPHY_82578;
   8679 		break;
   8680 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8681 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8682 		/* 82579 */
   8683 		sc->sc_phytype = WMPHY_82579;
   8684 		break;
   8685 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8686 	case PCI_PRODUCT_INTEL_82801I_BM:
   8687 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8688 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8689 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8690 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8691 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8692 		/* ICH8, 9, 10 with 82567 */
   8693 		sc->sc_phytype = WMPHY_BM;
   8694 		mii->mii_readreg = wm_gmii_bm_readreg;
   8695 		mii->mii_writereg = wm_gmii_bm_writereg;
   8696 		break;
   8697 	default:
   8698 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8699 		    && !wm_sgmii_uses_mdio(sc)){
   8700 			/* SGMII */
   8701 			mii->mii_readreg = wm_sgmii_readreg;
   8702 			mii->mii_writereg = wm_sgmii_writereg;
   8703 		} else if ((sc->sc_type == WM_T_82574)
   8704 		    || (sc->sc_type == WM_T_82583)) {
   8705 			/* BM2 (phyaddr == 1) */
   8706 			sc->sc_phytype = WMPHY_BM;
   8707 			mii->mii_readreg = wm_gmii_bm_readreg;
   8708 			mii->mii_writereg = wm_gmii_bm_writereg;
   8709 		} else if (sc->sc_type >= WM_T_ICH8) {
   8710 			/* non-82567 ICH8, 9 and 10 */
   8711 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8712 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8713 		} else if (sc->sc_type >= WM_T_80003) {
   8714 			/* 80003 */
   8715 			sc->sc_phytype = WMPHY_GG82563;
   8716 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8717 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8718 		} else if (sc->sc_type >= WM_T_I210) {
   8719 			/* I210 and I211 */
   8720 			sc->sc_phytype = WMPHY_210;
   8721 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8722 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8723 		} else if (sc->sc_type >= WM_T_82580) {
   8724 			/* 82580, I350 and I354 */
   8725 			sc->sc_phytype = WMPHY_82580;
   8726 			mii->mii_readreg = wm_gmii_82580_readreg;
   8727 			mii->mii_writereg = wm_gmii_82580_writereg;
   8728 		} else if (sc->sc_type >= WM_T_82544) {
    8729 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8730 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8731 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8732 		} else {
   8733 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8734 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8735 		}
   8736 		break;
   8737 	}
   8738 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8739 		/* All PCH* use _hv_ */
   8740 		mii->mii_readreg = wm_gmii_hv_readreg;
   8741 		mii->mii_writereg = wm_gmii_hv_writereg;
   8742 	}
   8743 	mii->mii_statchg = wm_gmii_statchg;
   8744 
   8745 	/* get PHY control from SMBus to PCIe */
   8746 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8747 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8748 		wm_smbustopci(sc);
   8749 
   8750 	wm_gmii_reset(sc);
   8751 
   8752 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8753 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8754 	    wm_gmii_mediastatus);
   8755 
   8756 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8757 	    || (sc->sc_type == WM_T_82580)
   8758 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8759 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8760 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8761 			/* Attach only one port */
   8762 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8763 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8764 		} else {
   8765 			int i, id;
   8766 			uint32_t ctrl_ext;
   8767 
   8768 			id = wm_get_phy_id_82575(sc);
   8769 			if (id != -1) {
   8770 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8771 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8772 			}
   8773 			if ((id == -1)
   8774 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8775 				/* Power on sgmii phy if it is disabled */
   8776 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8777 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8778 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8779 				CSR_WRITE_FLUSH(sc);
   8780 				delay(300*1000); /* XXX too long */
   8781 
   8782 				/* from 1 to 8 */
   8783 				for (i = 1; i < 8; i++)
   8784 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8785 					    0xffffffff, i, MII_OFFSET_ANY,
   8786 					    MIIF_DOPAUSE);
   8787 
   8788 				/* restore previous sfp cage power state */
   8789 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8790 			}
   8791 		}
   8792 	} else {
   8793 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8794 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8795 	}
   8796 
   8797 	/*
    8798 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
    8799 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   8800 	 */
   8801 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8802 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8803 		wm_set_mdio_slow_mode_hv(sc);
   8804 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8805 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8806 	}
   8807 
   8808 	/*
   8809 	 * (For ICH8 variants)
   8810 	 * If PHY detection failed, use BM's r/w function and retry.
   8811 	 */
   8812 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8813 		/* if failed, retry with *_bm_* */
   8814 		mii->mii_readreg = wm_gmii_bm_readreg;
   8815 		mii->mii_writereg = wm_gmii_bm_writereg;
   8816 
   8817 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8818 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8819 	}
   8820 
   8821 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8822 		/* No PHY was found */
   8823 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8824 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8825 		sc->sc_phytype = WMPHY_NONE;
   8826 	} else {
   8827 		/*
    8828 		 * A PHY was found.
    8829 		 * Check the PHY type.
   8830 		 */
   8831 		uint32_t model;
   8832 		struct mii_softc *child;
   8833 
   8834 		child = LIST_FIRST(&mii->mii_phys);
   8835 		model = child->mii_mpd_model;
   8836 		if (model == MII_MODEL_yyINTEL_I82566)
   8837 			sc->sc_phytype = WMPHY_IGP_3;
   8838 
   8839 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8840 	}
   8841 }
   8842 
   8843 /*
   8844  * wm_gmii_mediachange:	[ifmedia interface function]
   8845  *
   8846  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8847  */
   8848 static int
   8849 wm_gmii_mediachange(struct ifnet *ifp)
   8850 {
   8851 	struct wm_softc *sc = ifp->if_softc;
   8852 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8853 	int rc;
   8854 
   8855 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8856 		device_xname(sc->sc_dev), __func__));
   8857 	if ((ifp->if_flags & IFF_UP) == 0)
   8858 		return 0;
   8859 
   8860 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8861 	sc->sc_ctrl |= CTRL_SLU;
   8862 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8863 	    || (sc->sc_type > WM_T_82543)) {
   8864 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8865 	} else {
   8866 		sc->sc_ctrl &= ~CTRL_ASDE;
   8867 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8868 		if (ife->ifm_media & IFM_FDX)
   8869 			sc->sc_ctrl |= CTRL_FD;
   8870 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8871 		case IFM_10_T:
   8872 			sc->sc_ctrl |= CTRL_SPEED_10;
   8873 			break;
   8874 		case IFM_100_TX:
   8875 			sc->sc_ctrl |= CTRL_SPEED_100;
   8876 			break;
   8877 		case IFM_1000_T:
   8878 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8879 			break;
   8880 		default:
   8881 			panic("wm_gmii_mediachange: bad media 0x%x",
   8882 			    ife->ifm_media);
   8883 		}
   8884 	}
   8885 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8886 	if (sc->sc_type <= WM_T_82543)
   8887 		wm_gmii_reset(sc);
   8888 
   8889 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8890 		return 0;
   8891 	return rc;
   8892 }
   8893 
   8894 /*
   8895  * wm_gmii_mediastatus:	[ifmedia interface function]
   8896  *
   8897  *	Get the current interface media status on a 1000BASE-T device.
   8898  */
   8899 static void
   8900 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8901 {
   8902 	struct wm_softc *sc = ifp->if_softc;
   8903 
   8904 	ether_mediastatus(ifp, ifmr);
   8905 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8906 	    | sc->sc_flowflags;
   8907 }
   8908 
   8909 #define	MDI_IO		CTRL_SWDPIN(2)
   8910 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8911 #define	MDI_CLK		CTRL_SWDPIN(3)
   8912 
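         /*
          * wm_i82543_mii_sendbits:
          *
          *	Bit-bang the given value out on the 82543's software-driven
          *	MDIO pin, most significant bit first, toggling MDC once per
          *	bit.
          */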
   8913 static void
   8914 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8915 {
   8916 	uint32_t i, v;
   8917 
   8918 	v = CSR_READ(sc, WMREG_CTRL);
   8919 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8920 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8921 
   8922 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8923 		if (data & i)
   8924 			v |= MDI_IO;
   8925 		else
   8926 			v &= ~MDI_IO;
   8927 		CSR_WRITE(sc, WMREG_CTRL, v);
   8928 		CSR_WRITE_FLUSH(sc);
   8929 		delay(10);
   8930 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8931 		CSR_WRITE_FLUSH(sc);
   8932 		delay(10);
   8933 		CSR_WRITE(sc, WMREG_CTRL, v);
   8934 		CSR_WRITE_FLUSH(sc);
   8935 		delay(10);
   8936 	}
   8937 }
   8938 
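         /*
          * wm_i82543_mii_recvbits:
          *
          *	Bit-bang 16 bits of data in from the 82543's software-driven
          *	MDIO pin, clocking MDC around each sampled bit.
          */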
   8939 static uint32_t
   8940 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8941 {
   8942 	uint32_t v, i, data = 0;
   8943 
   8944 	v = CSR_READ(sc, WMREG_CTRL);
   8945 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8946 	v |= CTRL_SWDPIO(3);
   8947 
   8948 	CSR_WRITE(sc, WMREG_CTRL, v);
   8949 	CSR_WRITE_FLUSH(sc);
   8950 	delay(10);
   8951 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8952 	CSR_WRITE_FLUSH(sc);
   8953 	delay(10);
   8954 	CSR_WRITE(sc, WMREG_CTRL, v);
   8955 	CSR_WRITE_FLUSH(sc);
   8956 	delay(10);
   8957 
   8958 	for (i = 0; i < 16; i++) {
   8959 		data <<= 1;
   8960 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8961 		CSR_WRITE_FLUSH(sc);
   8962 		delay(10);
   8963 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8964 			data |= 1;
   8965 		CSR_WRITE(sc, WMREG_CTRL, v);
   8966 		CSR_WRITE_FLUSH(sc);
   8967 		delay(10);
   8968 	}
   8969 
   8970 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8971 	CSR_WRITE_FLUSH(sc);
   8972 	delay(10);
   8973 	CSR_WRITE(sc, WMREG_CTRL, v);
   8974 	CSR_WRITE_FLUSH(sc);
   8975 	delay(10);
   8976 
   8977 	return data;
   8978 }
   8979 
   8980 #undef MDI_IO
   8981 #undef MDI_DIR
   8982 #undef MDI_CLK
   8983 
   8984 /*
   8985  * wm_gmii_i82543_readreg:	[mii interface function]
   8986  *
   8987  *	Read a PHY register on the GMII (i82543 version).
   8988  */
   8989 static int
   8990 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8991 {
   8992 	struct wm_softc *sc = device_private(self);
   8993 	int rv;
   8994 
   8995 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8996 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8997 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8998 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8999 
   9000 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9001 	    device_xname(sc->sc_dev), phy, reg, rv));
   9002 
   9003 	return rv;
   9004 }
   9005 
   9006 /*
   9007  * wm_gmii_i82543_writereg:	[mii interface function]
   9008  *
   9009  *	Write a PHY register on the GMII (i82543 version).
   9010  */
   9011 static void
   9012 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9013 {
   9014 	struct wm_softc *sc = device_private(self);
   9015 
   9016 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9017 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9018 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9019 	    (MII_COMMAND_START << 30), 32);
   9020 }
   9021 
   9022 /*
   9023  * wm_gmii_mdic_readreg:	[mii interface function]
   9024  *
   9025  *	Read a PHY register on the GMII.
   9026  */
   9027 static int
   9028 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9029 {
   9030 	struct wm_softc *sc = device_private(self);
   9031 	uint32_t mdic = 0;
   9032 	int i, rv;
   9033 
   9034 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9035 	    MDIC_REGADD(reg));
   9036 
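         	/* Poll for the ready bit (up to WM_GEN_POLL_TIMEOUT * 3 tries) */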
   9037 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9038 		mdic = CSR_READ(sc, WMREG_MDIC);
   9039 		if (mdic & MDIC_READY)
   9040 			break;
   9041 		delay(50);
   9042 	}
   9043 
   9044 	if ((mdic & MDIC_READY) == 0) {
   9045 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9046 		    device_xname(sc->sc_dev), phy, reg);
   9047 		rv = 0;
   9048 	} else if (mdic & MDIC_E) {
   9049 #if 0 /* This is normal if no PHY is present. */
   9050 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9051 		    device_xname(sc->sc_dev), phy, reg);
   9052 #endif
   9053 		rv = 0;
   9054 	} else {
   9055 		rv = MDIC_DATA(mdic);
   9056 		if (rv == 0xffff)
   9057 			rv = 0;
   9058 	}
   9059 
   9060 	return rv;
   9061 }
   9062 
   9063 /*
   9064  * wm_gmii_mdic_writereg:	[mii interface function]
   9065  *
   9066  *	Write a PHY register on the GMII.
   9067  */
   9068 static void
   9069 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9070 {
   9071 	struct wm_softc *sc = device_private(self);
   9072 	uint32_t mdic = 0;
   9073 	int i;
   9074 
   9075 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9076 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9077 
   9078 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9079 		mdic = CSR_READ(sc, WMREG_MDIC);
   9080 		if (mdic & MDIC_READY)
   9081 			break;
   9082 		delay(50);
   9083 	}
   9084 
   9085 	if ((mdic & MDIC_READY) == 0)
   9086 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9087 		    device_xname(sc->sc_dev), phy, reg);
   9088 	else if (mdic & MDIC_E)
   9089 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9090 		    device_xname(sc->sc_dev), phy, reg);
   9091 }
   9092 
   9093 /*
   9094  * wm_gmii_i82544_readreg:	[mii interface function]
   9095  *
   9096  *	Read a PHY register on the GMII.
   9097  */
   9098 static int
   9099 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9100 {
   9101 	struct wm_softc *sc = device_private(self);
   9102 	int rv;
   9103 
   9104 	if (sc->phy.acquire(sc)) {
   9105 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9106 		    __func__);
   9107 		return 0;
   9108 	}
   9109 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9110 	sc->phy.release(sc);
   9111 
   9112 	return rv;
   9113 }
   9114 
   9115 /*
   9116  * wm_gmii_i82544_writereg:	[mii interface function]
   9117  *
   9118  *	Write a PHY register on the GMII.
   9119  */
   9120 static void
   9121 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9122 {
   9123 	struct wm_softc *sc = device_private(self);
   9124 
    9125 	if (sc->phy.acquire(sc)) {
    9126 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9127 		    __func__);
    9128 		return;
    9128 	}
   9129 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9130 	sc->phy.release(sc);
   9131 }
   9132 
   9133 /*
   9134  * wm_gmii_i80003_readreg:	[mii interface function]
   9135  *
    9136  *	Read a PHY register on the kumeran.
    9137  * This could be handled by the PHY layer if we didn't have to lock the
    9138  * resource ...
   9139  */
   9140 static int
   9141 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9142 {
   9143 	struct wm_softc *sc = device_private(self);
   9144 	int rv;
   9145 
   9146 	if (phy != 1) /* only one PHY on kumeran bus */
   9147 		return 0;
   9148 
   9149 	if (sc->phy.acquire(sc)) {
   9150 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9151 		    __func__);
   9152 		return 0;
   9153 	}
   9154 
   9155 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9156 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9157 		    reg >> GG82563_PAGE_SHIFT);
   9158 	} else {
   9159 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9160 		    reg >> GG82563_PAGE_SHIFT);
   9161 	}
    9162 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9163 	delay(200);
   9164 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9165 	delay(200);
   9166 	sc->phy.release(sc);
   9167 
   9168 	return rv;
   9169 }
   9170 
   9171 /*
   9172  * wm_gmii_i80003_writereg:	[mii interface function]
   9173  *
   9174  *	Write a PHY register on the kumeran.
   9175  * This could be handled by the PHY layer if we didn't have to lock the
    9176  * resource ...
   9177  */
   9178 static void
   9179 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9180 {
   9181 	struct wm_softc *sc = device_private(self);
   9182 
   9183 	if (phy != 1) /* only one PHY on kumeran bus */
   9184 		return;
   9185 
   9186 	if (sc->phy.acquire(sc)) {
   9187 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9188 		    __func__);
   9189 		return;
   9190 	}
   9191 
   9192 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9193 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9194 		    reg >> GG82563_PAGE_SHIFT);
   9195 	} else {
   9196 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9197 		    reg >> GG82563_PAGE_SHIFT);
   9198 	}
    9199 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9200 	delay(200);
   9201 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9202 	delay(200);
   9203 
   9204 	sc->phy.release(sc);
   9205 }
   9206 
   9207 /*
   9208  * wm_gmii_bm_readreg:	[mii interface function]
   9209  *
    9210  *	Read a PHY register on the BM PHYs (ICH8/9/10, 82574 and 82583).
    9211  * This could be handled by the PHY layer if we didn't have to lock the
    9212  * resource ...
   9213  */
   9214 static int
   9215 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9216 {
   9217 	struct wm_softc *sc = device_private(self);
   9218 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9219 	uint16_t val;
   9220 	int rv;
   9221 
   9222 	if (sc->phy.acquire(sc)) {
   9223 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9224 		    __func__);
   9225 		return 0;
   9226 	}
   9227 
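         	/*
         	 * On chips other than 82574/82583, registers on pages >= 768,
         	 * register 25 on page 0, and register 31 are reached through
         	 * PHY address 1.
         	 */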
   9228 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9229 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9230 		    || (reg == 31)) ? 1 : phy;
   9231 	/* Page 800 works differently than the rest so it has its own func */
   9232 	if (page == BM_WUC_PAGE) {
   9233 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9234 		rv = val;
   9235 		goto release;
   9236 	}
   9237 
   9238 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9239 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9240 		    && (sc->sc_type != WM_T_82583))
   9241 			wm_gmii_mdic_writereg(self, phy,
   9242 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9243 		else
   9244 			wm_gmii_mdic_writereg(self, phy,
   9245 			    BME1000_PHY_PAGE_SELECT, page);
   9246 	}
   9247 
   9248 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9249 
   9250 release:
   9251 	sc->phy.release(sc);
   9252 	return rv;
   9253 }
   9254 
   9255 /*
   9256  * wm_gmii_bm_writereg:	[mii interface function]
   9257  *
    9258  *	Write a PHY register on the BM PHYs (ICH8/9/10, 82574 and 82583).
    9259  * This could be handled by the PHY layer if we didn't have to lock the
    9260  * resource ...
   9261  */
   9262 static void
   9263 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9264 {
   9265 	struct wm_softc *sc = device_private(self);
   9266 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9267 
   9268 	if (sc->phy.acquire(sc)) {
   9269 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9270 		    __func__);
   9271 		return;
   9272 	}
   9273 
   9274 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9275 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9276 		    || (reg == 31)) ? 1 : phy;
   9277 	/* Page 800 works differently than the rest so it has its own func */
   9278 	if (page == BM_WUC_PAGE) {
   9279 		uint16_t tmp;
   9280 
   9281 		tmp = val;
   9282 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9283 		goto release;
   9284 	}
   9285 
   9286 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9287 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9288 		    && (sc->sc_type != WM_T_82583))
   9289 			wm_gmii_mdic_writereg(self, phy,
   9290 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9291 		else
   9292 			wm_gmii_mdic_writereg(self, phy,
   9293 			    BME1000_PHY_PAGE_SELECT, page);
   9294 	}
   9295 
   9296 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9297 
   9298 release:
   9299 	sc->phy.release(sc);
   9300 }
   9301 
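         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
          *	page 800.  Accesses to page 800 only work while the wakeup
          *	register enable bit on page 769 is set, so that bit is saved,
          *	set and restored around the access.
          */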
   9302 static void
   9303 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9304 {
   9305 	struct wm_softc *sc = device_private(self);
   9306 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9307 	uint16_t wuce, reg;
   9308 
   9309 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9310 		device_xname(sc->sc_dev), __func__));
   9311 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9312 	if (sc->sc_type == WM_T_PCH) {
    9313 		/* XXX The e1000 driver does nothing here... why? */
   9314 	}
   9315 
   9316 	/*
   9317 	 * 1) Enable PHY wakeup register first.
   9318 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9319 	 */
   9320 
   9321 	/* Set page 769 */
   9322 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9323 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9324 
   9325 	/* Read WUCE and save it */
   9326 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9327 
   9328 	reg = wuce | BM_WUC_ENABLE_BIT;
   9329 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9330 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9331 
   9332 	/* Select page 800 */
   9333 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9334 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9335 
   9336 	/*
   9337 	 * 2) Access PHY wakeup register.
   9338 	 * See e1000_access_phy_wakeup_reg_bm.
   9339 	 */
   9340 
   9341 	/* Write page 800 */
   9342 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9343 
   9344 	if (rd)
   9345 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9346 	else
   9347 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9348 
   9349 	/*
   9350 	 * 3) Disable PHY wakeup register.
   9351 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9352 	 */
   9353 	/* Set page 769 */
   9354 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9355 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9356 
   9357 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9358 }
   9359 
   9360 /*
   9361  * wm_gmii_hv_readreg:	[mii interface function]
   9362  *
    9363  *	Read a PHY register on the HV PHYs (PCH and newer).
    9364  * This could be handled by the PHY layer if we didn't have to lock the
    9365  * resource ...
   9366  */
   9367 static int
   9368 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9369 {
   9370 	struct wm_softc *sc = device_private(self);
   9371 	int rv;
   9372 
   9373 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9374 		device_xname(sc->sc_dev), __func__));
   9375 	if (sc->phy.acquire(sc)) {
   9376 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9377 		    __func__);
   9378 		return 0;
   9379 	}
   9380 
   9381 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9382 	sc->phy.release(sc);
   9383 	return rv;
   9384 }
   9385 
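         /*
          * wm_gmii_hv_readreg_locked:
          *
          *	Read an HV PHY register with the PHY semaphore already held.
          */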
   9386 static int
   9387 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9388 {
   9389 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9390 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9391 	uint16_t val;
   9392 	int rv;
   9393 
   9394 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9395 
   9396 	/* Page 800 works differently than the rest so it has its own func */
   9397 	if (page == BM_WUC_PAGE) {
   9398 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9399 		return val;
   9400 	}
   9401 
   9402 	/*
   9403 	 * Lower than page 768 works differently than the rest so it has its
   9404 	 * own func
   9405 	 */
   9406 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9407 		printf("gmii_hv_readreg!!!\n");
   9408 		return 0;
   9409 	}
   9410 
   9411 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9412 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9413 		    page << BME1000_PAGE_SHIFT);
   9414 	}
   9415 
   9416 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9417 	return rv;
   9418 }
   9419 
   9420 /*
   9421  * wm_gmii_hv_writereg:	[mii interface function]
   9422  *
    9423  *	Write a PHY register on the HV PHYs (PCH and newer).
    9424  * This could be handled by the PHY layer if we didn't have to lock the
    9425  * resource ...
   9426  */
   9427 static void
   9428 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9429 {
   9430 	struct wm_softc *sc = device_private(self);
   9431 
   9432 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9433 		device_xname(sc->sc_dev), __func__));
   9434 
   9435 	if (sc->phy.acquire(sc)) {
   9436 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9437 		    __func__);
   9438 		return;
   9439 	}
   9440 
   9441 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9442 	sc->phy.release(sc);
   9443 }
   9444 
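         /*
          * wm_gmii_hv_writereg_locked:
          *
          *	Write an HV PHY register with the PHY semaphore already held.
          */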
   9445 static void
   9446 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9447 {
   9448 	struct wm_softc *sc = device_private(self);
   9449 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9450 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9451 
   9452 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9453 
   9454 	/* Page 800 works differently than the rest so it has its own func */
   9455 	if (page == BM_WUC_PAGE) {
   9456 		uint16_t tmp;
   9457 
   9458 		tmp = val;
   9459 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9460 		return;
   9461 	}
   9462 
   9463 	/*
   9464 	 * Lower than page 768 works differently than the rest so it has its
   9465 	 * own func
   9466 	 */
   9467 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9468 		printf("gmii_hv_writereg!!!\n");
   9469 		return;
   9470 	}
   9471 
   9472 	{
   9473 		/*
   9474 		 * XXX Workaround MDIO accesses being disabled after entering
   9475 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9476 		 * register is set)
   9477 		 */
   9478 		if (sc->sc_phytype == WMPHY_82578) {
   9479 			struct mii_softc *child;
   9480 
   9481 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9482 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9483 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9484 			    && ((val & (1 << 11)) != 0)) {
   9485 				printf("XXX need workaround\n");
   9486 			}
   9487 		}
   9488 
   9489 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9490 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9491 			    page << BME1000_PAGE_SHIFT);
   9492 		}
   9493 	}
   9494 
   9495 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9496 }
   9497 
   9498 /*
   9499  * wm_gmii_82580_readreg:	[mii interface function]
   9500  *
   9501  *	Read a PHY register on the 82580 and I350.
   9502  * This could be handled by the PHY layer if we didn't have to lock the
    9503  * resource ...
   9504  */
   9505 static int
   9506 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9507 {
   9508 	struct wm_softc *sc = device_private(self);
   9509 	int rv;
   9510 
   9511 	if (sc->phy.acquire(sc) != 0) {
   9512 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9513 		    __func__);
   9514 		return 0;
   9515 	}
   9516 
   9517 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9518 
   9519 	sc->phy.release(sc);
   9520 	return rv;
   9521 }
   9522 
   9523 /*
   9524  * wm_gmii_82580_writereg:	[mii interface function]
   9525  *
   9526  *	Write a PHY register on the 82580 and I350.
   9527  * This could be handled by the PHY layer if we didn't have to lock the
    9528  * resource ...
   9529  */
   9530 static void
   9531 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9532 {
   9533 	struct wm_softc *sc = device_private(self);
   9534 
   9535 	if (sc->phy.acquire(sc) != 0) {
   9536 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9537 		    __func__);
   9538 		return;
   9539 	}
   9540 
   9541 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9542 
   9543 	sc->phy.release(sc);
   9544 }
   9545 
   9546 /*
   9547  * wm_gmii_gs40g_readreg:	[mii interface function]
   9548  *
    9549  *	Read a PHY register on the I210 and I211.
    9550  * This could be handled by the PHY layer if we didn't have to lock the
    9551  * resource ...
   9552  */
   9553 static int
   9554 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9555 {
   9556 	struct wm_softc *sc = device_private(self);
   9557 	int page, offset;
   9558 	int rv;
   9559 
   9560 	/* Acquire semaphore */
   9561 	if (sc->phy.acquire(sc)) {
   9562 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9563 		    __func__);
   9564 		return 0;
   9565 	}
   9566 
   9567 	/* Page select */
   9568 	page = reg >> GS40G_PAGE_SHIFT;
   9569 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9570 
   9571 	/* Read reg */
   9572 	offset = reg & GS40G_OFFSET_MASK;
   9573 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9574 
   9575 	sc->phy.release(sc);
   9576 	return rv;
   9577 }
   9578 
   9579 /*
   9580  * wm_gmii_gs40g_writereg:	[mii interface function]
   9581  *
   9582  *	Write a PHY register on the I210 and I211.
   9583  * This could be handled by the PHY layer if we didn't have to lock the
    9584  * resource ...
   9585  */
   9586 static void
   9587 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9588 {
   9589 	struct wm_softc *sc = device_private(self);
   9590 	int page, offset;
   9591 
   9592 	/* Acquire semaphore */
   9593 	if (sc->phy.acquire(sc)) {
   9594 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9595 		    __func__);
   9596 		return;
   9597 	}
   9598 
   9599 	/* Page select */
   9600 	page = reg >> GS40G_PAGE_SHIFT;
   9601 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9602 
   9603 	/* Write reg */
   9604 	offset = reg & GS40G_OFFSET_MASK;
   9605 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9606 
   9607 	/* Release semaphore */
   9608 	sc->phy.release(sc);
   9609 }
   9610 
   9611 /*
   9612  * wm_gmii_statchg:	[mii interface function]
   9613  *
   9614  *	Callback from MII layer when media changes.
   9615  */
   9616 static void
   9617 wm_gmii_statchg(struct ifnet *ifp)
   9618 {
   9619 	struct wm_softc *sc = ifp->if_softc;
   9620 	struct mii_data *mii = &sc->sc_mii;
   9621 
   9622 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9623 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9624 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9625 
   9626 	/*
   9627 	 * Get flow control negotiation result.
   9628 	 */
   9629 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9630 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9631 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9632 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9633 	}
   9634 
   9635 	if (sc->sc_flowflags & IFM_FLOW) {
   9636 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9637 			sc->sc_ctrl |= CTRL_TFCE;
   9638 			sc->sc_fcrtl |= FCRTL_XONE;
   9639 		}
   9640 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9641 			sc->sc_ctrl |= CTRL_RFCE;
   9642 	}
   9643 
   9644 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9645 		DPRINTF(WM_DEBUG_LINK,
   9646 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9647 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9648 	} else {
   9649 		DPRINTF(WM_DEBUG_LINK,
   9650 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9651 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9652 	}
   9653 
   9654 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9655 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9656 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9657 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9658 	if (sc->sc_type == WM_T_80003) {
   9659 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9660 		case IFM_1000_T:
   9661 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9662 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9663 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9664 			break;
   9665 		default:
   9666 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9667 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9668 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9669 			break;
   9670 		}
   9671 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9672 	}
   9673 }
   9674 
   9675 /* kumeran related (80003, ICH* and PCH*) */
   9676 
   9677 /*
   9678  * wm_kmrn_readreg:
   9679  *
   9680  *	Read a kumeran register
   9681  */
   9682 static int
   9683 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9684 {
   9685 	int rv;
   9686 
   9687 	if (sc->sc_type == WM_T_80003)
   9688 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9689 	else
   9690 		rv = sc->phy.acquire(sc);
   9691 	if (rv != 0) {
   9692 		aprint_error_dev(sc->sc_dev,
   9693 		    "%s: failed to get semaphore\n", __func__);
   9694 		return 0;
   9695 	}
   9696 
   9697 	rv = wm_kmrn_readreg_locked(sc, reg);
   9698 
   9699 	if (sc->sc_type == WM_T_80003)
   9700 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9701 	else
   9702 		sc->phy.release(sc);
   9703 
   9704 	return rv;
   9705 }
   9706 
   9707 static int
   9708 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9709 {
   9710 	int rv;
   9711 
   9712 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9713 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9714 	    KUMCTRLSTA_REN);
   9715 	CSR_WRITE_FLUSH(sc);
   9716 	delay(2);
   9717 
   9718 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9719 
   9720 	return rv;
   9721 }
   9722 
   9723 /*
   9724  * wm_kmrn_writereg:
   9725  *
   9726  *	Write a kumeran register
   9727  */
   9728 static void
   9729 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9730 {
   9731 	int rv;
   9732 
   9733 	if (sc->sc_type == WM_T_80003)
   9734 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9735 	else
   9736 		rv = sc->phy.acquire(sc);
   9737 	if (rv != 0) {
   9738 		aprint_error_dev(sc->sc_dev,
   9739 		    "%s: failed to get semaphore\n", __func__);
   9740 		return;
   9741 	}
   9742 
   9743 	wm_kmrn_writereg_locked(sc, reg, val);
   9744 
   9745 	if (sc->sc_type == WM_T_80003)
   9746 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9747 	else
   9748 		sc->phy.release(sc);
   9749 }
   9750 
   9751 static void
   9752 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9753 {
   9754 
   9755 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9756 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9757 	    (val & KUMCTRLSTA_MASK));
   9758 }
   9759 
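/*
 * Illustrative usage sketch (not part of the driver): reading and then
 * restoring the kumeran half-duplex control word would look like:
 *
 *	int hd = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 */
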
   9760 /* SGMII related */
   9761 
   9762 /*
   9763  * wm_sgmii_uses_mdio
   9764  *
   9765  * Check whether the transaction is to the internal PHY or the external
   9766  * MDIO interface. Return true if it's MDIO.
   9767  */
   9768 static bool
   9769 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9770 {
   9771 	uint32_t reg;
   9772 	bool ismdio = false;
   9773 
   9774 	switch (sc->sc_type) {
   9775 	case WM_T_82575:
   9776 	case WM_T_82576:
   9777 		reg = CSR_READ(sc, WMREG_MDIC);
   9778 		ismdio = ((reg & MDIC_DEST) != 0);
   9779 		break;
   9780 	case WM_T_82580:
   9781 	case WM_T_I350:
   9782 	case WM_T_I354:
   9783 	case WM_T_I210:
   9784 	case WM_T_I211:
   9785 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9786 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9787 		break;
   9788 	default:
   9789 		break;
   9790 	}
   9791 
   9792 	return ismdio;
   9793 }
   9794 
   9795 /*
   9796  * wm_sgmii_readreg:	[mii interface function]
   9797  *
   9798  *	Read a PHY register on the SGMII
   9799  * This could be handled by the PHY layer if we didn't have to lock the
    9800  * resource ...
   9801  */
   9802 static int
   9803 wm_sgmii_readreg(device_t self, int phy, int reg)
   9804 {
   9805 	struct wm_softc *sc = device_private(self);
   9806 	uint32_t i2ccmd;
   9807 	int i, rv;
   9808 
   9809 	if (sc->phy.acquire(sc)) {
   9810 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9811 		    __func__);
   9812 		return 0;
   9813 	}
   9814 
   9815 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9816 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9817 	    | I2CCMD_OPCODE_READ;
   9818 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9819 
   9820 	/* Poll the ready bit */
   9821 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9822 		delay(50);
   9823 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9824 		if (i2ccmd & I2CCMD_READY)
   9825 			break;
   9826 	}
   9827 	if ((i2ccmd & I2CCMD_READY) == 0)
   9828 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9829 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9830 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9831 
   9832 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9833 
   9834 	sc->phy.release(sc);
   9835 	return rv;
   9836 }
   9837 
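/*
 * Illustrative sketch (not part of the driver): the I2CCMD data field is
 * byte-swapped with respect to the MII register value, so if the hardware
 * returns 0x3412 in the low 16 bits of I2CCMD, the swap above recovers the
 * MII value 0x1234:
 *
 *	rv = ((0x3412 >> 8) & 0x00ff)		0x0034
 *	    | ((0x3412 << 8) & 0xff00);		0x1200 -> rv == 0x1234
 */
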
   9838 /*
   9839  * wm_sgmii_writereg:	[mii interface function]
   9840  *
   9841  *	Write a PHY register on the SGMII.
   9842  * This could be handled by the PHY layer if we didn't have to lock the
    9843  * resource ...
   9844  */
   9845 static void
   9846 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9847 {
   9848 	struct wm_softc *sc = device_private(self);
   9849 	uint32_t i2ccmd;
   9850 	int i;
   9851 	int val_swapped;
   9852 
   9853 	if (sc->phy.acquire(sc) != 0) {
   9854 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9855 		    __func__);
   9856 		return;
   9857 	}
   9858 	/* Swap the data bytes for the I2C interface */
   9859 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9860 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9861 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9862 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9863 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9864 
   9865 	/* Poll the ready bit */
   9866 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9867 		delay(50);
   9868 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9869 		if (i2ccmd & I2CCMD_READY)
   9870 			break;
   9871 	}
   9872 	if ((i2ccmd & I2CCMD_READY) == 0)
   9873 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9874 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9875 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9876 
   9877 	sc->phy.release(sc);
   9878 }
   9879 
   9880 /* TBI related */
   9881 
   9882 /*
   9883  * wm_tbi_mediainit:
   9884  *
   9885  *	Initialize media for use on 1000BASE-X devices.
   9886  */
   9887 static void
   9888 wm_tbi_mediainit(struct wm_softc *sc)
   9889 {
   9890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9891 	const char *sep = "";
   9892 
   9893 	if (sc->sc_type < WM_T_82543)
   9894 		sc->sc_tipg = TIPG_WM_DFLT;
   9895 	else
   9896 		sc->sc_tipg = TIPG_LG_DFLT;
   9897 
   9898 	sc->sc_tbi_serdes_anegticks = 5;
   9899 
   9900 	/* Initialize our media structures */
   9901 	sc->sc_mii.mii_ifp = ifp;
   9902 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9903 
   9904 	if ((sc->sc_type >= WM_T_82575)
   9905 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9906 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9907 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9908 	else
   9909 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9910 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9911 
   9912 	/*
   9913 	 * SWD Pins:
   9914 	 *
   9915 	 *	0 = Link LED (output)
   9916 	 *	1 = Loss Of Signal (input)
   9917 	 */
   9918 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9919 
   9920 	/* XXX Perhaps this is only for TBI */
   9921 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9922 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9923 
   9924 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9925 		sc->sc_ctrl &= ~CTRL_LRST;
   9926 
   9927 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9928 
   9929 #define	ADD(ss, mm, dd)							\
   9930 do {									\
   9931 	aprint_normal("%s%s", sep, ss);					\
   9932 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9933 	sep = ", ";							\
   9934 } while (/*CONSTCOND*/0)
   9935 
   9936 	aprint_normal_dev(sc->sc_dev, "");
   9937 
   9938 	if (sc->sc_type == WM_T_I354) {
   9939 		uint32_t status;
   9940 
   9941 		status = CSR_READ(sc, WMREG_STATUS);
   9942 		if (((status & STATUS_2P5_SKU) != 0)
   9943 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    9944 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX, ANAR_X_FD);
   9945 		} else
    9946 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9947 	} else if (sc->sc_type == WM_T_82545) {
   9948 		/* Only 82545 is LX (XXX except SFP) */
   9949 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9950 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9951 	} else {
   9952 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9953 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9954 	}
   9955 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9956 	aprint_normal("\n");
   9957 
   9958 #undef ADD
   9959 
   9960 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9961 }
   9962 
   9963 /*
   9964  * wm_tbi_mediachange:	[ifmedia interface function]
   9965  *
   9966  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9967  */
   9968 static int
   9969 wm_tbi_mediachange(struct ifnet *ifp)
   9970 {
   9971 	struct wm_softc *sc = ifp->if_softc;
   9972 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9973 	uint32_t status;
   9974 	int i;
   9975 
   9976 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9977 		/* XXX need some work for >= 82571 and < 82575 */
   9978 		if (sc->sc_type < WM_T_82575)
   9979 			return 0;
   9980 	}
   9981 
   9982 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9983 	    || (sc->sc_type >= WM_T_82575))
   9984 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9985 
   9986 	sc->sc_ctrl &= ~CTRL_LRST;
   9987 	sc->sc_txcw = TXCW_ANE;
   9988 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9989 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9990 	else if (ife->ifm_media & IFM_FDX)
   9991 		sc->sc_txcw |= TXCW_FD;
   9992 	else
   9993 		sc->sc_txcw |= TXCW_HD;
   9994 
   9995 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9996 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9997 
   9998 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9999 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10000 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10001 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10002 	CSR_WRITE_FLUSH(sc);
   10003 	delay(1000);
   10004 
   10005 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10006 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10007 
   10008 	/*
    10009 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
    10010 	 * the optics detect a signal, and clear if they don't.
   10011 	 */
   10012 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10013 		/* Have signal; wait for the link to come up. */
   10014 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10015 			delay(10000);
   10016 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10017 				break;
   10018 		}
   10019 
   10020 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10021 			    device_xname(sc->sc_dev),i));
   10022 
   10023 		status = CSR_READ(sc, WMREG_STATUS);
   10024 		DPRINTF(WM_DEBUG_LINK,
   10025 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10026 			device_xname(sc->sc_dev),status, STATUS_LU));
   10027 		if (status & STATUS_LU) {
   10028 			/* Link is up. */
   10029 			DPRINTF(WM_DEBUG_LINK,
   10030 			    ("%s: LINK: set media -> link up %s\n",
   10031 			    device_xname(sc->sc_dev),
   10032 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10033 
   10034 			/*
    10035 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10036 			 * automatically, so we should refresh sc->sc_ctrl here.
   10037 			 */
   10038 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10039 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10040 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10041 			if (status & STATUS_FD)
   10042 				sc->sc_tctl |=
   10043 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10044 			else
   10045 				sc->sc_tctl |=
   10046 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10047 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10048 				sc->sc_fcrtl |= FCRTL_XONE;
   10049 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10050 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10051 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10052 				      sc->sc_fcrtl);
   10053 			sc->sc_tbi_linkup = 1;
   10054 		} else {
   10055 			if (i == WM_LINKUP_TIMEOUT)
   10056 				wm_check_for_link(sc);
   10057 			/* Link is down. */
   10058 			DPRINTF(WM_DEBUG_LINK,
   10059 			    ("%s: LINK: set media -> link down\n",
   10060 			    device_xname(sc->sc_dev)));
   10061 			sc->sc_tbi_linkup = 0;
   10062 		}
   10063 	} else {
   10064 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10065 		    device_xname(sc->sc_dev)));
   10066 		sc->sc_tbi_linkup = 0;
   10067 	}
   10068 
   10069 	wm_tbi_serdes_set_linkled(sc);
   10070 
   10071 	return 0;
   10072 }
   10073 
   10074 /*
   10075  * wm_tbi_mediastatus:	[ifmedia interface function]
   10076  *
   10077  *	Get the current interface media status on a 1000BASE-X device.
   10078  */
   10079 static void
   10080 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10081 {
   10082 	struct wm_softc *sc = ifp->if_softc;
   10083 	uint32_t ctrl, status;
   10084 
   10085 	ifmr->ifm_status = IFM_AVALID;
   10086 	ifmr->ifm_active = IFM_ETHER;
   10087 
   10088 	status = CSR_READ(sc, WMREG_STATUS);
   10089 	if ((status & STATUS_LU) == 0) {
   10090 		ifmr->ifm_active |= IFM_NONE;
   10091 		return;
   10092 	}
   10093 
   10094 	ifmr->ifm_status |= IFM_ACTIVE;
   10095 	/* Only 82545 is LX */
   10096 	if (sc->sc_type == WM_T_82545)
   10097 		ifmr->ifm_active |= IFM_1000_LX;
   10098 	else
   10099 		ifmr->ifm_active |= IFM_1000_SX;
   10100 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10101 		ifmr->ifm_active |= IFM_FDX;
   10102 	else
   10103 		ifmr->ifm_active |= IFM_HDX;
   10104 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10105 	if (ctrl & CTRL_RFCE)
   10106 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10107 	if (ctrl & CTRL_TFCE)
   10108 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10109 }
   10110 
   10111 /* XXX TBI only */
   10112 static int
   10113 wm_check_for_link(struct wm_softc *sc)
   10114 {
   10115 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10116 	uint32_t rxcw;
   10117 	uint32_t ctrl;
   10118 	uint32_t status;
   10119 	uint32_t sig;
   10120 
   10121 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10122 		/* XXX need some work for >= 82571 */
   10123 		if (sc->sc_type >= WM_T_82571) {
   10124 			sc->sc_tbi_linkup = 1;
   10125 			return 0;
   10126 		}
   10127 	}
   10128 
   10129 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10130 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10131 	status = CSR_READ(sc, WMREG_STATUS);
   10132 
   10133 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10134 
   10135 	DPRINTF(WM_DEBUG_LINK,
   10136 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10137 		device_xname(sc->sc_dev), __func__,
   10138 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10139 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10140 
   10141 	/*
   10142 	 * SWDPIN   LU RXCW
   10143 	 *      0    0    0
   10144 	 *      0    0    1	(should not happen)
   10145 	 *      0    1    0	(should not happen)
   10146 	 *      0    1    1	(should not happen)
    10147 	 *      1    0    0	Disable autonegotiation and force link up
    10148 	 *      1    0    1	got /C/ codewords but no link yet
    10149 	 *      1    1    0	(link up)
    10150 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   10151 	 *
   10152 	 */
   10153 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10154 	    && ((status & STATUS_LU) == 0)
   10155 	    && ((rxcw & RXCW_C) == 0)) {
   10156 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10157 			__func__));
   10158 		sc->sc_tbi_linkup = 0;
   10159 		/* Disable auto-negotiation in the TXCW register */
   10160 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10161 
   10162 		/*
   10163 		 * Force link-up and also force full-duplex.
   10164 		 *
    10165 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10166 		 * automatically, so we should refresh sc->sc_ctrl here.
   10167 		 */
   10168 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10169 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10170 	} else if (((status & STATUS_LU) != 0)
   10171 	    && ((rxcw & RXCW_C) != 0)
   10172 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10173 		sc->sc_tbi_linkup = 1;
   10174 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10175 			__func__));
   10176 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10177 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10178 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10179 	    && ((rxcw & RXCW_C) != 0)) {
   10180 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10181 	} else {
   10182 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10183 			status));
   10184 	}
   10185 
   10186 	return 0;
   10187 }
   10188 
   10189 /*
   10190  * wm_tbi_tick:
   10191  *
   10192  *	Check the link on TBI devices.
   10193  *	This function acts as mii_tick().
   10194  */
   10195 static void
   10196 wm_tbi_tick(struct wm_softc *sc)
   10197 {
   10198 	struct mii_data *mii = &sc->sc_mii;
   10199 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10200 	uint32_t status;
   10201 
   10202 	KASSERT(WM_CORE_LOCKED(sc));
   10203 
   10204 	status = CSR_READ(sc, WMREG_STATUS);
   10205 
   10206 	/* XXX is this needed? */
   10207 	(void)CSR_READ(sc, WMREG_RXCW);
   10208 	(void)CSR_READ(sc, WMREG_CTRL);
   10209 
   10210 	/* set link status */
   10211 	if ((status & STATUS_LU) == 0) {
   10212 		DPRINTF(WM_DEBUG_LINK,
   10213 		    ("%s: LINK: checklink -> down\n",
   10214 			device_xname(sc->sc_dev)));
   10215 		sc->sc_tbi_linkup = 0;
   10216 	} else if (sc->sc_tbi_linkup == 0) {
   10217 		DPRINTF(WM_DEBUG_LINK,
   10218 		    ("%s: LINK: checklink -> up %s\n",
   10219 			device_xname(sc->sc_dev),
   10220 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10221 		sc->sc_tbi_linkup = 1;
   10222 		sc->sc_tbi_serdes_ticks = 0;
   10223 	}
   10224 
   10225 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10226 		goto setled;
   10227 
   10228 	if ((status & STATUS_LU) == 0) {
   10229 		sc->sc_tbi_linkup = 0;
   10230 		/* If the timer expired, retry autonegotiation */
   10231 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10232 		    && (++sc->sc_tbi_serdes_ticks
   10233 			>= sc->sc_tbi_serdes_anegticks)) {
   10234 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10235 			sc->sc_tbi_serdes_ticks = 0;
   10236 			/*
   10237 			 * Reset the link, and let autonegotiation do
   10238 			 * its thing
   10239 			 */
   10240 			sc->sc_ctrl |= CTRL_LRST;
   10241 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10242 			CSR_WRITE_FLUSH(sc);
   10243 			delay(1000);
   10244 			sc->sc_ctrl &= ~CTRL_LRST;
   10245 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10246 			CSR_WRITE_FLUSH(sc);
   10247 			delay(1000);
   10248 			CSR_WRITE(sc, WMREG_TXCW,
   10249 			    sc->sc_txcw & ~TXCW_ANE);
   10250 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10251 		}
   10252 	}
   10253 
   10254 setled:
   10255 	wm_tbi_serdes_set_linkled(sc);
   10256 }
   10257 
   10258 /* SERDES related */
   10259 static void
   10260 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10261 {
   10262 	uint32_t reg;
   10263 
   10264 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10265 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10266 		return;
   10267 
   10268 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10269 	reg |= PCS_CFG_PCS_EN;
   10270 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10271 
   10272 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10273 	reg &= ~CTRL_EXT_SWDPIN(3);
   10274 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10275 	CSR_WRITE_FLUSH(sc);
   10276 }
   10277 
   10278 static int
   10279 wm_serdes_mediachange(struct ifnet *ifp)
   10280 {
   10281 	struct wm_softc *sc = ifp->if_softc;
   10282 	bool pcs_autoneg = true; /* XXX */
   10283 	uint32_t ctrl_ext, pcs_lctl, reg;
   10284 
   10285 	/* XXX Currently, this function is not called on 8257[12] */
   10286 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10287 	    || (sc->sc_type >= WM_T_82575))
   10288 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10289 
   10290 	wm_serdes_power_up_link_82575(sc);
   10291 
   10292 	sc->sc_ctrl |= CTRL_SLU;
   10293 
   10294 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10295 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10296 
   10297 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10298 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10299 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10300 	case CTRL_EXT_LINK_MODE_SGMII:
   10301 		pcs_autoneg = true;
   10302 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10303 		break;
   10304 	case CTRL_EXT_LINK_MODE_1000KX:
   10305 		pcs_autoneg = false;
   10306 		/* FALLTHROUGH */
   10307 	default:
   10308 		if ((sc->sc_type == WM_T_82575)
   10309 		    || (sc->sc_type == WM_T_82576)) {
   10310 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10311 				pcs_autoneg = false;
   10312 		}
   10313 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10314 		    | CTRL_FRCFDX;
   10315 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10316 	}
   10317 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10318 
   10319 	if (pcs_autoneg) {
   10320 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10321 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10322 
   10323 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10324 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10325 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10326 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10327 	} else
   10328 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10329 
   10330 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10331 
   10333 	return 0;
   10334 }
   10335 
   10336 static void
   10337 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10338 {
   10339 	struct wm_softc *sc = ifp->if_softc;
   10340 	struct mii_data *mii = &sc->sc_mii;
   10341 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10342 	uint32_t pcs_adv, pcs_lpab, reg;
   10343 
   10344 	ifmr->ifm_status = IFM_AVALID;
   10345 	ifmr->ifm_active = IFM_ETHER;
   10346 
   10347 	/* Check PCS */
   10348 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10349 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10350 		ifmr->ifm_active |= IFM_NONE;
   10351 		sc->sc_tbi_linkup = 0;
   10352 		goto setled;
   10353 	}
   10354 
   10355 	sc->sc_tbi_linkup = 1;
   10356 	ifmr->ifm_status |= IFM_ACTIVE;
   10357 	if (sc->sc_type == WM_T_I354) {
   10358 		uint32_t status;
   10359 
   10360 		status = CSR_READ(sc, WMREG_STATUS);
   10361 		if (((status & STATUS_2P5_SKU) != 0)
   10362 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10363 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10364 		} else
   10365 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10366 	} else {
   10367 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10368 		case PCS_LSTS_SPEED_10:
   10369 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10370 			break;
   10371 		case PCS_LSTS_SPEED_100:
   10372 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10373 			break;
   10374 		case PCS_LSTS_SPEED_1000:
   10375 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10376 			break;
   10377 		default:
   10378 			device_printf(sc->sc_dev, "Unknown speed\n");
   10379 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10380 			break;
   10381 		}
   10382 	}
   10383 	if ((reg & PCS_LSTS_FDX) != 0)
   10384 		ifmr->ifm_active |= IFM_FDX;
   10385 	else
   10386 		ifmr->ifm_active |= IFM_HDX;
   10387 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10388 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10389 		/* Check flow */
   10390 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10391 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10392 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10393 			goto setled;
   10394 		}
   10395 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10396 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10397 		DPRINTF(WM_DEBUG_LINK,
   10398 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10399 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10400 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10401 			mii->mii_media_active |= IFM_FLOW
   10402 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10403 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10404 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10405 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10406 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10407 			mii->mii_media_active |= IFM_FLOW
   10408 			    | IFM_ETH_TXPAUSE;
   10409 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10410 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10411 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10412 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10413 			mii->mii_media_active |= IFM_FLOW
   10414 			    | IFM_ETH_RXPAUSE;
   10415 		}
   10416 	}
   10417 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10418 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10419 setled:
   10420 	wm_tbi_serdes_set_linkled(sc);
   10421 }
   10422 
   10423 /*
   10424  * wm_serdes_tick:
   10425  *
   10426  *	Check the link on serdes devices.
   10427  */
   10428 static void
   10429 wm_serdes_tick(struct wm_softc *sc)
   10430 {
   10431 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10432 	struct mii_data *mii = &sc->sc_mii;
   10433 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10434 	uint32_t reg;
   10435 
   10436 	KASSERT(WM_CORE_LOCKED(sc));
   10437 
   10438 	mii->mii_media_status = IFM_AVALID;
   10439 	mii->mii_media_active = IFM_ETHER;
   10440 
   10441 	/* Check PCS */
   10442 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10443 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10444 		mii->mii_media_status |= IFM_ACTIVE;
   10445 		sc->sc_tbi_linkup = 1;
   10446 		sc->sc_tbi_serdes_ticks = 0;
   10447 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10448 		if ((reg & PCS_LSTS_FDX) != 0)
   10449 			mii->mii_media_active |= IFM_FDX;
   10450 		else
   10451 			mii->mii_media_active |= IFM_HDX;
   10452 	} else {
   10453 		mii->mii_media_status |= IFM_NONE;
   10454 		sc->sc_tbi_linkup = 0;
   10455 		/* If the timer expired, retry autonegotiation */
   10456 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10457 		    && (++sc->sc_tbi_serdes_ticks
   10458 			>= sc->sc_tbi_serdes_anegticks)) {
   10459 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10460 			sc->sc_tbi_serdes_ticks = 0;
   10461 			/* XXX */
   10462 			wm_serdes_mediachange(ifp);
   10463 		}
   10464 	}
   10465 
   10466 	wm_tbi_serdes_set_linkled(sc);
   10467 }
   10468 
   10469 /* SFP related */
   10470 
   10471 static int
   10472 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10473 {
   10474 	uint32_t i2ccmd;
   10475 	int i;
   10476 
   10477 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10478 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10479 
   10480 	/* Poll the ready bit */
   10481 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10482 		delay(50);
   10483 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10484 		if (i2ccmd & I2CCMD_READY)
   10485 			break;
   10486 	}
   10487 	if ((i2ccmd & I2CCMD_READY) == 0)
   10488 		return -1;
   10489 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10490 		return -1;
   10491 
   10492 	*data = i2ccmd & 0x00ff;
   10493 
   10494 	return 0;
   10495 }
   10496 
   10497 static uint32_t
   10498 wm_sfp_get_media_type(struct wm_softc *sc)
   10499 {
   10500 	uint32_t ctrl_ext;
   10501 	uint8_t val = 0;
   10502 	int timeout = 3;
   10503 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10504 	int rv = -1;
   10505 
   10506 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10507 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10508 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10509 	CSR_WRITE_FLUSH(sc);
   10510 
   10511 	/* Read SFP module data */
   10512 	while (timeout) {
   10513 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10514 		if (rv == 0)
   10515 			break;
   10516 		delay(100*1000); /* XXX too big */
   10517 		timeout--;
   10518 	}
   10519 	if (rv != 0)
   10520 		goto out;
   10521 	switch (val) {
   10522 	case SFF_SFP_ID_SFF:
   10523 		aprint_normal_dev(sc->sc_dev,
   10524 		    "Module/Connector soldered to board\n");
   10525 		break;
   10526 	case SFF_SFP_ID_SFP:
   10527 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10528 		break;
   10529 	case SFF_SFP_ID_UNKNOWN:
   10530 		goto out;
   10531 	default:
   10532 		break;
   10533 	}
   10534 
   10535 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10536 	if (rv != 0) {
   10537 		goto out;
   10538 	}
   10539 
   10540 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10541 		mediatype = WM_MEDIATYPE_SERDES;
    10542 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   10543 		sc->sc_flags |= WM_F_SGMII;
   10544 		mediatype = WM_MEDIATYPE_COPPER;
    10545 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10546 		sc->sc_flags |= WM_F_SGMII;
   10547 		mediatype = WM_MEDIATYPE_SERDES;
   10548 	}
   10549 
   10550 out:
   10551 	/* Restore I2C interface setting */
   10552 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10553 
   10554 	return mediatype;
   10555 }
   10556 
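/*
 * Illustrative sketch (not part of the driver): a plain 1000BASE-SX module
 * would answer SFF_SFP_ID_SFP at offset SFF_SFP_ID_OFF and have
 * SFF_SFP_ETH_FLAGS_1000SX set at SFF_SFP_ETH_FLAGS_OFF, so the probe above
 * would return WM_MEDIATYPE_SERDES without setting WM_F_SGMII.
 */
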
   10557 /*
   10558  * NVM related.
   10559  * Microwire, SPI (w/wo EERD) and Flash.
   10560  */
   10561 
   10562 /* Both spi and uwire */
   10563 
   10564 /*
   10565  * wm_eeprom_sendbits:
   10566  *
   10567  *	Send a series of bits to the EEPROM.
   10568  */
   10569 static void
   10570 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10571 {
   10572 	uint32_t reg;
   10573 	int x;
   10574 
   10575 	reg = CSR_READ(sc, WMREG_EECD);
   10576 
   10577 	for (x = nbits; x > 0; x--) {
   10578 		if (bits & (1U << (x - 1)))
   10579 			reg |= EECD_DI;
   10580 		else
   10581 			reg &= ~EECD_DI;
   10582 		CSR_WRITE(sc, WMREG_EECD, reg);
   10583 		CSR_WRITE_FLUSH(sc);
   10584 		delay(2);
   10585 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10586 		CSR_WRITE_FLUSH(sc);
   10587 		delay(2);
   10588 		CSR_WRITE(sc, WMREG_EECD, reg);
   10589 		CSR_WRITE_FLUSH(sc);
   10590 		delay(2);
   10591 	}
   10592 }
   10593 
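/*
 * Illustrative sketch (not part of the driver): bits go out MSB first, with
 * DI valid before each rising edge of SK.  Sending the 3-bit MicroWire READ
 * opcode (110b) therefore clocks out 1, 1, 0:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 */
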
   10594 /*
   10595  * wm_eeprom_recvbits:
   10596  *
   10597  *	Receive a series of bits from the EEPROM.
   10598  */
   10599 static void
   10600 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10601 {
   10602 	uint32_t reg, val;
   10603 	int x;
   10604 
   10605 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10606 
   10607 	val = 0;
   10608 	for (x = nbits; x > 0; x--) {
   10609 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10610 		CSR_WRITE_FLUSH(sc);
   10611 		delay(2);
   10612 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10613 			val |= (1U << (x - 1));
   10614 		CSR_WRITE(sc, WMREG_EECD, reg);
   10615 		CSR_WRITE_FLUSH(sc);
   10616 		delay(2);
   10617 	}
   10618 	*valp = val;
   10619 }
   10620 
   10621 /* Microwire */
   10622 
   10623 /*
   10624  * wm_nvm_read_uwire:
   10625  *
   10626  *	Read a word from the EEPROM using the MicroWire protocol.
   10627  */
   10628 static int
   10629 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10630 {
   10631 	uint32_t reg, val;
   10632 	int i;
   10633 
   10634 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10635 		device_xname(sc->sc_dev), __func__));
   10636 
   10637 	for (i = 0; i < wordcnt; i++) {
   10638 		/* Clear SK and DI. */
   10639 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10640 		CSR_WRITE(sc, WMREG_EECD, reg);
   10641 
   10642 		/*
   10643 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10644 		 * and Xen.
   10645 		 *
   10646 		 * We use this workaround only for 82540 because qemu's
   10647 		 * e1000 act as 82540.
   10648 		 */
   10649 		if (sc->sc_type == WM_T_82540) {
   10650 			reg |= EECD_SK;
   10651 			CSR_WRITE(sc, WMREG_EECD, reg);
   10652 			reg &= ~EECD_SK;
   10653 			CSR_WRITE(sc, WMREG_EECD, reg);
   10654 			CSR_WRITE_FLUSH(sc);
   10655 			delay(2);
   10656 		}
   10657 		/* XXX: end of workaround */
   10658 
   10659 		/* Set CHIP SELECT. */
   10660 		reg |= EECD_CS;
   10661 		CSR_WRITE(sc, WMREG_EECD, reg);
   10662 		CSR_WRITE_FLUSH(sc);
   10663 		delay(2);
   10664 
   10665 		/* Shift in the READ command. */
   10666 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10667 
   10668 		/* Shift in address. */
   10669 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10670 
   10671 		/* Shift out the data. */
   10672 		wm_eeprom_recvbits(sc, &val, 16);
   10673 		data[i] = val & 0xffff;
   10674 
   10675 		/* Clear CHIP SELECT. */
   10676 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10677 		CSR_WRITE(sc, WMREG_EECD, reg);
   10678 		CSR_WRITE_FLUSH(sc);
   10679 		delay(2);
   10680 	}
   10681 
   10682 	return 0;
   10683 }
   10684 
   10685 /* SPI */
   10686 
   10687 /*
   10688  * Set SPI and FLASH related information from the EECD register.
   10689  * For 82541 and 82547, the word size is taken from EEPROM.
   10690  */
   10691 static int
   10692 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10693 {
   10694 	int size;
   10695 	uint32_t reg;
   10696 	uint16_t data;
   10697 
   10698 	reg = CSR_READ(sc, WMREG_EECD);
   10699 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10700 
   10701 	/* Read the size of NVM from EECD by default */
   10702 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10703 	switch (sc->sc_type) {
   10704 	case WM_T_82541:
   10705 	case WM_T_82541_2:
   10706 	case WM_T_82547:
   10707 	case WM_T_82547_2:
   10708 		/* Set dummy value to access EEPROM */
   10709 		sc->sc_nvm_wordsize = 64;
   10710 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10711 		reg = data;
   10712 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10713 		if (size == 0)
   10714 			size = 6; /* 64 word size */
   10715 		else
   10716 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10717 		break;
   10718 	case WM_T_80003:
   10719 	case WM_T_82571:
   10720 	case WM_T_82572:
   10721 	case WM_T_82573: /* SPI case */
   10722 	case WM_T_82574: /* SPI case */
   10723 	case WM_T_82583: /* SPI case */
   10724 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10725 		if (size > 14)
   10726 			size = 14;
   10727 		break;
   10728 	case WM_T_82575:
   10729 	case WM_T_82576:
   10730 	case WM_T_82580:
   10731 	case WM_T_I350:
   10732 	case WM_T_I354:
   10733 	case WM_T_I210:
   10734 	case WM_T_I211:
   10735 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10736 		if (size > 15)
   10737 			size = 15;
   10738 		break;
   10739 	default:
   10740 		aprint_error_dev(sc->sc_dev,
   10741 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10742 		return -1;
   10743 		break;
   10744 	}
   10745 
   10746 	sc->sc_nvm_wordsize = 1 << size;
   10747 
   10748 	return 0;
   10749 }
   10750 
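/*
 * Illustrative sketch (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * shared Intel code): on an 82571 whose EECD size field reads 2, the
 * computation above gives size = 2 + 6 = 8, i.e. a 1 << 8 = 256 word
 * (512 byte) NVM.
 */
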
   10751 /*
   10752  * wm_nvm_ready_spi:
   10753  *
   10754  *	Wait for a SPI EEPROM to be ready for commands.
   10755  */
   10756 static int
   10757 wm_nvm_ready_spi(struct wm_softc *sc)
   10758 {
   10759 	uint32_t val;
   10760 	int usec;
   10761 
   10762 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10763 		device_xname(sc->sc_dev), __func__));
   10764 
   10765 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10766 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10767 		wm_eeprom_recvbits(sc, &val, 8);
   10768 		if ((val & SPI_SR_RDY) == 0)
   10769 			break;
   10770 	}
   10771 	if (usec >= SPI_MAX_RETRIES) {
    10772 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10773 		return 1;
   10774 	}
   10775 	return 0;
   10776 }
   10777 
   10778 /*
   10779  * wm_nvm_read_spi:
   10780  *
    10781  *	Read a word from the EEPROM using the SPI protocol.
   10782  */
   10783 static int
   10784 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10785 {
   10786 	uint32_t reg, val;
   10787 	int i;
   10788 	uint8_t opc;
   10789 
   10790 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10791 		device_xname(sc->sc_dev), __func__));
   10792 
   10793 	/* Clear SK and CS. */
   10794 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10795 	CSR_WRITE(sc, WMREG_EECD, reg);
   10796 	CSR_WRITE_FLUSH(sc);
   10797 	delay(2);
   10798 
   10799 	if (wm_nvm_ready_spi(sc))
   10800 		return 1;
   10801 
   10802 	/* Toggle CS to flush commands. */
   10803 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10804 	CSR_WRITE_FLUSH(sc);
   10805 	delay(2);
   10806 	CSR_WRITE(sc, WMREG_EECD, reg);
   10807 	CSR_WRITE_FLUSH(sc);
   10808 	delay(2);
   10809 
   10810 	opc = SPI_OPC_READ;
   10811 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10812 		opc |= SPI_OPC_A8;
   10813 
   10814 	wm_eeprom_sendbits(sc, opc, 8);
   10815 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10816 
   10817 	for (i = 0; i < wordcnt; i++) {
   10818 		wm_eeprom_recvbits(sc, &val, 16);
   10819 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10820 	}
   10821 
   10822 	/* Raise CS and clear SK. */
   10823 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10824 	CSR_WRITE(sc, WMREG_EECD, reg);
   10825 	CSR_WRITE_FLUSH(sc);
   10826 	delay(2);
   10827 
   10828 	return 0;
   10829 }
   10830 
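/*
 * Illustrative sketch (not part of the driver): SPI EEPROMs are byte
 * addressed while the driver deals in 16-bit words, hence the "word << 1"
 * above; word 0x40 is fetched from byte address 0x80.  On 8-bit-address
 * parts the ninth address bit travels in the opcode as SPI_OPC_A8.
 */
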
   10831 /* Using with EERD */
   10832 
   10833 static int
   10834 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10835 {
   10836 	uint32_t attempts = 100000;
   10837 	uint32_t i, reg = 0;
   10838 	int32_t done = -1;
   10839 
   10840 	for (i = 0; i < attempts; i++) {
   10841 		reg = CSR_READ(sc, rw);
   10842 
   10843 		if (reg & EERD_DONE) {
   10844 			done = 0;
   10845 			break;
   10846 		}
   10847 		delay(5);
   10848 	}
   10849 
   10850 	return done;
   10851 }
   10852 
   10853 static int
   10854 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10855     uint16_t *data)
   10856 {
   10857 	int i, eerd = 0;
   10858 	int error = 0;
   10859 
   10860 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10861 		device_xname(sc->sc_dev), __func__));
   10862 
   10863 	for (i = 0; i < wordcnt; i++) {
   10864 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10865 
   10866 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10867 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10868 		if (error != 0)
   10869 			break;
   10870 
   10871 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10872 	}
   10873 
   10874 	return error;
   10875 }
   10876 
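/*
 * Illustrative sketch (not part of the driver): reading word 3 via EERD
 * amounts to one register write followed by a poll and a data read:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (3 << EERD_ADDR_SHIFT) | EERD_START);
 *	(void)wm_poll_eerd_eewr_done(sc, WMREG_EERD);
 *	val = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */
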
   10877 /* Flash */
   10878 
   10879 static int
   10880 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10881 {
   10882 	uint32_t eecd;
   10883 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10884 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10885 	uint8_t sig_byte = 0;
   10886 
   10887 	switch (sc->sc_type) {
   10888 	case WM_T_PCH_SPT:
   10889 		/*
   10890 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10891 		 * sector valid bits from the NVM.
   10892 		 */
   10893 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10894 		if ((*bank == 0) || (*bank == 1)) {
   10895 			aprint_error_dev(sc->sc_dev,
   10896 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10897 				*bank);
   10898 			return -1;
   10899 		} else {
   10900 			*bank = *bank - 2;
   10901 			return 0;
   10902 		}
   10903 	case WM_T_ICH8:
   10904 	case WM_T_ICH9:
   10905 		eecd = CSR_READ(sc, WMREG_EECD);
   10906 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10907 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10908 			return 0;
   10909 		}
   10910 		/* FALLTHROUGH */
   10911 	default:
   10912 		/* Default to 0 */
   10913 		*bank = 0;
   10914 
   10915 		/* Check bank 0 */
   10916 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10917 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10918 			*bank = 0;
   10919 			return 0;
   10920 		}
   10921 
   10922 		/* Check bank 1 */
   10923 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10924 		    &sig_byte);
   10925 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10926 			*bank = 1;
   10927 			return 0;
   10928 		}
   10929 	}
   10930 
   10931 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10932 		device_xname(sc->sc_dev)));
   10933 	return -1;
   10934 }
   10935 
   10936 /******************************************************************************
   10937  * This function does initial flash setup so that a new read/write/erase cycle
   10938  * can be started.
   10939  *
   10940  * sc - The pointer to the hw structure
   10941  ****************************************************************************/
   10942 static int32_t
   10943 wm_ich8_cycle_init(struct wm_softc *sc)
   10944 {
   10945 	uint16_t hsfsts;
   10946 	int32_t error = 1;
   10947 	int32_t i     = 0;
   10948 
   10949 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10950 
    10951 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   10952 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10953 		return error;
   10954 	}
   10955 
    10956 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   10958 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10959 
   10960 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10961 
   10962 	/*
    10963 	 * To start a new cycle, either the hardware should provide a SPI
    10964 	 * "cycle in progress" bit to check against, or the FDONE bit should
    10965 	 * be set to 1 after a hardware reset so that it can serve as an
    10966 	 * indication of whether a cycle is in progress or has completed.
    10967 	 * We should also have some software semaphore mechanism to guard
    10968 	 * FDONE or the cycle-in-progress bit so that accesses to them by two
    10969 	 * threads are serialized, or some other way to keep two threads from
    10970 	 * starting a cycle at the same time.
   10971 	 */
   10972 
   10973 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10974 		/*
   10975 		 * There is no cycle running at present, so we can start a
   10976 		 * cycle
   10977 		 */
   10978 
   10979 		/* Begin by setting Flash Cycle Done. */
   10980 		hsfsts |= HSFSTS_DONE;
   10981 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10982 		error = 0;
   10983 	} else {
   10984 		/*
    10985 		 * Otherwise, poll for some time so the current cycle has a
   10986 		 * chance to end before giving up.
   10987 		 */
   10988 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10989 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10990 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10991 				error = 0;
   10992 				break;
   10993 			}
   10994 			delay(1);
   10995 		}
   10996 		if (error == 0) {
   10997 			/*
    10998 			 * We successfully waited for the previous cycle to
    10999 			 * finish; now set the Flash Cycle Done bit.
   11000 			 */
   11001 			hsfsts |= HSFSTS_DONE;
   11002 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11003 		}
   11004 	}
   11005 	return error;
   11006 }
   11007 
   11008 /******************************************************************************
   11009  * This function starts a flash cycle and waits for its completion
   11010  *
   11011  * sc - The pointer to the hw structure
   11012  ****************************************************************************/
   11013 static int32_t
   11014 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11015 {
   11016 	uint16_t hsflctl;
   11017 	uint16_t hsfsts;
   11018 	int32_t error = 1;
   11019 	uint32_t i = 0;
   11020 
   11021 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11022 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11023 	hsflctl |= HSFCTL_GO;
   11024 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11025 
   11026 	/* Wait till FDONE bit is set to 1 */
   11027 	do {
   11028 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11029 		if (hsfsts & HSFSTS_DONE)
   11030 			break;
   11031 		delay(1);
   11032 		i++;
   11033 	} while (i < timeout);
    11034 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11035 		error = 0;
   11036 
   11037 	return error;
   11038 }
   11039 
   11040 /******************************************************************************
   11041  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11042  *
   11043  * sc - The pointer to the hw structure
   11044  * index - The index of the byte or word to read.
   11045  * size - Size of data to read, 1=byte 2=word, 4=dword
   11046  * data - Pointer to the word to store the value read.
   11047  *****************************************************************************/
   11048 static int32_t
   11049 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11050     uint32_t size, uint32_t *data)
   11051 {
   11052 	uint16_t hsfsts;
   11053 	uint16_t hsflctl;
   11054 	uint32_t flash_linear_address;
   11055 	uint32_t flash_data = 0;
   11056 	int32_t error = 1;
   11057 	int32_t count = 0;
   11058 
    11059 	if (size < 1 || size > 4 || data == NULL ||
   11060 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11061 		return error;
   11062 
   11063 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11064 	    sc->sc_ich8_flash_base;
   11065 
   11066 	do {
   11067 		delay(1);
   11068 		/* Steps */
   11069 		error = wm_ich8_cycle_init(sc);
   11070 		if (error)
   11071 			break;
   11072 
   11073 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11074 		/* The byte count field holds size - 1 (0 means one byte). */
   11075 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11076 		    & HSFCTL_BCOUNT_MASK;
   11077 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11078 		if (sc->sc_type == WM_T_PCH_SPT) {
   11079 			/*
   11080 			 * In SPT, This register is in Lan memory space, not
   11081 			 * flash. Therefore, only 32 bit access is supported.
   11082 			 */
   11083 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11084 			    (uint32_t)hsflctl);
   11085 		} else
   11086 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11087 
   11088 		/*
   11089 		 * Write the last 24 bits of index into Flash Linear address
   11090 		 * field in Flash Address
   11091 		 */
   11092 		/* TODO: TBD maybe check the index against the size of flash */
   11093 
   11094 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11095 
   11096 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11097 
   11098 		/*
   11099 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   11100 		 * the whole sequence a few more times, else read in (shift in)
   11101 		 * the Flash Data0, the order is least significant byte first
   11102 		 * msb to lsb
   11103 		 */
   11104 		if (error == 0) {
   11105 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11106 			if (size == 1)
   11107 				*data = (uint8_t)(flash_data & 0x000000FF);
   11108 			else if (size == 2)
   11109 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11110 			else if (size == 4)
   11111 				*data = (uint32_t)flash_data;
   11112 			break;
   11113 		} else {
   11114 			/*
   11115 			 * If we've gotten here, then things are probably
   11116 			 * completely hosed, but if the error condition is
   11117 			 * detected, it won't hurt to give it another try...
   11118 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11119 			 */
   11120 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11121 			if (hsfsts & HSFSTS_ERR) {
   11122 				/* Repeat for some time before giving up. */
   11123 				continue;
   11124 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11125 				break;
   11126 		}
   11127 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11128 
   11129 	return error;
   11130 }
   11131 
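/*
 * Illustrative sketch (not part of the driver): for a 2-byte read the loop
 * above programs HSFCTL with a byte count of size - 1 and the read cycle
 * type before kicking off the cycle:
 *
 *	hsflctl |= ((2 - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
 *	hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
 */
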
   11132 /******************************************************************************
   11133  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11134  *
   11135  * sc - pointer to wm_hw structure
   11136  * index - The index of the byte to read.
   11137  * data - Pointer to a byte to store the value read.
   11138  *****************************************************************************/
   11139 static int32_t
   11140 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11141 {
   11142 	int32_t status;
   11143 	uint32_t word = 0;
   11144 
   11145 	status = wm_read_ich8_data(sc, index, 1, &word);
   11146 	if (status == 0)
   11147 		*data = (uint8_t)word;
   11148 	else
   11149 		*data = 0;
   11150 
   11151 	return status;
   11152 }
   11153 
   11154 /******************************************************************************
   11155  * Reads a word from the NVM using the ICH8 flash access registers.
   11156  *
   11157  * sc - pointer to wm_hw structure
   11158  * index - The starting byte index of the word to read.
   11159  * data - Pointer to a word to store the value read.
   11160  *****************************************************************************/
   11161 static int32_t
   11162 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11163 {
   11164 	int32_t status;
   11165 	uint32_t word = 0;
   11166 
   11167 	status = wm_read_ich8_data(sc, index, 2, &word);
   11168 	if (status == 0)
   11169 		*data = (uint16_t)word;
   11170 	else
   11171 		*data = 0;
   11172 
   11173 	return status;
   11174 }
   11175 
   11176 /******************************************************************************
   11177  * Reads a dword from the NVM using the ICH8 flash access registers.
   11178  *
   11179  * sc - pointer to wm_hw structure
   11180  * index - The starting byte index of the word to read.
   11181  * data - Pointer to a word to store the value read.
   11182  *****************************************************************************/
   11183 static int32_t
   11184 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11185 {
   11186 	int32_t status;
   11187 
   11188 	status = wm_read_ich8_data(sc, index, 4, data);
   11189 	return status;
   11190 }
   11191 
   11192 /******************************************************************************
   11193  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11194  * register.
   11195  *
   11196  * sc - Struct containing variables accessed by shared code
   11197  * offset - offset of word in the EEPROM to read
   11198  * data - word read from the EEPROM
   11199  * words - number of words to read
   11200  *****************************************************************************/
   11201 static int
   11202 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11203 {
   11204 	int32_t  error = 0;
   11205 	uint32_t flash_bank = 0;
   11206 	uint32_t act_offset = 0;
   11207 	uint32_t bank_offset = 0;
   11208 	uint16_t word = 0;
   11209 	uint16_t i = 0;
   11210 
   11211 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11212 		device_xname(sc->sc_dev), __func__));
   11213 
   11214 	/*
   11215 	 * We need to know which is the valid flash bank.  In the event
   11216 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11217 	 * managing flash_bank.  So it cannot be trusted and needs
   11218 	 * to be updated with each read.
   11219 	 */
   11220 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11221 	if (error) {
   11222 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11223 			device_xname(sc->sc_dev)));
   11224 		flash_bank = 0;
   11225 	}
   11226 
   11227 	/*
   11228 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11229 	 * size
   11230 	 */
   11231 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11232 
   11233 	error = wm_get_swfwhw_semaphore(sc);
   11234 	if (error) {
   11235 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11236 		    __func__);
   11237 		return error;
   11238 	}
   11239 
   11240 	for (i = 0; i < words; i++) {
   11241 		/* The NVM part needs a byte offset, hence * 2 */
   11242 		act_offset = bank_offset + ((offset + i) * 2);
   11243 		error = wm_read_ich8_word(sc, act_offset, &word);
   11244 		if (error) {
   11245 			aprint_error_dev(sc->sc_dev,
   11246 			    "%s: failed to read NVM\n", __func__);
   11247 			break;
   11248 		}
   11249 		data[i] = word;
   11250 	}
   11251 
   11252 	wm_put_swfwhw_semaphore(sc);
   11253 	return error;
   11254 }
   11255 
   11256 /******************************************************************************
   11257  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11258  * register.
   11259  *
   11260  * sc - Struct containing variables accessed by shared code
   11261  * offset - offset of word in the EEPROM to read
   11262  * data - word read from the EEPROM
   11263  * words - number of words to read
   11264  *****************************************************************************/
   11265 static int
   11266 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11267 {
   11268 	int32_t  error = 0;
   11269 	uint32_t flash_bank = 0;
   11270 	uint32_t act_offset = 0;
   11271 	uint32_t bank_offset = 0;
   11272 	uint32_t dword = 0;
   11273 	uint16_t i = 0;
   11274 
   11275 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11276 		device_xname(sc->sc_dev), __func__));
   11277 
   11278 	/*
   11279 	 * We need to know which is the valid flash bank.  In the event
   11280 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11281 	 * managing flash_bank.  So it cannot be trusted and needs
   11282 	 * to be updated with each read.
   11283 	 */
   11284 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11285 	if (error) {
   11286 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11287 			device_xname(sc->sc_dev)));
   11288 		flash_bank = 0;
   11289 	}
   11290 
   11291 	/*
   11292 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11293 	 * size
   11294 	 */
   11295 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11296 
   11297 	error = wm_get_swfwhw_semaphore(sc);
   11298 	if (error) {
   11299 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11300 		    __func__);
   11301 		return error;
   11302 	}
   11303 
   11304 	for (i = 0; i < words; i++) {
   11305 		/* The NVM part needs a byte offset, hence * 2 */
   11306 		act_offset = bank_offset + ((offset + i) * 2);
   11307 		/* but we must read dword aligned, so mask ... */
   11308 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11309 		if (error) {
   11310 			aprint_error_dev(sc->sc_dev,
   11311 			    "%s: failed to read NVM\n", __func__);
   11312 			break;
   11313 		}
   11314 		/* ... and pick out low or high word */
   11315 		if ((act_offset & 0x2) == 0)
   11316 			data[i] = (uint16_t)(dword & 0xFFFF);
   11317 		else
   11318 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11319 	}
   11320 
   11321 	wm_put_swfwhw_semaphore(sc);
   11322 	return error;
   11323 }
   11324 
   11325 /* iNVM */
   11326 
   11327 static int
   11328 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11329 {
    11330 	int32_t  rv = -1;	/* -1: address not (yet) found */
   11331 	uint32_t invm_dword;
   11332 	uint16_t i;
   11333 	uint8_t record_type, word_address;
   11334 
   11335 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11336 		device_xname(sc->sc_dev), __func__));
   11337 
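	/*
	 * Scan the iNVM dword array: stop at an uninitialized record, skip
	 * over CSR- and RSA-autoload payloads, and return the data once a
	 * word-autoload record matching 'address' is found.
	 */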
   11338 	for (i = 0; i < INVM_SIZE; i++) {
   11339 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11340 		/* Get record type */
   11341 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11342 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11343 			break;
   11344 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11345 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11346 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11347 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11348 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11349 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11350 			if (word_address == address) {
   11351 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11352 				rv = 0;
   11353 				break;
   11354 			}
   11355 		}
   11356 	}
   11357 
   11358 	return rv;
   11359 }
   11360 
   11361 static int
   11362 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11363 {
   11364 	int rv = 0;
   11365 	int i;
   11366 
   11367 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11368 		device_xname(sc->sc_dev), __func__));
   11369 
   11370 	for (i = 0; i < words; i++) {
   11371 		switch (offset + i) {
   11372 		case NVM_OFF_MACADDR:
   11373 		case NVM_OFF_MACADDR1:
   11374 		case NVM_OFF_MACADDR2:
   11375 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11376 			if (rv != 0) {
   11377 				data[i] = 0xffff;
   11378 				rv = -1;
   11379 			}
   11380 			break;
   11381 		case NVM_OFF_CFG2:
   11382 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11383 			if (rv != 0) {
   11384 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11385 				rv = 0;
   11386 			}
   11387 			break;
   11388 		case NVM_OFF_CFG4:
   11389 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11390 			if (rv != 0) {
   11391 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11392 				rv = 0;
   11393 			}
   11394 			break;
   11395 		case NVM_OFF_LED_1_CFG:
   11396 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11397 			if (rv != 0) {
   11398 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11399 				rv = 0;
   11400 			}
   11401 			break;
   11402 		case NVM_OFF_LED_0_2_CFG:
   11403 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11404 			if (rv != 0) {
   11405 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11406 				rv = 0;
   11407 			}
   11408 			break;
   11409 		case NVM_OFF_ID_LED_SETTINGS:
   11410 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11411 			if (rv != 0) {
   11412 				*data = ID_LED_RESERVED_FFFF;
   11413 				rv = 0;
   11414 			}
   11415 			break;
   11416 		default:
   11417 			DPRINTF(WM_DEBUG_NVM,
   11418 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11419 			*data = NVM_RESERVED_WORD;
   11420 			break;
   11421 		}
   11422 	}
   11423 
   11424 	return rv;
   11425 }
   11426 
    11427 /* Locking, NVM type detection, checksum validation, version check and read */
   11428 
   11429 /*
   11430  * wm_nvm_acquire:
   11431  *
   11432  *	Perform the EEPROM handshake required on some chips.
   11433  */
   11434 static int
   11435 wm_nvm_acquire(struct wm_softc *sc)
   11436 {
   11437 	uint32_t reg;
   11438 	int x;
   11439 	int ret = 0;
   11440 
   11441 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11442 		device_xname(sc->sc_dev), __func__));
   11443 
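	/*
	 * First take the chip-specific semaphore; chips with WM_F_LOCK_EECD
	 * additionally do the EECD request/grant handshake below.
	 */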
   11444 	if (sc->sc_type >= WM_T_ICH8) {
   11445 		ret = wm_get_nvm_ich8lan(sc);
   11446 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11447 		ret = wm_get_swfwhw_semaphore(sc);
   11448 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11449 		/* This will also do wm_get_swsm_semaphore() if needed */
   11450 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11451 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11452 		ret = wm_get_swsm_semaphore(sc);
   11453 	}
   11454 
   11455 	if (ret) {
   11456 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11457 			__func__);
   11458 		return 1;
   11459 	}
   11460 
   11461 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11462 		reg = CSR_READ(sc, WMREG_EECD);
   11463 
   11464 		/* Request EEPROM access. */
   11465 		reg |= EECD_EE_REQ;
   11466 		CSR_WRITE(sc, WMREG_EECD, reg);
   11467 
    11468 		/* ... and wait for it to be granted. */
   11469 		for (x = 0; x < 1000; x++) {
   11470 			reg = CSR_READ(sc, WMREG_EECD);
   11471 			if (reg & EECD_EE_GNT)
   11472 				break;
   11473 			delay(5);
   11474 		}
   11475 		if ((reg & EECD_EE_GNT) == 0) {
   11476 			aprint_error_dev(sc->sc_dev,
   11477 			    "could not acquire EEPROM GNT\n");
   11478 			reg &= ~EECD_EE_REQ;
   11479 			CSR_WRITE(sc, WMREG_EECD, reg);
   11480 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11481 				wm_put_swfwhw_semaphore(sc);
   11482 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11483 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11484 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11485 				wm_put_swsm_semaphore(sc);
   11486 			return 1;
   11487 		}
   11488 	}
   11489 
   11490 	return 0;
   11491 }
   11492 
   11493 /*
   11494  * wm_nvm_release:
   11495  *
   11496  *	Release the EEPROM mutex.
   11497  */
   11498 static void
   11499 wm_nvm_release(struct wm_softc *sc)
   11500 {
   11501 	uint32_t reg;
   11502 
   11503 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11504 		device_xname(sc->sc_dev), __func__));
   11505 
   11506 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11507 		reg = CSR_READ(sc, WMREG_EECD);
   11508 		reg &= ~EECD_EE_REQ;
   11509 		CSR_WRITE(sc, WMREG_EECD, reg);
   11510 	}
   11511 
   11512 	if (sc->sc_type >= WM_T_ICH8) {
   11513 		wm_put_nvm_ich8lan(sc);
   11514 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11515 		wm_put_swfwhw_semaphore(sc);
   11516 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11517 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11518 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11519 		wm_put_swsm_semaphore(sc);
   11520 }
   11521 
   11522 static int
   11523 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11524 {
   11525 	uint32_t eecd = 0;
   11526 
   11527 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11528 	    || sc->sc_type == WM_T_82583) {
   11529 		eecd = CSR_READ(sc, WMREG_EECD);
   11530 
   11531 		/* Isolate bits 15 & 16 */
   11532 		eecd = ((eecd >> 15) & 0x03);
   11533 
   11534 		/* If both bits are set, device is Flash type */
   11535 		if (eecd == 0x03)
   11536 			return 0;
   11537 	}
   11538 	return 1;
   11539 }
   11540 
   11541 static int
   11542 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11543 {
   11544 	uint32_t eec;
   11545 
   11546 	eec = CSR_READ(sc, WMREG_EEC);
   11547 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11548 		return 1;
   11549 
   11550 	return 0;
   11551 }
   11552 
   11553 /*
   11554  * wm_nvm_validate_checksum
   11555  *
    11556  * The sum of the first 64 (16-bit) words must equal NVM_CHECKSUM.
   11557  */
   11558 static int
   11559 wm_nvm_validate_checksum(struct wm_softc *sc)
   11560 {
   11561 	uint16_t checksum;
   11562 	uint16_t eeprom_data;
   11563 #ifdef WM_DEBUG
   11564 	uint16_t csum_wordaddr, valid_checksum;
   11565 #endif
   11566 	int i;
   11567 
   11568 	checksum = 0;
   11569 
   11570 	/* Don't check for I211 */
   11571 	if (sc->sc_type == WM_T_I211)
   11572 		return 0;
   11573 
   11574 #ifdef WM_DEBUG
   11575 	if (sc->sc_type == WM_T_PCH_LPT) {
   11576 		csum_wordaddr = NVM_OFF_COMPAT;
   11577 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11578 	} else {
   11579 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11580 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11581 	}
   11582 
   11583 	/* Dump EEPROM image for debug */
   11584 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11585 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11586 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11587 		/* XXX PCH_SPT? */
   11588 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11589 		if ((eeprom_data & valid_checksum) == 0) {
   11590 			DPRINTF(WM_DEBUG_NVM,
   11591 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11592 				device_xname(sc->sc_dev), eeprom_data,
   11593 				    valid_checksum));
   11594 		}
   11595 	}
   11596 
   11597 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11598 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11599 		for (i = 0; i < NVM_SIZE; i++) {
   11600 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11601 				printf("XXXX ");
   11602 			else
   11603 				printf("%04hx ", eeprom_data);
   11604 			if (i % 8 == 7)
   11605 				printf("\n");
   11606 		}
   11607 	}
   11608 
   11609 #endif /* WM_DEBUG */
   11610 
   11611 	for (i = 0; i < NVM_SIZE; i++) {
   11612 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11613 			return 1;
   11614 		checksum += eeprom_data;
   11615 	}
   11616 
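	/*
	 * Note: a checksum mismatch is only reported under WM_DEBUG and is
	 * never treated as fatal; we still return success below.
	 */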
   11617 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11618 #ifdef WM_DEBUG
   11619 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11620 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11621 #endif
   11622 	}
   11623 
   11624 	return 0;
   11625 }
   11626 
   11627 static void
   11628 wm_nvm_version_invm(struct wm_softc *sc)
   11629 {
   11630 	uint32_t dword;
   11631 
   11632 	/*
    11633 	 * Linux's code to decode the version is very strange, so we don't
    11634 	 * follow that algorithm and just use word 61 as the document
    11635 	 * describes. Perhaps it's not perfect though...
   11636 	 *
   11637 	 * Example:
   11638 	 *
   11639 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11640 	 */
   11641 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11642 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11643 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11644 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11645 }
   11646 
   11647 static void
   11648 wm_nvm_version(struct wm_softc *sc)
   11649 {
   11650 	uint16_t major, minor, build, patch;
   11651 	uint16_t uid0, uid1;
   11652 	uint16_t nvm_data;
   11653 	uint16_t off;
   11654 	bool check_version = false;
   11655 	bool check_optionrom = false;
   11656 	bool have_build = false;
   11657 
   11658 	/*
   11659 	 * Version format:
   11660 	 *
   11661 	 * XYYZ
   11662 	 * X0YZ
   11663 	 * X0YY
   11664 	 *
   11665 	 * Example:
   11666 	 *
   11667 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11668 	 *	82571	0x50a6	5.10.6?
   11669 	 *	82572	0x506a	5.6.10?
   11670 	 *	82572EI	0x5069	5.6.9?
   11671 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11672 	 *		0x2013	2.1.3?
    11673 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11674 	 */
   11675 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11676 	switch (sc->sc_type) {
   11677 	case WM_T_82571:
   11678 	case WM_T_82572:
   11679 	case WM_T_82574:
   11680 	case WM_T_82583:
   11681 		check_version = true;
   11682 		check_optionrom = true;
   11683 		have_build = true;
   11684 		break;
   11685 	case WM_T_82575:
   11686 	case WM_T_82576:
   11687 	case WM_T_82580:
   11688 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11689 			check_version = true;
   11690 		break;
   11691 	case WM_T_I211:
   11692 		wm_nvm_version_invm(sc);
   11693 		goto printver;
   11694 	case WM_T_I210:
   11695 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11696 			wm_nvm_version_invm(sc);
   11697 			goto printver;
   11698 		}
   11699 		/* FALLTHROUGH */
   11700 	case WM_T_I350:
   11701 	case WM_T_I354:
   11702 		check_version = true;
   11703 		check_optionrom = true;
   11704 		break;
   11705 	default:
   11706 		return;
   11707 	}
   11708 	if (check_version) {
   11709 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11710 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11711 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11712 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11713 			build = nvm_data & NVM_BUILD_MASK;
   11714 			have_build = true;
   11715 		} else
   11716 			minor = nvm_data & 0x00ff;
   11717 
    11718 		/* Convert minor from BCD-style hex digits to decimal */
   11719 		minor = (minor / 16) * 10 + (minor % 16);
   11720 		sc->sc_nvm_ver_major = major;
   11721 		sc->sc_nvm_ver_minor = minor;
   11722 
   11723 printver:
   11724 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11725 		    sc->sc_nvm_ver_minor);
   11726 		if (have_build) {
   11727 			sc->sc_nvm_ver_build = build;
   11728 			aprint_verbose(".%d", build);
   11729 		}
   11730 	}
   11731 	if (check_optionrom) {
   11732 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11733 		/* Option ROM Version */
   11734 		if ((off != 0x0000) && (off != 0xffff)) {
   11735 			off += NVM_COMBO_VER_OFF;
   11736 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11737 			wm_nvm_read(sc, off, 1, &uid0);
   11738 			if ((uid0 != 0) && (uid0 != 0xffff)
   11739 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11740 				/* 16bits */
   11741 				major = uid0 >> 8;
   11742 				build = (uid0 << 8) | (uid1 >> 8);
   11743 				patch = uid1 & 0x00ff;
   11744 				aprint_verbose(", option ROM Version %d.%d.%d",
   11745 				    major, build, patch);
   11746 			}
   11747 		}
   11748 	}
   11749 
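	/* XXX uid1 may have been clobbered by the option ROM reads above */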
   11750 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11751 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11752 }
   11753 
   11754 /*
   11755  * wm_nvm_read:
   11756  *
   11757  *	Read data from the serial EEPROM.
   11758  */
   11759 static int
   11760 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11761 {
   11762 	int rv;
   11763 
   11764 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11765 		device_xname(sc->sc_dev), __func__));
   11766 
   11767 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11768 		return 1;
   11769 
   11770 	if (wm_nvm_acquire(sc))
   11771 		return 1;
   11772 
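	/* Dispatch on the NVM access method this chip uses */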
   11773 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11774 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11775 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11776 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11777 	else if (sc->sc_type == WM_T_PCH_SPT)
   11778 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11779 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11780 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11781 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11782 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11783 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11784 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11785 	else
   11786 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11787 
   11788 	wm_nvm_release(sc);
   11789 	return rv;
   11790 }
   11791 
   11792 /*
   11793  * Hardware semaphores.
    11794  * Very complex...
   11795  */
   11796 
   11797 static int
   11798 wm_get_null(struct wm_softc *sc)
   11799 {
   11800 
   11801 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11802 		device_xname(sc->sc_dev), __func__));
   11803 	return 0;
   11804 }
   11805 
   11806 static void
   11807 wm_put_null(struct wm_softc *sc)
   11808 {
   11809 
   11810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11811 		device_xname(sc->sc_dev), __func__));
   11812 	return;
   11813 }
   11814 
   11815 /*
   11816  * Get hardware semaphore.
   11817  * Same as e1000_get_hw_semaphore_generic()
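 *
 * The handshake is two-stage: wait for the hardware-owned SMBI bit to
 * clear, then claim software ownership by setting SWESMBI and reading it
 * back to confirm that the write stuck.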
   11818  */
   11819 static int
   11820 wm_get_swsm_semaphore(struct wm_softc *sc)
   11821 {
   11822 	int32_t timeout;
   11823 	uint32_t swsm;
   11824 
   11825 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11826 		device_xname(sc->sc_dev), __func__));
   11827 	KASSERT(sc->sc_nvm_wordsize > 0);
   11828 
   11829 	/* Get the SW semaphore. */
   11830 	timeout = sc->sc_nvm_wordsize + 1;
   11831 	while (timeout) {
   11832 		swsm = CSR_READ(sc, WMREG_SWSM);
   11833 
   11834 		if ((swsm & SWSM_SMBI) == 0)
   11835 			break;
   11836 
   11837 		delay(50);
   11838 		timeout--;
   11839 	}
   11840 
   11841 	if (timeout == 0) {
   11842 		aprint_error_dev(sc->sc_dev,
   11843 		    "could not acquire SWSM SMBI\n");
   11844 		return 1;
   11845 	}
   11846 
   11847 	/* Get the FW semaphore. */
   11848 	timeout = sc->sc_nvm_wordsize + 1;
   11849 	while (timeout) {
   11850 		swsm = CSR_READ(sc, WMREG_SWSM);
   11851 		swsm |= SWSM_SWESMBI;
   11852 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11853 		/* If we managed to set the bit we got the semaphore. */
   11854 		swsm = CSR_READ(sc, WMREG_SWSM);
   11855 		if (swsm & SWSM_SWESMBI)
   11856 			break;
   11857 
   11858 		delay(50);
   11859 		timeout--;
   11860 	}
   11861 
   11862 	if (timeout == 0) {
   11863 		aprint_error_dev(sc->sc_dev,
   11864 		    "could not acquire SWSM SWESMBI\n");
   11865 		/* Release semaphores */
   11866 		wm_put_swsm_semaphore(sc);
   11867 		return 1;
   11868 	}
   11869 	return 0;
   11870 }
   11871 
   11872 /*
   11873  * Put hardware semaphore.
   11874  * Same as e1000_put_hw_semaphore_generic()
   11875  */
   11876 static void
   11877 wm_put_swsm_semaphore(struct wm_softc *sc)
   11878 {
   11879 	uint32_t swsm;
   11880 
   11881 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11882 		device_xname(sc->sc_dev), __func__));
   11883 
   11884 	swsm = CSR_READ(sc, WMREG_SWSM);
   11885 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11886 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11887 }
   11888 
   11889 /*
   11890  * Get SW/FW semaphore.
   11891  * Same as e1000_acquire_swfw_sync_82575().
   11892  */
   11893 static int
   11894 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11895 {
   11896 	uint32_t swfw_sync;
   11897 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11898 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11899 	int timeout = 200;
   11900 
   11901 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11902 		device_xname(sc->sc_dev), __func__));
   11903 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11904 
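	/*
	 * On each attempt: take the SWSM semaphore, check that neither
	 * software nor firmware currently owns the resources in 'mask',
	 * claim the software bits, then drop the SWSM semaphore again.
	 */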
   11905 	for (timeout = 0; timeout < 200; timeout++) {
   11906 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11907 			if (wm_get_swsm_semaphore(sc)) {
   11908 				aprint_error_dev(sc->sc_dev,
   11909 				    "%s: failed to get semaphore\n",
   11910 				    __func__);
   11911 				return 1;
   11912 			}
   11913 		}
   11914 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11915 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11916 			swfw_sync |= swmask;
   11917 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11918 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11919 				wm_put_swsm_semaphore(sc);
   11920 			return 0;
   11921 		}
   11922 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11923 			wm_put_swsm_semaphore(sc);
   11924 		delay(5000);
   11925 	}
   11926 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11927 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11928 	return 1;
   11929 }
   11930 
   11931 static void
   11932 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11933 {
   11934 	uint32_t swfw_sync;
   11935 
   11936 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11937 		device_xname(sc->sc_dev), __func__));
   11938 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11939 
   11940 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11941 		while (wm_get_swsm_semaphore(sc) != 0)
   11942 			continue;
   11943 	}
   11944 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11945 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11946 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11947 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11948 		wm_put_swsm_semaphore(sc);
   11949 }
   11950 
   11951 static int
   11952 wm_get_phy_82575(struct wm_softc *sc)
   11953 {
   11954 
   11955 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11956 		device_xname(sc->sc_dev), __func__));
   11957 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11958 }
   11959 
   11960 static void
   11961 wm_put_phy_82575(struct wm_softc *sc)
   11962 {
   11963 
   11964 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11965 		device_xname(sc->sc_dev), __func__));
   11966 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11967 }
   11968 
   11969 static int
   11970 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11971 {
   11972 	uint32_t ext_ctrl;
   11973 	int timeout = 200;
   11974 
   11975 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11976 		device_xname(sc->sc_dev), __func__));
   11977 
   11978 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11979 	for (timeout = 0; timeout < 200; timeout++) {
   11980 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11981 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11982 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11983 
   11984 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11985 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11986 			return 0;
   11987 		delay(5000);
   11988 	}
   11989 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11990 	    device_xname(sc->sc_dev), ext_ctrl);
   11991 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11992 	return 1;
   11993 }
   11994 
   11995 static void
   11996 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11997 {
   11998 	uint32_t ext_ctrl;
   11999 
   12000 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12001 		device_xname(sc->sc_dev), __func__));
   12002 
   12003 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12004 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12005 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12006 
   12007 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12008 }
   12009 
   12010 static int
   12011 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12012 {
   12013 	uint32_t ext_ctrl;
   12014 	int timeout;
   12015 
   12016 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12017 		device_xname(sc->sc_dev), __func__));
   12018 	mutex_enter(sc->sc_ich_phymtx);
   12019 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12020 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12021 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12022 			break;
   12023 		delay(1000);
   12024 	}
   12025 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12026 		printf("%s: SW has already locked the resource\n",
   12027 		    device_xname(sc->sc_dev));
   12028 		goto out;
   12029 	}
   12030 
   12031 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12032 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12033 	for (timeout = 0; timeout < 1000; timeout++) {
   12034 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12035 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12036 			break;
   12037 		delay(1000);
   12038 	}
   12039 	if (timeout >= 1000) {
   12040 		printf("%s: failed to acquire semaphore\n",
   12041 		    device_xname(sc->sc_dev));
   12042 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12043 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12044 		goto out;
   12045 	}
   12046 	return 0;
   12047 
   12048 out:
   12049 	mutex_exit(sc->sc_ich_phymtx);
   12050 	return 1;
   12051 }
   12052 
   12053 static void
   12054 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12055 {
   12056 	uint32_t ext_ctrl;
   12057 
   12058 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12059 		device_xname(sc->sc_dev), __func__));
   12060 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12061 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12062 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12063 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12064 	} else {
   12065 		printf("%s: Semaphore unexpectedly released\n",
   12066 		    device_xname(sc->sc_dev));
   12067 	}
   12068 
   12069 	mutex_exit(sc->sc_ich_phymtx);
   12070 }
   12071 
   12072 static int
   12073 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12074 {
   12075 
   12076 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12077 		device_xname(sc->sc_dev), __func__));
   12078 	mutex_enter(sc->sc_ich_nvmmtx);
   12079 
   12080 	return 0;
   12081 }
   12082 
   12083 static void
   12084 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12085 {
   12086 
   12087 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12088 		device_xname(sc->sc_dev), __func__));
   12089 	mutex_exit(sc->sc_ich_nvmmtx);
   12090 }
   12091 
   12092 static int
   12093 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12094 {
   12095 	int i = 0;
   12096 	uint32_t reg;
   12097 
   12098 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12099 		device_xname(sc->sc_dev), __func__));
   12100 
   12101 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12102 	do {
   12103 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12104 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12105 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12106 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12107 			break;
   12108 		delay(2*1000);
   12109 		i++;
   12110 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12111 
   12112 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12113 		wm_put_hw_semaphore_82573(sc);
   12114 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12115 		    device_xname(sc->sc_dev));
   12116 		return -1;
   12117 	}
   12118 
   12119 	return 0;
   12120 }
   12121 
   12122 static void
   12123 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12124 {
   12125 	uint32_t reg;
   12126 
   12127 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12128 		device_xname(sc->sc_dev), __func__));
   12129 
   12130 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12131 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12132 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12133 }
   12134 
   12135 /*
   12136  * Management mode and power management related subroutines.
   12137  * BMC, AMT, suspend/resume and EEE.
   12138  */
   12139 
   12140 #ifdef WM_WOL
   12141 static int
   12142 wm_check_mng_mode(struct wm_softc *sc)
   12143 {
   12144 	int rv;
   12145 
   12146 	switch (sc->sc_type) {
   12147 	case WM_T_ICH8:
   12148 	case WM_T_ICH9:
   12149 	case WM_T_ICH10:
   12150 	case WM_T_PCH:
   12151 	case WM_T_PCH2:
   12152 	case WM_T_PCH_LPT:
   12153 	case WM_T_PCH_SPT:
   12154 		rv = wm_check_mng_mode_ich8lan(sc);
   12155 		break;
   12156 	case WM_T_82574:
   12157 	case WM_T_82583:
   12158 		rv = wm_check_mng_mode_82574(sc);
   12159 		break;
   12160 	case WM_T_82571:
   12161 	case WM_T_82572:
   12162 	case WM_T_82573:
   12163 	case WM_T_80003:
   12164 		rv = wm_check_mng_mode_generic(sc);
   12165 		break;
   12166 	default:
    12167 		/* nothing to do */
   12168 		rv = 0;
   12169 		break;
   12170 	}
   12171 
   12172 	return rv;
   12173 }
   12174 
   12175 static int
   12176 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12177 {
   12178 	uint32_t fwsm;
   12179 
   12180 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12181 
   12182 	if (((fwsm & FWSM_FW_VALID) != 0)
   12183 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12184 		return 1;
   12185 
   12186 	return 0;
   12187 }
   12188 
   12189 static int
   12190 wm_check_mng_mode_82574(struct wm_softc *sc)
   12191 {
   12192 	uint16_t data;
   12193 
   12194 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12195 
   12196 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12197 		return 1;
   12198 
   12199 	return 0;
   12200 }
   12201 
   12202 static int
   12203 wm_check_mng_mode_generic(struct wm_softc *sc)
   12204 {
   12205 	uint32_t fwsm;
   12206 
   12207 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12208 
   12209 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12210 		return 1;
   12211 
   12212 	return 0;
   12213 }
   12214 #endif /* WM_WOL */
   12215 
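/*
 * Return 1 if management pass-through should be enabled.  ASF firmware
 * must be present and TCO packet reception enabled; the remaining checks
 * are chip-specific.
 */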
   12216 static int
   12217 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12218 {
   12219 	uint32_t manc, fwsm, factps;
   12220 
   12221 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12222 		return 0;
   12223 
   12224 	manc = CSR_READ(sc, WMREG_MANC);
   12225 
   12226 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12227 		device_xname(sc->sc_dev), manc));
   12228 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12229 		return 0;
   12230 
   12231 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12232 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12233 		factps = CSR_READ(sc, WMREG_FACTPS);
   12234 		if (((factps & FACTPS_MNGCG) == 0)
   12235 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12236 			return 1;
   12237 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12238 		uint16_t data;
   12239 
   12240 		factps = CSR_READ(sc, WMREG_FACTPS);
   12241 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12242 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12243 			device_xname(sc->sc_dev), factps, data));
   12244 		if (((factps & FACTPS_MNGCG) == 0)
   12245 		    && ((data & NVM_CFG2_MNGM_MASK)
   12246 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12247 			return 1;
   12248 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12249 	    && ((manc & MANC_ASF_EN) == 0))
   12250 		return 1;
   12251 
   12252 	return 0;
   12253 }
   12254 
   12255 static bool
   12256 wm_phy_resetisblocked(struct wm_softc *sc)
   12257 {
   12258 	bool blocked = false;
   12259 	uint32_t reg;
   12260 	int i = 0;
   12261 
   12262 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12263 		device_xname(sc->sc_dev), __func__));
   12264 
   12265 	switch (sc->sc_type) {
   12266 	case WM_T_ICH8:
   12267 	case WM_T_ICH9:
   12268 	case WM_T_ICH10:
   12269 	case WM_T_PCH:
   12270 	case WM_T_PCH2:
   12271 	case WM_T_PCH_LPT:
   12272 	case WM_T_PCH_SPT:
   12273 		do {
   12274 			reg = CSR_READ(sc, WMREG_FWSM);
   12275 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12276 				blocked = true;
   12277 				delay(10*1000);
   12278 				continue;
   12279 			}
   12280 			blocked = false;
   12281 		} while (blocked && (i++ < 30));
   12282 		return blocked;
   12283 		break;
   12284 	case WM_T_82571:
   12285 	case WM_T_82572:
   12286 	case WM_T_82573:
   12287 	case WM_T_82574:
   12288 	case WM_T_82583:
   12289 	case WM_T_80003:
   12290 		reg = CSR_READ(sc, WMREG_MANC);
   12291 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12292 			return true;
   12293 		else
   12294 			return false;
   12295 		break;
   12296 	default:
   12297 		/* no problem */
   12298 		break;
   12299 	}
   12300 
   12301 	return false;
   12302 }
   12303 
   12304 static void
   12305 wm_get_hw_control(struct wm_softc *sc)
   12306 {
   12307 	uint32_t reg;
   12308 
   12309 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12310 		device_xname(sc->sc_dev), __func__));
   12311 
   12312 	if (sc->sc_type == WM_T_82573) {
   12313 		reg = CSR_READ(sc, WMREG_SWSM);
   12314 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12315 	} else if (sc->sc_type >= WM_T_82571) {
   12316 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12317 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12318 	}
   12319 }
   12320 
   12321 static void
   12322 wm_release_hw_control(struct wm_softc *sc)
   12323 {
   12324 	uint32_t reg;
   12325 
   12326 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12327 		device_xname(sc->sc_dev), __func__));
   12328 
   12329 	if (sc->sc_type == WM_T_82573) {
   12330 		reg = CSR_READ(sc, WMREG_SWSM);
   12331 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12332 	} else if (sc->sc_type >= WM_T_82571) {
   12333 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12334 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12335 	}
   12336 }
   12337 
   12338 static void
   12339 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12340 {
   12341 	uint32_t reg;
   12342 
   12343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12344 		device_xname(sc->sc_dev), __func__));
   12345 
   12346 	if (sc->sc_type < WM_T_PCH2)
   12347 		return;
   12348 
   12349 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12350 
   12351 	if (gate)
   12352 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12353 	else
   12354 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12355 
   12356 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12357 }
   12358 
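/*
 * Switch the PHY from SMBus to MDIO/PCIe access (hence the name), forcing
 * SMBus mode and toggling LANPHYPC as needed until the PHY becomes
 * accessible again.
 */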
   12359 static void
   12360 wm_smbustopci(struct wm_softc *sc)
   12361 {
   12362 	uint32_t fwsm, reg;
   12363 	int rv = 0;
   12364 
   12365 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12366 		device_xname(sc->sc_dev), __func__));
   12367 
   12368 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12369 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12370 
   12371 	/* Disable ULP */
   12372 	wm_ulp_disable(sc);
   12373 
   12374 	/* Acquire PHY semaphore */
   12375 	sc->phy.acquire(sc);
   12376 
   12377 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12378 	switch (sc->sc_type) {
   12379 	case WM_T_PCH_LPT:
   12380 	case WM_T_PCH_SPT:
   12381 		if (wm_phy_is_accessible_pchlan(sc))
   12382 			break;
   12383 
   12384 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12385 		reg |= CTRL_EXT_FORCE_SMBUS;
   12386 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12387 #if 0
   12388 		/* XXX Isn't this required??? */
   12389 		CSR_WRITE_FLUSH(sc);
   12390 #endif
   12391 		delay(50 * 1000);
   12392 		/* FALLTHROUGH */
   12393 	case WM_T_PCH2:
   12394 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12395 			break;
   12396 		/* FALLTHROUGH */
   12397 	case WM_T_PCH:
   12398 		if (sc->sc_type == WM_T_PCH)
   12399 			if ((fwsm & FWSM_FW_VALID) != 0)
   12400 				break;
   12401 
   12402 		if (wm_phy_resetisblocked(sc) == true) {
   12403 			printf("XXX reset is blocked(3)\n");
   12404 			break;
   12405 		}
   12406 
   12407 		wm_toggle_lanphypc_pch_lpt(sc);
   12408 
   12409 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12410 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12411 				break;
   12412 
   12413 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12414 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12415 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12416 
   12417 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12418 				break;
   12419 			rv = -1;
   12420 		}
   12421 		break;
   12422 	default:
   12423 		break;
   12424 	}
   12425 
   12426 	/* Release semaphore */
   12427 	sc->phy.release(sc);
   12428 
   12429 	if (rv == 0) {
   12430 		if (wm_phy_resetisblocked(sc)) {
   12431 			printf("XXX reset is blocked(4)\n");
   12432 			goto out;
   12433 		}
   12434 		wm_reset_phy(sc);
   12435 		if (wm_phy_resetisblocked(sc))
   12436 			printf("XXX reset is blocked(4)\n");
   12437 	}
   12438 
   12439 out:
   12440 	/*
   12441 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12442 	 */
   12443 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12444 		delay(10*1000);
   12445 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12446 	}
   12447 }
   12448 
   12449 static void
   12450 wm_init_manageability(struct wm_softc *sc)
   12451 {
   12452 
   12453 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12454 		device_xname(sc->sc_dev), __func__));
   12455 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12456 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12457 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12458 
   12459 		/* Disable hardware interception of ARP */
   12460 		manc &= ~MANC_ARP_EN;
   12461 
   12462 		/* Enable receiving management packets to the host */
   12463 		if (sc->sc_type >= WM_T_82571) {
   12464 			manc |= MANC_EN_MNG2HOST;
   12465 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   12466 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12467 		}
   12468 
   12469 		CSR_WRITE(sc, WMREG_MANC, manc);
   12470 	}
   12471 }
   12472 
   12473 static void
   12474 wm_release_manageability(struct wm_softc *sc)
   12475 {
   12476 
   12477 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12478 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12479 
   12480 		manc |= MANC_ARP_EN;
   12481 		if (sc->sc_type >= WM_T_82571)
   12482 			manc &= ~MANC_EN_MNG2HOST;
   12483 
   12484 		CSR_WRITE(sc, WMREG_MANC, manc);
   12485 	}
   12486 }
   12487 
   12488 static void
   12489 wm_get_wakeup(struct wm_softc *sc)
   12490 {
   12491 
   12492 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12493 	switch (sc->sc_type) {
   12494 	case WM_T_82573:
   12495 	case WM_T_82583:
   12496 		sc->sc_flags |= WM_F_HAS_AMT;
   12497 		/* FALLTHROUGH */
   12498 	case WM_T_80003:
   12499 	case WM_T_82575:
   12500 	case WM_T_82576:
   12501 	case WM_T_82580:
   12502 	case WM_T_I350:
   12503 	case WM_T_I354:
   12504 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12505 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12506 		/* FALLTHROUGH */
   12507 	case WM_T_82541:
   12508 	case WM_T_82541_2:
   12509 	case WM_T_82547:
   12510 	case WM_T_82547_2:
   12511 	case WM_T_82571:
   12512 	case WM_T_82572:
   12513 	case WM_T_82574:
   12514 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12515 		break;
   12516 	case WM_T_ICH8:
   12517 	case WM_T_ICH9:
   12518 	case WM_T_ICH10:
   12519 	case WM_T_PCH:
   12520 	case WM_T_PCH2:
   12521 	case WM_T_PCH_LPT:
   12522 	case WM_T_PCH_SPT:
   12523 		sc->sc_flags |= WM_F_HAS_AMT;
   12524 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12525 		break;
   12526 	default:
   12527 		break;
   12528 	}
   12529 
   12530 	/* 1: HAS_MANAGE */
   12531 	if (wm_enable_mng_pass_thru(sc) != 0)
   12532 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12533 
   12534 #ifdef WM_DEBUG
   12535 	printf("\n");
   12536 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12537 		printf("HAS_AMT,");
   12538 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12539 		printf("ARC_SUBSYS_VALID,");
   12540 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12541 		printf("ASF_FIRMWARE_PRES,");
   12542 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12543 		printf("HAS_MANAGE,");
   12544 	printf("\n");
   12545 #endif
   12546 	/*
    12547 	 * Note that the WOL flags are set after the EEPROM settings have
    12548 	 * been reset.
   12549 	 */
   12550 }
   12551 
   12552 /*
   12553  * Unconfigure Ultra Low Power mode.
    12554  * Only for PCH_LPT and newer; a few I217/I218 variants are excluded below.
   12555  */
   12556 static void
   12557 wm_ulp_disable(struct wm_softc *sc)
   12558 {
   12559 	uint32_t reg;
   12560 	int i = 0;
   12561 
   12562 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12563 		device_xname(sc->sc_dev), __func__));
   12564 	/* Exclude old devices */
   12565 	if ((sc->sc_type < WM_T_PCH_LPT)
   12566 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12567 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12568 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12569 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12570 		return;
   12571 
   12572 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12573 		/* Request ME un-configure ULP mode in the PHY */
   12574 		reg = CSR_READ(sc, WMREG_H2ME);
   12575 		reg &= ~H2ME_ULP;
   12576 		reg |= H2ME_ENFORCE_SETTINGS;
   12577 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12578 
   12579 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12580 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12581 			if (i++ == 30) {
   12582 				printf("%s timed out\n", __func__);
   12583 				return;
   12584 			}
   12585 			delay(10 * 1000);
   12586 		}
   12587 		reg = CSR_READ(sc, WMREG_H2ME);
   12588 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12589 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12590 
   12591 		return;
   12592 	}
   12593 
   12594 	/* Acquire semaphore */
   12595 	sc->phy.acquire(sc);
   12596 
   12597 	/* Toggle LANPHYPC */
   12598 	wm_toggle_lanphypc_pch_lpt(sc);
   12599 
   12600 	/* Unforce SMBus mode in PHY */
   12601 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12602 	if (reg == 0x0000 || reg == 0xffff) {
   12603 		uint32_t reg2;
   12604 
   12605 		printf("%s: Force SMBus first.\n", __func__);
   12606 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12607 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12608 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12609 		delay(50 * 1000);
   12610 
   12611 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12612 	}
   12613 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12614 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12615 
   12616 	/* Unforce SMBus mode in MAC */
   12617 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12618 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12619 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12620 
   12621 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12622 	reg |= HV_PM_CTRL_K1_ENA;
   12623 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12624 
   12625 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12626 	reg &= ~(I218_ULP_CONFIG1_IND
   12627 	    | I218_ULP_CONFIG1_STICKY_ULP
   12628 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12629 	    | I218_ULP_CONFIG1_WOL_HOST
   12630 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12631 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12632 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12633 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12634 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12635 	reg |= I218_ULP_CONFIG1_START;
   12636 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12637 
   12638 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12639 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12640 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12641 
   12642 	/* Release semaphore */
   12643 	sc->phy.release(sc);
   12644 	wm_gmii_reset(sc);
   12645 	delay(50 * 1000);
   12646 }
   12647 
   12648 /* WOL in the newer chipset interfaces (pchlan) */
   12649 static void
   12650 wm_enable_phy_wakeup(struct wm_softc *sc)
   12651 {
   12652 #if 0
   12653 	uint16_t preg;
   12654 
   12655 	/* Copy MAC RARs to PHY RARs */
   12656 
   12657 	/* Copy MAC MTA to PHY MTA */
   12658 
   12659 	/* Configure PHY Rx Control register */
   12660 
   12661 	/* Enable PHY wakeup in MAC register */
   12662 
   12663 	/* Configure and enable PHY wakeup in PHY registers */
   12664 
   12665 	/* Activate PHY wakeup */
   12666 
   12667 	/* XXX */
   12668 #endif
   12669 }
   12670 
   12671 /* Power down workaround on D3 */
   12672 static void
   12673 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12674 {
   12675 	uint32_t reg;
   12676 	int i;
   12677 
   12678 	for (i = 0; i < 2; i++) {
   12679 		/* Disable link */
   12680 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12681 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12682 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12683 
   12684 		/*
   12685 		 * Call gig speed drop workaround on Gig disable before
   12686 		 * accessing any PHY registers
   12687 		 */
   12688 		if (sc->sc_type == WM_T_ICH8)
   12689 			wm_gig_downshift_workaround_ich8lan(sc);
   12690 
   12691 		/* Write VR power-down enable */
   12692 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12693 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12694 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12695 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12696 
   12697 		/* Read it back and test */
   12698 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12699 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12700 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12701 			break;
   12702 
   12703 		/* Issue PHY reset and repeat at most one more time */
   12704 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12705 	}
   12706 }
   12707 
   12708 static void
   12709 wm_enable_wakeup(struct wm_softc *sc)
   12710 {
   12711 	uint32_t reg, pmreg;
   12712 	pcireg_t pmode;
   12713 
   12714 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12715 		device_xname(sc->sc_dev), __func__));
   12716 
   12717 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12718 		&pmreg, NULL) == 0)
   12719 		return;
   12720 
   12721 	/* Advertise the wakeup capability */
   12722 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12723 	    | CTRL_SWDPIN(3));
   12724 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12725 
   12726 	/* ICH workaround */
   12727 	switch (sc->sc_type) {
   12728 	case WM_T_ICH8:
   12729 	case WM_T_ICH9:
   12730 	case WM_T_ICH10:
   12731 	case WM_T_PCH:
   12732 	case WM_T_PCH2:
   12733 	case WM_T_PCH_LPT:
   12734 	case WM_T_PCH_SPT:
   12735 		/* Disable gig during WOL */
   12736 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12737 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12738 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12739 		if (sc->sc_type == WM_T_PCH)
   12740 			wm_gmii_reset(sc);
   12741 
   12742 		/* Power down workaround */
   12743 		if (sc->sc_phytype == WMPHY_82577) {
   12744 			struct mii_softc *child;
   12745 
   12746 			/* Assume that the PHY is copper */
   12747 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12748 			if (child->mii_mpd_rev <= 2)
   12749 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12750 				    (768 << 5) | 25, 0x0444); /* magic num */
   12751 		}
   12752 		break;
   12753 	default:
   12754 		break;
   12755 	}
   12756 
   12757 	/* Keep the laser running on fiber adapters */
   12758 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12759 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12760 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12761 		reg |= CTRL_EXT_SWDPIN(3);
   12762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12763 	}
   12764 
   12765 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12766 #if 0	/* for the multicast packet */
   12767 	reg |= WUFC_MC;
   12768 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12769 #endif
   12770 
   12771 	if (sc->sc_type >= WM_T_PCH)
   12772 		wm_enable_phy_wakeup(sc);
   12773 	else {
   12774 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12775 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12776 	}
   12777 
   12778 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12779 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12780 		|| (sc->sc_type == WM_T_PCH2))
   12781 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12782 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12783 
   12784 	/* Request PME */
   12785 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12786 #if 0
   12787 	/* Disable WOL */
   12788 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12789 #else
   12790 	/* For WOL */
   12791 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12792 #endif
   12793 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12794 }
   12795 
   12796 /* LPLU */
   12797 
   12798 static void
   12799 wm_lplu_d0_disable(struct wm_softc *sc)
   12800 {
   12801 	uint32_t reg;
   12802 
   12803 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12804 		device_xname(sc->sc_dev), __func__));
   12805 
   12806 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12807 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12808 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12809 }
   12810 
   12811 static void
   12812 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12813 {
   12814 	uint32_t reg;
   12815 
   12816 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12817 		device_xname(sc->sc_dev), __func__));
   12818 
   12819 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12820 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12821 	reg |= HV_OEM_BITS_ANEGNOW;
   12822 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12823 }
   12824 
   12825 /* EEE */
   12826 
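/*
 * Propagate the WM_F_EEE flag to the IPCNFG/EEER registers: advertise EEE
 * for 100M/1G and enable LPI when the flag is set, clear it all otherwise.
 */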
   12827 static void
   12828 wm_set_eee_i350(struct wm_softc *sc)
   12829 {
   12830 	uint32_t ipcnfg, eeer;
   12831 
   12832 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12833 	eeer = CSR_READ(sc, WMREG_EEER);
   12834 
   12835 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12836 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12837 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12838 		    | EEER_LPI_FC);
   12839 	} else {
   12840 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12841 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12842 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12843 		    | EEER_LPI_FC);
   12844 	}
   12845 
   12846 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12847 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12848 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12849 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12850 }
   12851 
   12852 /*
   12853  * Workarounds (mainly PHY related).
    12854  * Basically, PHY workarounds live in the PHY drivers.
   12855  */
   12856 
   12857 /* Work-around for 82566 Kumeran PCS lock loss */
   12858 static void
   12859 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12860 {
   12861 #if 0
   12862 	int miistatus, active, i;
   12863 	int reg;
   12864 
   12865 	miistatus = sc->sc_mii.mii_media_status;
   12866 
   12867 	/* If the link is not up, do nothing */
   12868 	if ((miistatus & IFM_ACTIVE) == 0)
   12869 		return;
   12870 
   12871 	active = sc->sc_mii.mii_media_active;
   12872 
   12873 	/* Nothing to do if the link is other than 1Gbps */
   12874 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12875 		return;
   12876 
   12877 	for (i = 0; i < 10; i++) {
   12878 		/* read twice */
   12879 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12880 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12881 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12882 			goto out;	/* GOOD! */
   12883 
   12884 		/* Reset the PHY */
   12885 		wm_gmii_reset(sc);
   12886 		delay(5*1000);
   12887 	}
   12888 
   12889 	/* Disable GigE link negotiation */
   12890 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12891 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12892 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12893 
   12894 	/*
   12895 	 * Call gig speed drop workaround on Gig disable before accessing
   12896 	 * any PHY registers.
   12897 	 */
   12898 	wm_gig_downshift_workaround_ich8lan(sc);
   12899 
   12900 out:
   12901 	return;
   12902 #endif
   12903 }
   12904 
   12905 /* WOL from S5 stops working */
   12906 static void
   12907 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12908 {
   12909 	uint16_t kmrn_reg;
   12910 
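	/*
	 * Pulse the Kumeran near-end loopback diagnostic bit; setting and
	 * then clearing it is the whole workaround.
	 */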
   12911 	/* Only for igp3 */
   12912 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12913 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12914 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12915 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12916 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12917 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12918 	}
   12919 }
   12920 
   12921 /*
   12922  * Workaround for pch's PHYs
   12923  * XXX should be moved to new PHY driver?
   12924  */
   12925 static void
   12926 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12927 {
   12928 
   12929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12930 		device_xname(sc->sc_dev), __func__));
   12931 	KASSERT(sc->sc_type == WM_T_PCH);
   12932 
   12933 	if (sc->sc_phytype == WMPHY_82577)
   12934 		wm_set_mdio_slow_mode_hv(sc);
   12935 
   12936 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12937 
   12938 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12939 
   12940 	/* 82578 */
   12941 	if (sc->sc_phytype == WMPHY_82578) {
   12942 		struct mii_softc *child;
   12943 
   12944 		/*
   12945 		 * Return registers to default by doing a soft reset then
   12946 		 * writing 0x3140 to the control register
   12947 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12948 		 */
   12949 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12950 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12951 			PHY_RESET(child);
   12952 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12953 			    0x3140);
   12954 		}
   12955 	}
   12956 
   12957 	/* Select page 0 */
   12958 	sc->phy.acquire(sc);
   12959 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12960 	sc->phy.release(sc);
   12961 
   12962 	/*
   12963 	 * Configure the K1 Si workaround during phy reset assuming there is
   12964 	 * link so that it disables K1 if link is in 1Gbps.
   12965 	 */
   12966 	wm_k1_gig_workaround_hv(sc, 1);
   12967 }
   12968 
   12969 static void
   12970 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12971 {
   12972 
   12973 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12974 		device_xname(sc->sc_dev), __func__));
   12975 	KASSERT(sc->sc_type == WM_T_PCH2);
   12976 
   12977 	wm_set_mdio_slow_mode_hv(sc);
   12978 }
   12979 
   12980 static int
   12981 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12982 {
   12983 	int k1_enable = sc->sc_nvm_k1_enabled;
   12984 
   12985 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12986 		device_xname(sc->sc_dev), __func__));
   12987 
   12988 	if (sc->phy.acquire(sc) != 0)
   12989 		return -1;
   12990 
   12991 	if (link) {
   12992 		k1_enable = 0;
   12993 
   12994 		/* Link stall fix for link up */
   12995 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12996 	} else {
   12997 		/* Link stall fix for link down */
   12998 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12999 	}
   13000 
   13001 	wm_configure_k1_ich8lan(sc, k1_enable);
   13002 	sc->phy.release(sc);
   13003 
   13004 	return 0;
   13005 }
   13006 
   13007 static void
   13008 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13009 {
   13010 	uint32_t reg;
   13011 
   13012 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13013 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13014 	    reg | HV_KMRN_MDIO_SLOW);
   13015 }
   13016 
   13017 static void
   13018 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13019 {
   13020 	uint32_t ctrl, ctrl_ext, tmp;
   13021 	uint16_t kmrn_reg;
   13022 
   13023 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13024 
   13025 	if (k1_enable)
   13026 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13027 	else
   13028 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13029 
   13030 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13031 
   13032 	delay(20);
   13033 
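	/*
	 * Briefly force the MAC speed with CTRL_EXT_SPD_BYPS set (presumably
	 * so the new K1 setting takes effect), then restore the original
	 * CTRL/CTRL_EXT values.
	 */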
   13034 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13035 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13036 
   13037 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13038 	tmp |= CTRL_FRCSPD;
   13039 
   13040 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13041 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13042 	CSR_WRITE_FLUSH(sc);
   13043 	delay(20);
   13044 
   13045 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13046 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13047 	CSR_WRITE_FLUSH(sc);
   13048 	delay(20);
   13049 }
   13050 
   13051 /* special case - for 82575 - need to do manual init ... */
   13052 static void
   13053 wm_reset_init_script_82575(struct wm_softc *sc)
   13054 {
   13055 	/*
    13056 	 * Remark: this is untested code - we have no board without EEPROM.
    13057 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   13058 	 */
   13059 
   13060 	/* SerDes configuration via SERDESCTRL */
   13061 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13062 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13063 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13064 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13065 
   13066 	/* CCM configuration via CCMCTL register */
   13067 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13068 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13069 
   13070 	/* PCIe lanes configuration */
   13071 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13072 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13073 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13074 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13075 
   13076 	/* PCIe PLL Configuration */
   13077 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13078 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13079 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13080 }
   13081 
   13082 static void
   13083 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13084 {
   13085 	uint32_t reg;
   13086 	uint16_t nvmword;
   13087 	int rv;
   13088 
   13089 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13090 		return;
   13091 
   13092 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13093 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13094 	if (rv != 0) {
   13095 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13096 		    __func__);
   13097 		return;
   13098 	}
   13099 
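	/*
	 * MDICNFG_DEST selects the external MDIO interface and
	 * MDICNFG_COM_MDIO the shared one; mirror whatever the NVM
	 * image requests for this port (bit meanings per the 82580
	 * datasheet).
	 */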
   13100 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13101 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13102 		reg |= MDICNFG_DEST;
   13103 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13104 		reg |= MDICNFG_COM_MDIO;
   13105 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13106 }
   13107 
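/*
 * A PHY ID register that reads as all-0s or all-1s indicates the PHY is
 * absent or not responding on the management interface.
 */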
   13108 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13109 
   13110 static bool
   13111 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13112 {
   13113 	int i;
   13114 	uint32_t reg;
   13115 	uint16_t id1, id2;
   13116 
   13117 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13118 		device_xname(sc->sc_dev), __func__));
   13119 	id1 = id2 = 0xffff;
   13120 	for (i = 0; i < 2; i++) {
   13121 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13122 		if (MII_INVALIDID(id1))
   13123 			continue;
   13124 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13125 		if (MII_INVALIDID(id2))
   13126 			continue;
   13127 		break;
   13128 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   13132 
   13133 	if (sc->sc_type < WM_T_PCH_LPT) {
   13134 		sc->phy.release(sc);
   13135 		wm_set_mdio_slow_mode_hv(sc);
   13136 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13137 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13138 		sc->phy.acquire(sc);
   13139 	}
   13140 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		DPRINTF(WM_DEBUG_INIT, ("%s: %s: PHY ID is still invalid\n",
			device_xname(sc->sc_dev), __func__));
   13142 		return false;
   13143 	}
   13144 out:
   13145 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13146 		/* Only unforce SMBus if ME is not active */
   13147 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13148 			/* Unforce SMBus mode in PHY */
   13149 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13150 			    CV_SMB_CTRL);
   13151 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13152 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13153 			    CV_SMB_CTRL, reg);
   13154 
   13155 			/* Unforce SMBus mode in MAC */
   13156 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13157 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13158 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13159 		}
   13160 	}
   13161 	return true;
   13162 }
   13163 
   13164 static void
   13165 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13166 {
   13167 	uint32_t reg;
   13168 	int i;
   13169 
   13170 	/* Set PHY Config Counter to 50msec */
   13171 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13172 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13173 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13174 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13175 
   13176 	/* Toggle LANPHYPC */
   13177 	reg = CSR_READ(sc, WMREG_CTRL);
   13178 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13179 	reg &= ~CTRL_LANPHYPC_VALUE;
   13180 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13181 	CSR_WRITE_FLUSH(sc);
   13182 	delay(1000);
   13183 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13184 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13185 	CSR_WRITE_FLUSH(sc);
   13186 
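	/*
	 * Wait for the PHY to come back up: older parts use a fixed
	 * 50ms delay; PCH_LPT and newer first poll CTRL_EXT_LPCD for
	 * the completed power cycle.
	 */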
	if (sc->sc_type < WM_T_PCH_LPT) {
		delay(50 * 1000);
	} else {
   13190 		i = 20;
   13191 
   13192 		do {
   13193 			delay(5 * 1000);
   13194 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13195 		    && i--);
   13196 
   13197 		delay(30 * 1000);
   13198 	}
   13199 }
   13200 
   13201 static int
   13202 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13203 {
   13204 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13205 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13206 	uint32_t rxa;
   13207 	uint16_t scale = 0, lat_enc = 0;
   13208 	int64_t lat_ns, value;
   13209 
   13210 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13211 		device_xname(sc->sc_dev), __func__));
   13212 
   13213 	if (link) {
   13214 		pcireg_t preg;
   13215 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13216 
   13217 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13218 
   13219 		/*
   13220 		 * Determine the maximum latency tolerated by the device.
   13221 		 *
   13222 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13223 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13224 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13225 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13226 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13227 		 */
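		/*
		 * lat_ns below is (roughly) the time, in ns, needed to
		 * drain a full Rx packet buffer, less two in-flight MTUs:
		 * rxa is in KB (hence * 1024), * 8 converts bytes to bits,
		 * and * 1000 / speed-in-Mb/s converts bits to ns.
		 */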
   13228 		lat_ns = ((int64_t)rxa * 1024 -
   13229 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13230 		if (lat_ns < 0)
   13231 			lat_ns = 0;
   13232 		else {
   13233 			uint32_t status;
   13234 			uint16_t speed;
   13235 
   13236 			status = CSR_READ(sc, WMREG_STATUS);
   13237 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13238 			case STATUS_SPEED_10:
   13239 				speed = 10;
   13240 				break;
   13241 			case STATUS_SPEED_100:
   13242 				speed = 100;
   13243 				break;
   13244 			case STATUS_SPEED_1000:
   13245 				speed = 1000;
   13246 				break;
   13247 			default:
   13248 				printf("%s: Unknown speed (status = %08x)\n",
   13249 				    device_xname(sc->sc_dev), status);
   13250 				return -1;
   13251 			}
   13252 			lat_ns /= speed;
   13253 		}
   13254 		value = lat_ns;
   13255 
   13256 		while (value > LTRV_VALUE) {
			scale++;
   13258 			value = howmany(value, __BIT(5));
   13259 		}
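		/*
		 * Worked example (hypothetical numbers): lat_ns = 100000
		 * exceeds the 10-bit value, so the loop divides by 2^5
		 * twice: 100000 -> 3125 -> 98.  That yields scale = 2
		 * (units of 2^10 ns) and an encoded latency of about
		 * 98 * 1024 ns ~= 100 us.
		 */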
   13260 		if (scale > LTRV_SCALE_MAX) {
   13261 			printf("%s: Invalid LTR latency scale %d\n",
   13262 			    device_xname(sc->sc_dev), scale);
   13263 			return -1;
   13264 		}
   13265 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13266 
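		/*
		 * Cap the encoding at the device's advertised maximum;
		 * the LTR capability word carries the max snoop latency
		 * in its low 16 bits and the max no-snoop latency in its
		 * high 16 bits.
		 */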
   13267 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13268 		    WM_PCI_LTR_CAP_LPT);
   13269 		max_snoop = preg & 0xffff;
   13270 		max_nosnoop = preg >> 16;
   13271 
   13272 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13273 
		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
   13277 	}
	/* Use the same latency for both Snoop and No-Snoop */
   13279 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13280 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13281 
   13282 	return 0;
   13283 }
   13284 
   13285 /*
   13286  * I210 Errata 25 and I211 Errata 10
   13287  * Slow System Clock.
   13288  */
   13289 static void
   13290 wm_pll_workaround_i210(struct wm_softc *sc)
   13291 {
   13292 	uint32_t mdicnfg, wuc;
   13293 	uint32_t reg;
   13294 	pcireg_t pcireg;
   13295 	uint32_t pmreg;
   13296 	uint16_t nvmword, tmp_nvmword;
   13297 	int phyval;
   13298 	bool wa_done = false;
   13299 	int i;
   13300 
   13301 	/* Save WUC and MDICNFG registers */
   13302 	wuc = CSR_READ(sc, WMREG_WUC);
   13303 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13304 
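	/* Use the internal PHY's MDIO interface while the workaround runs */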
   13305 	reg = mdicnfg & ~MDICNFG_DEST;
   13306 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13307 
   13308 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13309 		nvmword = INVM_DEFAULT_AL;
   13310 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13311 
   13312 	/* Get Power Management cap offset */
   13313 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13314 		&pmreg, NULL) == 0)
   13315 		return;
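	/*
	 * Retry loop: if the PHY PLL reads back as unconfigured, reset
	 * the PHY with the workaround autoload value and bounce the
	 * function through D3hot/D0 so the PLL can relock, then check
	 * again (up to WM_MAX_PLL_TRIES times).
	 */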
   13316 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13317 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13318 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13319 
   13320 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13321 			break; /* OK */
   13322 		}
   13323 
   13324 		wa_done = true;
   13325 		/* Directly reset the internal PHY */
   13326 		reg = CSR_READ(sc, WMREG_CTRL);
   13327 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13328 
   13329 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13330 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13331 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13332 
   13333 		CSR_WRITE(sc, WMREG_WUC, 0);
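		/*
		 * Load the autoload word, with the PLL workaround bits
		 * set, via EEARBC (this mirrors the equivalent Linux igb
		 * workaround sequence).
		 */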
   13334 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13335 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13336 
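		/* Power cycle the function: enter D3hot, then return to D0 */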
   13337 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13338 		    pmreg + PCI_PMCSR);
   13339 		pcireg |= PCI_PMCSR_STATE_D3;
   13340 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13341 		    pmreg + PCI_PMCSR, pcireg);
   13342 		delay(1000);
   13343 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13344 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13345 		    pmreg + PCI_PMCSR, pcireg);
   13346 
   13347 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13348 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13349 
   13350 		/* Restore WUC register */
   13351 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13352 	}
   13353 
   13354 	/* Restore MDICNFG setting */
   13355 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13356 	if (wa_done)
   13357 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13358 }
   13359