/*	$NetBSD: if_wm.c,v 1.563 2018/02/14 12:56:00 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.563 2018/02/14 12:56:00 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
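
/*
 * Example usage (illustrative): the second DPRINTF() argument is a
 * complete, parenthesized printf() argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * so the whole statement compiles away when WM_DEBUG is not defined.
 */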

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
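/*
 * That is, at most one MSI-X vector per Tx/Rx queue pair, plus one
 * vector for link state interrupts (see wm_linkintr_msix() below).
 */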

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
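/*
 * Worked example: WM_NTXDESC() is a power of two, so ring indices wrap
 * with a cheap AND instead of a modulo; with 4096 descriptors,
 * WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0.
 */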

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
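/*
 * The arithmetic behind the comment above: a full-sized (~9k) jumbo
 * frame spans 5 of the 2k buffers, and 256 descriptors / 5 buffers
 * per packet leaves room for roughly 50 jumbo packets.
 */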

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
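/*
 * Presumably indexed by the 82580 RXPBS register's size field to yield
 * a packet buffer size in KB; see wm_rxpbs_adjust_82580() below.
 */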

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
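
/*
 * For illustration, WM_Q_EVCNT_DEFINE(txq, txdw) expands to
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() then registers the counter under a name
 * such as "txq00txdw" (queue 0, event "txdw").
 */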

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags rather than ifp->if_flags
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_watchdog;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
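
/*
 * Sketch of how the two macros cooperate: rxq_tailp always points at
 * the slot that should receive the next mbuf (rxq_head while the chain
 * is empty, the last mbuf's m_next afterwards), so appending is O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */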

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
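
/*
 * Typical usage sketch: reading back a harmless register such as
 * STATUS forces posted writes out to the chip, e.g.
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);
 *
 * to make sure interrupts are really masked before continuing.
 */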

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
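
/*
 * Example: for a 64-bit descriptor ring address 0x0000000123456780,
 * WM_CDTXADDR_LO() yields 0x23456780 and WM_CDTXADDR_HI() yields 0x1;
 * where bus_addr_t is 32 bits wide, the high half is constant 0.
 */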

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
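
/*
 * CFATTACH_DECL3_NEW() hooks the match/attach/detach routines above
 * into autoconf(9); DVF_DETACH_SHUTDOWN indicates that the driver may
 * be detached at system shutdown.
 */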

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1332 	  "82576 1000BaseX Ethernet",
   1333 	  WM_T_82576,		WMP_F_FIBER },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1336 	  "82576 gigabit Ethernet (SERDES)",
   1337 	  WM_T_82576,		WMP_F_SERDES },
   1338 
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1340 	  "82576 quad-1000BaseT Ethernet",
   1341 	  WM_T_82576,		WMP_F_COPPER },
   1342 
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1344 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1345 	  WM_T_82576,		WMP_F_COPPER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1348 	  "82576 gigabit Ethernet",
   1349 	  WM_T_82576,		WMP_F_COPPER },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1352 	  "82576 gigabit Ethernet (SERDES)",
   1353 	  WM_T_82576,		WMP_F_SERDES },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1355 	  "82576 quad-gigabit Ethernet (SERDES)",
   1356 	  WM_T_82576,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1359 	  "82580 1000BaseT Ethernet",
   1360 	  WM_T_82580,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1362 	  "82580 1000BaseX Ethernet",
   1363 	  WM_T_82580,		WMP_F_FIBER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1366 	  "82580 1000BaseT Ethernet (SERDES)",
   1367 	  WM_T_82580,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1370 	  "82580 gigabit Ethernet (SGMII)",
   1371 	  WM_T_82580,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1373 	  "82580 dual-1000BaseT Ethernet",
   1374 	  WM_T_82580,		WMP_F_COPPER },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1377 	  "82580 quad-1000BaseX Ethernet",
   1378 	  WM_T_82580,		WMP_F_FIBER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1381 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1382 	  WM_T_82580,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1385 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1386 	  WM_T_82580,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1389 	  "DH89XXCC 1000BASE-KX Ethernet",
   1390 	  WM_T_82580,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1393 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1394 	  WM_T_82580,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1397 	  "I350 Gigabit Network Connection",
   1398 	  WM_T_I350,		WMP_F_COPPER },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1401 	  "I350 Gigabit Fiber Network Connection",
   1402 	  WM_T_I350,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1405 	  "I350 Gigabit Backplane Connection",
   1406 	  WM_T_I350,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1409 	  "I350 Quad Port Gigabit Ethernet",
   1410 	  WM_T_I350,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1413 	  "I350 Gigabit Connection",
   1414 	  WM_T_I350,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1417 	  "I354 Gigabit Ethernet (KX)",
   1418 	  WM_T_I354,		WMP_F_SERDES },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1421 	  "I354 Gigabit Ethernet (SGMII)",
   1422 	  WM_T_I354,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1425 	  "I354 Gigabit Ethernet (2.5G)",
   1426 	  WM_T_I354,		WMP_F_COPPER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1429 	  "I210-T1 Ethernet Server Adapter",
   1430 	  WM_T_I210,		WMP_F_COPPER },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1433 	  "I210 Ethernet (Copper OEM)",
   1434 	  WM_T_I210,		WMP_F_COPPER },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1437 	  "I210 Ethernet (Copper IT)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1441 	  "I210 Ethernet (FLASH less)",
   1442 	  WM_T_I210,		WMP_F_COPPER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1445 	  "I210 Gigabit Ethernet (Fiber)",
   1446 	  WM_T_I210,		WMP_F_FIBER },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1449 	  "I210 Gigabit Ethernet (SERDES)",
   1450 	  WM_T_I210,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1453 	  "I210 Gigabit Ethernet (FLASH less)",
   1454 	  WM_T_I210,		WMP_F_SERDES },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1457 	  "I210 Gigabit Ethernet (SGMII)",
   1458 	  WM_T_I210,		WMP_F_COPPER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1461 	  "I211 Ethernet (COPPER)",
   1462 	  WM_T_I211,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1464 	  "I217 V Ethernet Connection",
   1465 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1467 	  "I217 LM Ethernet Connection",
   1468 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1470 	  "I218 V Ethernet Connection",
   1471 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1473 	  "I218 V Ethernet Connection",
   1474 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1476 	  "I218 V Ethernet Connection",
   1477 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1479 	  "I218 LM Ethernet Connection",
   1480 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1482 	  "I218 LM Ethernet Connection",
   1483 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1485 	  "I218 LM Ethernet Connection",
   1486 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1487 #if 0
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1489 	  "I219 V Ethernet Connection",
   1490 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1492 	  "I219 V Ethernet Connection",
   1493 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1495 	  "I219 V Ethernet Connection",
   1496 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1498 	  "I219 V Ethernet Connection",
   1499 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1501 	  "I219 LM Ethernet Connection",
   1502 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1504 	  "I219 LM Ethernet Connection",
   1505 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1507 	  "I219 LM Ethernet Connection",
   1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1510 	  "I219 LM Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1513 	  "I219 LM Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 #endif
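         	/* Sentinel: the NULL wmp_name ends the table for wm_lookup(). */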
   1516 	{ 0,			0,
   1517 	  NULL,
   1518 	  0,			0 },
   1519 };
   1520 
   1521 /*
   1522  * Register read/write functions.
   1523  * Other than CSR_{READ|WRITE}().
   1524  */
   1525 
   1526 #if 0 /* Not currently used */
   1527 static inline uint32_t
   1528 wm_io_read(struct wm_softc *sc, int reg)
   1529 {
   1530 
   1531 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1532 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1533 }
   1534 #endif
   1535 
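         /*
          * Indirect I/O-space register access: write the register offset
          * through the address window at offset 0 of the I/O BAR, then
          * read or write the data through the window at offset 4.
          */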
   1536 static inline void
   1537 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1538 {
   1539 
   1540 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1541 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1542 }
   1543 
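         /*
          * Write to an 8-bit register reached through an indirect access
          * register (such as SCTL): pack the data and the register offset
          * into a single write, then poll until the controller sets the
          * ready bit (or give up after SCTL_CTL_POLL_TIMEOUT iterations).
          */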
   1544 static inline void
   1545 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1546     uint32_t data)
   1547 {
   1548 	uint32_t regval;
   1549 	int i;
   1550 
   1551 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1552 
   1553 	CSR_WRITE(sc, reg, regval);
   1554 
   1555 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1556 		delay(5);
   1557 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1558 			break;
   1559 	}
   1560 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1561 		aprint_error("%s: WARNING:"
   1562 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1563 		    device_xname(sc->sc_dev), reg);
   1564 	}
   1565 }
   1566 
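         /* Split a DMA address into the two 32-bit words of a descriptor. */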
   1567 static inline void
   1568 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1569 {
   1570 	wa->wa_low = htole32(v & 0xffffffffU);
   1571 	if (sizeof(bus_addr_t) == 8)
   1572 		wa->wa_high = htole32((uint64_t) v >> 32);
   1573 	else
   1574 		wa->wa_high = 0;
   1575 }
   1576 
   1577 /*
   1578  * Descriptor sync/init functions.
   1579  */
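         /*
          * Sync [start, start + num) Tx descriptors; a range that wraps
          * past the end of the ring is synced in two pieces.
          */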
   1580 static inline void
   1581 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1582 {
   1583 	struct wm_softc *sc = txq->txq_sc;
   1584 
   1585 	/* If it will wrap around, sync to the end of the ring. */
   1586 	if ((start + num) > WM_NTXDESC(txq)) {
   1587 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1588 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1589 		    (WM_NTXDESC(txq) - start), ops);
   1590 		num -= (WM_NTXDESC(txq) - start);
   1591 		start = 0;
   1592 	}
   1593 
   1594 	/* Now sync whatever is left. */
   1595 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1596 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1597 }
   1598 
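         /* Sync a single Rx descriptor. */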
   1599 static inline void
   1600 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1601 {
   1602 	struct wm_softc *sc = rxq->rxq_sc;
   1603 
   1604 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1605 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1606 }
   1607 
   1608 static inline void
   1609 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1610 {
   1611 	struct wm_softc *sc = rxq->rxq_sc;
   1612 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1613 	struct mbuf *m = rxs->rxs_mbuf;
   1614 
   1615 	/*
   1616 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1617 	 * so that the payload after the Ethernet header is aligned
   1618 	 * to a 4-byte boundary.
    1619 	 *
   1620 	 * XXX BRAINDAMAGE ALERT!
   1621 	 * The stupid chip uses the same size for every buffer, which
   1622 	 * is set in the Receive Control register.  We are using the 2K
   1623 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1624 	 * reason, we can't "scoot" packets longer than the standard
   1625 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1626 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1627 	 * the upper layer copy the headers.
   1628 	 */
   1629 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1630 
   1631 	if (sc->sc_type == WM_T_82574) {
   1632 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1633 		rxd->erx_data.erxd_addr =
   1634 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1635 		rxd->erx_data.erxd_dd = 0;
   1636 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1637 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1638 
   1639 		rxd->nqrx_data.nrxd_paddr =
   1640 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1641 		/* Currently, split header is not supported. */
   1642 		rxd->nqrx_data.nrxd_haddr = 0;
   1643 	} else {
   1644 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1645 
   1646 		wm_set_dma_addr(&rxd->wrx_addr,
   1647 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1648 		rxd->wrx_len = 0;
   1649 		rxd->wrx_cksum = 0;
   1650 		rxd->wrx_status = 0;
   1651 		rxd->wrx_errors = 0;
   1652 		rxd->wrx_special = 0;
   1653 	}
   1654 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1655 
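         	/* Advance the Rx ring tail pointer to pass this slot to the chip. */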
   1656 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1657 }
   1658 
   1659 /*
   1660  * Device driver interface functions and commonly used functions.
   1661  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1662  */
   1663 
   1664 /* Lookup supported device table */
   1665 static const struct wm_product *
   1666 wm_lookup(const struct pci_attach_args *pa)
   1667 {
   1668 	const struct wm_product *wmp;
   1669 
   1670 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1671 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1672 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1673 			return wmp;
   1674 	}
   1675 	return NULL;
   1676 }
   1677 
   1678 /* The match function (ca_match) */
   1679 static int
   1680 wm_match(device_t parent, cfdata_t cf, void *aux)
   1681 {
   1682 	struct pci_attach_args *pa = aux;
   1683 
   1684 	if (wm_lookup(pa) != NULL)
   1685 		return 1;
   1686 
   1687 	return 0;
   1688 }
   1689 
   1690 /* The attach function (ca_attach) */
   1691 static void
   1692 wm_attach(device_t parent, device_t self, void *aux)
   1693 {
   1694 	struct wm_softc *sc = device_private(self);
   1695 	struct pci_attach_args *pa = aux;
   1696 	prop_dictionary_t dict;
   1697 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1698 	pci_chipset_tag_t pc = pa->pa_pc;
   1699 	int counts[PCI_INTR_TYPE_SIZE];
   1700 	pci_intr_type_t max_type;
   1701 	const char *eetype, *xname;
   1702 	bus_space_tag_t memt;
   1703 	bus_space_handle_t memh;
   1704 	bus_size_t memsize;
   1705 	int memh_valid;
   1706 	int i, error;
   1707 	const struct wm_product *wmp;
   1708 	prop_data_t ea;
   1709 	prop_number_t pn;
   1710 	uint8_t enaddr[ETHER_ADDR_LEN];
   1711 	char buf[256];
   1712 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1713 	pcireg_t preg, memtype;
   1714 	uint16_t eeprom_data, apme_mask;
   1715 	bool force_clear_smbi;
   1716 	uint32_t link_mode;
   1717 	uint32_t reg;
   1718 
   1719 	sc->sc_dev = self;
   1720 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1721 	sc->sc_core_stopping = false;
   1722 
   1723 	wmp = wm_lookup(pa);
   1724 #ifdef DIAGNOSTIC
   1725 	if (wmp == NULL) {
   1726 		printf("\n");
   1727 		panic("wm_attach: impossible");
   1728 	}
   1729 #endif
   1730 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1731 
   1732 	sc->sc_pc = pa->pa_pc;
   1733 	sc->sc_pcitag = pa->pa_tag;
   1734 
   1735 	if (pci_dma64_available(pa))
   1736 		sc->sc_dmat = pa->pa_dmat64;
   1737 	else
   1738 		sc->sc_dmat = pa->pa_dmat;
   1739 
   1740 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1741 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1742 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1743 
   1744 	sc->sc_type = wmp->wmp_type;
   1745 
   1746 	/* Set default function pointers */
   1747 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1748 	sc->phy.release = sc->nvm.release = wm_put_null;
   1749 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1750 
   1751 	if (sc->sc_type < WM_T_82543) {
   1752 		if (sc->sc_rev < 2) {
   1753 			aprint_error_dev(sc->sc_dev,
   1754 			    "i82542 must be at least rev. 2\n");
   1755 			return;
   1756 		}
   1757 		if (sc->sc_rev < 3)
   1758 			sc->sc_type = WM_T_82542_2_0;
   1759 	}
   1760 
   1761 	/*
   1762 	 * Disable MSI for Errata:
   1763 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1764 	 *
   1765 	 *  82544: Errata 25
   1766 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1767 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1768 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1769 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1770 	 *
   1771 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1772 	 *
   1773 	 *  82571 & 82572: Errata 63
   1774 	 */
   1775 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1776 	    || (sc->sc_type == WM_T_82572))
   1777 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1778 
   1779 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1780 	    || (sc->sc_type == WM_T_82580)
   1781 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1782 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1783 		sc->sc_flags |= WM_F_NEWQUEUE;
   1784 
   1785 	/* Set device properties (mactype) */
   1786 	dict = device_properties(sc->sc_dev);
   1787 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1788 
   1789 	/*
    1790 	 * Map the device.  All devices support memory-mapped access,
   1791 	 * and it is really required for normal operation.
   1792 	 */
   1793 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1794 	switch (memtype) {
   1795 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1796 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1797 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1798 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1799 		break;
   1800 	default:
   1801 		memh_valid = 0;
   1802 		break;
   1803 	}
   1804 
   1805 	if (memh_valid) {
   1806 		sc->sc_st = memt;
   1807 		sc->sc_sh = memh;
   1808 		sc->sc_ss = memsize;
   1809 	} else {
   1810 		aprint_error_dev(sc->sc_dev,
   1811 		    "unable to map device registers\n");
   1812 		return;
   1813 	}
   1814 
   1815 	/*
   1816 	 * In addition, i82544 and later support I/O mapped indirect
   1817 	 * register access.  It is not desirable (nor supported in
   1818 	 * this driver) to use it for normal operation, though it is
   1819 	 * required to work around bugs in some chip versions.
   1820 	 */
   1821 	if (sc->sc_type >= WM_T_82544) {
   1822 		/* First we have to find the I/O BAR. */
   1823 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1824 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1825 			if (memtype == PCI_MAPREG_TYPE_IO)
   1826 				break;
   1827 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1828 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1829 				i += 4;	/* skip high bits, too */
   1830 		}
   1831 		if (i < PCI_MAPREG_END) {
   1832 			/*
    1833 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1834 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1835 			 * That's not a problem, because the newer chips
    1836 			 * don't have this bug.
    1837 			 *
    1838 			 * The i8254x apparently doesn't respond when the
    1839 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1840 			 * been configured.
   1841 			 */
   1842 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1843 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1844 				aprint_error_dev(sc->sc_dev,
   1845 				    "WARNING: I/O BAR at zero.\n");
   1846 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1847 					0, &sc->sc_iot, &sc->sc_ioh,
   1848 					NULL, &sc->sc_ios) == 0) {
   1849 				sc->sc_flags |= WM_F_IOH_VALID;
   1850 			} else {
   1851 				aprint_error_dev(sc->sc_dev,
   1852 				    "WARNING: unable to map I/O space\n");
   1853 			}
   1854 		}
   1855 
   1856 	}
   1857 
   1858 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1859 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1860 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1861 	if (sc->sc_type < WM_T_82542_2_1)
   1862 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1863 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1864 
   1865 	/* power up chip */
   1866 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1867 	    NULL)) && error != EOPNOTSUPP) {
   1868 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1869 		return;
   1870 	}
   1871 
   1872 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1873 	/*
    1874 	 * Don't use MSI-X if we can use only one queue, to save
    1875 	 * interrupt resources.
   1876 	 */
   1877 	if (sc->sc_nqueues > 1) {
   1878 		max_type = PCI_INTR_TYPE_MSIX;
   1879 		/*
    1880 		 * The 82583 has an MSI-X capability in its PCI configuration
    1881 		 * space, but it doesn't actually support MSI-X; at least the
    1882 		 * documentation doesn't say anything about MSI-X.
   1883 		 */
   1884 		counts[PCI_INTR_TYPE_MSIX]
   1885 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1886 	} else {
   1887 		max_type = PCI_INTR_TYPE_MSI;
   1888 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1889 	}
   1890 
   1891 	/* Allocation settings */
   1892 	counts[PCI_INTR_TYPE_MSI] = 1;
   1893 	counts[PCI_INTR_TYPE_INTX] = 1;
   1894 	/* overridden by disable flags */
   1895 	if (wm_disable_msi != 0) {
   1896 		counts[PCI_INTR_TYPE_MSI] = 0;
   1897 		if (wm_disable_msix != 0) {
   1898 			max_type = PCI_INTR_TYPE_INTX;
   1899 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1900 		}
   1901 	} else if (wm_disable_msix != 0) {
   1902 		max_type = PCI_INTR_TYPE_MSI;
   1903 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1904 	}
   1905 
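         	/*
         	 * Allocate the best interrupt type available, falling back
         	 * from MSI-X to MSI to INTx: on a setup failure below, the
         	 * allocated vectors are released, max_type is lowered and we
         	 * jump back here.
         	 */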
   1906 alloc_retry:
   1907 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1908 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1909 		return;
   1910 	}
   1911 
   1912 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1913 		error = wm_setup_msix(sc);
   1914 		if (error) {
   1915 			pci_intr_release(pc, sc->sc_intrs,
   1916 			    counts[PCI_INTR_TYPE_MSIX]);
   1917 
   1918 			/* Setup for MSI: Disable MSI-X */
   1919 			max_type = PCI_INTR_TYPE_MSI;
   1920 			counts[PCI_INTR_TYPE_MSI] = 1;
   1921 			counts[PCI_INTR_TYPE_INTX] = 1;
   1922 			goto alloc_retry;
   1923 		}
    1924 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1925 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1926 		error = wm_setup_legacy(sc);
   1927 		if (error) {
   1928 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1929 			    counts[PCI_INTR_TYPE_MSI]);
   1930 
   1931 			/* The next try is for INTx: Disable MSI */
   1932 			max_type = PCI_INTR_TYPE_INTX;
   1933 			counts[PCI_INTR_TYPE_INTX] = 1;
   1934 			goto alloc_retry;
   1935 		}
   1936 	} else {
   1937 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1938 		error = wm_setup_legacy(sc);
   1939 		if (error) {
   1940 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1941 			    counts[PCI_INTR_TYPE_INTX]);
   1942 			return;
   1943 		}
   1944 	}
   1945 
   1946 	/*
   1947 	 * Check the function ID (unit number of the chip).
   1948 	 */
   1949 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1950 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1951 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1952 	    || (sc->sc_type == WM_T_82580)
   1953 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1954 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1955 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1956 	else
   1957 		sc->sc_funcid = 0;
   1958 
   1959 	/*
   1960 	 * Determine a few things about the bus we're connected to.
   1961 	 */
   1962 	if (sc->sc_type < WM_T_82543) {
   1963 		/* We don't really know the bus characteristics here. */
   1964 		sc->sc_bus_speed = 33;
   1965 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1966 		/*
    1967 		 * CSA (Communication Streaming Architecture) is about as fast
    1968 		 * as a 32-bit 66MHz PCI bus.
   1969 		 */
   1970 		sc->sc_flags |= WM_F_CSA;
   1971 		sc->sc_bus_speed = 66;
   1972 		aprint_verbose_dev(sc->sc_dev,
   1973 		    "Communication Streaming Architecture\n");
   1974 		if (sc->sc_type == WM_T_82547) {
   1975 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1976 			callout_setfunc(&sc->sc_txfifo_ch,
   1977 					wm_82547_txfifo_stall, sc);
   1978 			aprint_verbose_dev(sc->sc_dev,
   1979 			    "using 82547 Tx FIFO stall work-around\n");
   1980 		}
   1981 	} else if (sc->sc_type >= WM_T_82571) {
   1982 		sc->sc_flags |= WM_F_PCIE;
   1983 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1984 		    && (sc->sc_type != WM_T_ICH10)
   1985 		    && (sc->sc_type != WM_T_PCH)
   1986 		    && (sc->sc_type != WM_T_PCH2)
   1987 		    && (sc->sc_type != WM_T_PCH_LPT)
   1988 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1989 			/* ICH* and PCH* have no PCIe capability registers */
   1990 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1991 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1992 				NULL) == 0)
   1993 				aprint_error_dev(sc->sc_dev,
   1994 				    "unable to find PCIe capability\n");
   1995 		}
   1996 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1997 	} else {
   1998 		reg = CSR_READ(sc, WMREG_STATUS);
   1999 		if (reg & STATUS_BUS64)
   2000 			sc->sc_flags |= WM_F_BUS64;
   2001 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2002 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2003 
   2004 			sc->sc_flags |= WM_F_PCIX;
   2005 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2006 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2007 				aprint_error_dev(sc->sc_dev,
   2008 				    "unable to find PCIX capability\n");
   2009 			else if (sc->sc_type != WM_T_82545_3 &&
   2010 				 sc->sc_type != WM_T_82546_3) {
   2011 				/*
   2012 				 * Work around a problem caused by the BIOS
   2013 				 * setting the max memory read byte count
   2014 				 * incorrectly.
   2015 				 */
   2016 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2017 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2018 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2019 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2020 
   2021 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2022 				    PCIX_CMD_BYTECNT_SHIFT;
   2023 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2024 				    PCIX_STATUS_MAXB_SHIFT;
   2025 				if (bytecnt > maxb) {
   2026 					aprint_verbose_dev(sc->sc_dev,
   2027 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2028 					    512 << bytecnt, 512 << maxb);
   2029 					pcix_cmd = (pcix_cmd &
   2030 					    ~PCIX_CMD_BYTECNT_MASK) |
   2031 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2032 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2033 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2034 					    pcix_cmd);
   2035 				}
   2036 			}
   2037 		}
   2038 		/*
   2039 		 * The quad port adapter is special; it has a PCIX-PCIX
   2040 		 * bridge on the board, and can run the secondary bus at
   2041 		 * a higher speed.
   2042 		 */
   2043 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2044 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2045 								      : 66;
   2046 		} else if (sc->sc_flags & WM_F_PCIX) {
   2047 			switch (reg & STATUS_PCIXSPD_MASK) {
   2048 			case STATUS_PCIXSPD_50_66:
   2049 				sc->sc_bus_speed = 66;
   2050 				break;
   2051 			case STATUS_PCIXSPD_66_100:
   2052 				sc->sc_bus_speed = 100;
   2053 				break;
   2054 			case STATUS_PCIXSPD_100_133:
   2055 				sc->sc_bus_speed = 133;
   2056 				break;
   2057 			default:
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2060 				    reg & STATUS_PCIXSPD_MASK);
   2061 				sc->sc_bus_speed = 66;
   2062 				break;
   2063 			}
   2064 		} else
   2065 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2066 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2067 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2068 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2069 	}
   2070 
    2071 	/* Disable ASPM L0s and/or L1 as a workaround */
   2072 	wm_disable_aspm(sc);
   2073 
   2074 	/* clear interesting stat counters */
   2075 	CSR_READ(sc, WMREG_COLC);
   2076 	CSR_READ(sc, WMREG_RXERRC);
   2077 
   2078 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2079 	    || (sc->sc_type >= WM_T_ICH8))
   2080 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2081 	if (sc->sc_type >= WM_T_ICH8)
   2082 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2083 
   2084 	/* Set PHY, NVM mutex related stuff */
   2085 	switch (sc->sc_type) {
   2086 	case WM_T_82542_2_0:
   2087 	case WM_T_82542_2_1:
   2088 	case WM_T_82543:
   2089 	case WM_T_82544:
   2090 		/* Microwire */
   2091 		sc->nvm.read = wm_nvm_read_uwire;
   2092 		sc->sc_nvm_wordsize = 64;
   2093 		sc->sc_nvm_addrbits = 6;
   2094 		break;
   2095 	case WM_T_82540:
   2096 	case WM_T_82545:
   2097 	case WM_T_82545_3:
   2098 	case WM_T_82546:
   2099 	case WM_T_82546_3:
   2100 		/* Microwire */
   2101 		sc->nvm.read = wm_nvm_read_uwire;
   2102 		reg = CSR_READ(sc, WMREG_EECD);
   2103 		if (reg & EECD_EE_SIZE) {
   2104 			sc->sc_nvm_wordsize = 256;
   2105 			sc->sc_nvm_addrbits = 8;
   2106 		} else {
   2107 			sc->sc_nvm_wordsize = 64;
   2108 			sc->sc_nvm_addrbits = 6;
   2109 		}
   2110 		sc->sc_flags |= WM_F_LOCK_EECD;
   2111 		sc->nvm.acquire = wm_get_eecd;
   2112 		sc->nvm.release = wm_put_eecd;
   2113 		break;
   2114 	case WM_T_82541:
   2115 	case WM_T_82541_2:
   2116 	case WM_T_82547:
   2117 	case WM_T_82547_2:
   2118 		reg = CSR_READ(sc, WMREG_EECD);
   2119 		/*
    2120 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2121 		 * 8254[17], so set the flags and functions before calling it.
   2122 		 */
   2123 		sc->sc_flags |= WM_F_LOCK_EECD;
   2124 		sc->nvm.acquire = wm_get_eecd;
   2125 		sc->nvm.release = wm_put_eecd;
   2126 		if (reg & EECD_EE_TYPE) {
   2127 			/* SPI */
   2128 			sc->nvm.read = wm_nvm_read_spi;
   2129 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2130 			wm_nvm_set_addrbits_size_eecd(sc);
   2131 		} else {
   2132 			/* Microwire */
   2133 			sc->nvm.read = wm_nvm_read_uwire;
   2134 			if ((reg & EECD_EE_ABITS) != 0) {
   2135 				sc->sc_nvm_wordsize = 256;
   2136 				sc->sc_nvm_addrbits = 8;
   2137 			} else {
   2138 				sc->sc_nvm_wordsize = 64;
   2139 				sc->sc_nvm_addrbits = 6;
   2140 			}
   2141 		}
   2142 		break;
   2143 	case WM_T_82571:
   2144 	case WM_T_82572:
   2145 		/* SPI */
   2146 		sc->nvm.read = wm_nvm_read_eerd;
    2147 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2148 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2149 		wm_nvm_set_addrbits_size_eecd(sc);
   2150 		sc->phy.acquire = wm_get_swsm_semaphore;
   2151 		sc->phy.release = wm_put_swsm_semaphore;
   2152 		sc->nvm.acquire = wm_get_nvm_82571;
   2153 		sc->nvm.release = wm_put_nvm_82571;
   2154 		break;
   2155 	case WM_T_82573:
   2156 	case WM_T_82574:
   2157 	case WM_T_82583:
   2158 		sc->nvm.read = wm_nvm_read_eerd;
    2159 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2160 		if (sc->sc_type == WM_T_82573) {
   2161 			sc->phy.acquire = wm_get_swsm_semaphore;
   2162 			sc->phy.release = wm_put_swsm_semaphore;
   2163 			sc->nvm.acquire = wm_get_nvm_82571;
   2164 			sc->nvm.release = wm_put_nvm_82571;
   2165 		} else {
   2166 			/* Both PHY and NVM use the same semaphore. */
   2167 			sc->phy.acquire = sc->nvm.acquire
   2168 			    = wm_get_swfwhw_semaphore;
   2169 			sc->phy.release = sc->nvm.release
   2170 			    = wm_put_swfwhw_semaphore;
   2171 		}
   2172 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2173 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2174 			sc->sc_nvm_wordsize = 2048;
   2175 		} else {
   2176 			/* SPI */
   2177 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 		}
   2180 		break;
   2181 	case WM_T_82575:
   2182 	case WM_T_82576:
   2183 	case WM_T_82580:
   2184 	case WM_T_I350:
   2185 	case WM_T_I354:
   2186 	case WM_T_80003:
   2187 		/* SPI */
   2188 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2189 		wm_nvm_set_addrbits_size_eecd(sc);
    2190 		if ((sc->sc_type == WM_T_80003)
   2191 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2192 			sc->nvm.read = wm_nvm_read_eerd;
   2193 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2194 		} else {
   2195 			sc->nvm.read = wm_nvm_read_spi;
   2196 			sc->sc_flags |= WM_F_LOCK_EECD;
   2197 		}
   2198 		sc->phy.acquire = wm_get_phy_82575;
   2199 		sc->phy.release = wm_put_phy_82575;
   2200 		sc->nvm.acquire = wm_get_nvm_80003;
   2201 		sc->nvm.release = wm_put_nvm_80003;
   2202 		break;
   2203 	case WM_T_ICH8:
   2204 	case WM_T_ICH9:
   2205 	case WM_T_ICH10:
   2206 	case WM_T_PCH:
   2207 	case WM_T_PCH2:
   2208 	case WM_T_PCH_LPT:
   2209 		sc->nvm.read = wm_nvm_read_ich8;
   2210 		/* FLASH */
   2211 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2212 		sc->sc_nvm_wordsize = 2048;
   2213 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2214 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2215 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "can't map FLASH registers\n");
   2218 			goto out;
   2219 		}
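         		/*
         		 * GFPREG holds the first sector of the flash NVM region
         		 * in its low bits and the last sector in bits 16 and up;
         		 * derive the flash base address (in bytes) and the size
         		 * of one of the two flash banks (in 16-bit words).
         		 */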
   2220 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2221 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2222 		    ICH_FLASH_SECTOR_SIZE;
   2223 		sc->sc_ich8_flash_bank_size =
   2224 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2225 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2226 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2227 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2228 		sc->sc_flashreg_offset = 0;
   2229 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2230 		sc->phy.release = wm_put_swflag_ich8lan;
   2231 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2232 		sc->nvm.release = wm_put_nvm_ich8lan;
   2233 		break;
   2234 	case WM_T_PCH_SPT:
   2235 		sc->nvm.read = wm_nvm_read_spt;
   2236 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2237 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2238 		sc->sc_flasht = sc->sc_st;
   2239 		sc->sc_flashh = sc->sc_sh;
   2240 		sc->sc_ich8_flash_base = 0;
   2241 		sc->sc_nvm_wordsize =
   2242 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2243 			* NVM_SIZE_MULTIPLIER;
   2244 		/* It is size in bytes, we want words */
   2245 		sc->sc_nvm_wordsize /= 2;
   2246 		/* assume 2 banks */
   2247 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2248 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2249 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2250 		sc->phy.release = wm_put_swflag_ich8lan;
   2251 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2252 		sc->nvm.release = wm_put_nvm_ich8lan;
   2253 		break;
   2254 	case WM_T_I210:
   2255 	case WM_T_I211:
    2256 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2257 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2258 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2259 			sc->nvm.read = wm_nvm_read_eerd;
   2260 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2261 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2262 			wm_nvm_set_addrbits_size_eecd(sc);
   2263 		} else {
   2264 			sc->nvm.read = wm_nvm_read_invm;
   2265 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2266 			sc->sc_nvm_wordsize = INVM_SIZE;
   2267 		}
   2268 		sc->phy.acquire = wm_get_phy_82575;
   2269 		sc->phy.release = wm_put_phy_82575;
   2270 		sc->nvm.acquire = wm_get_nvm_80003;
   2271 		sc->nvm.release = wm_put_nvm_80003;
   2272 		break;
   2273 	default:
   2274 		break;
   2275 	}
   2276 
   2277 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2278 	switch (sc->sc_type) {
   2279 	case WM_T_82571:
   2280 	case WM_T_82572:
   2281 		reg = CSR_READ(sc, WMREG_SWSM2);
   2282 		if ((reg & SWSM2_LOCK) == 0) {
   2283 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2284 			force_clear_smbi = true;
   2285 		} else
   2286 			force_clear_smbi = false;
   2287 		break;
   2288 	case WM_T_82573:
   2289 	case WM_T_82574:
   2290 	case WM_T_82583:
   2291 		force_clear_smbi = true;
   2292 		break;
   2293 	default:
   2294 		force_clear_smbi = false;
   2295 		break;
   2296 	}
   2297 	if (force_clear_smbi) {
   2298 		reg = CSR_READ(sc, WMREG_SWSM);
   2299 		if ((reg & SWSM_SMBI) != 0)
   2300 			aprint_error_dev(sc->sc_dev,
   2301 			    "Please update the Bootagent\n");
   2302 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2303 	}
   2304 
   2305 	/*
    2306 	 * Defer printing the EEPROM type until after verifying the checksum.
   2307 	 * This allows the EEPROM type to be printed correctly in the case
   2308 	 * that no EEPROM is attached.
   2309 	 */
   2310 	/*
   2311 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2312 	 * this for later, so we can fail future reads from the EEPROM.
   2313 	 */
   2314 	if (wm_nvm_validate_checksum(sc)) {
   2315 		/*
    2316 		 * Retry the read, because some PCIe parts fail the
    2317 		 * first check due to the link being in a sleep state.
   2318 		 */
   2319 		if (wm_nvm_validate_checksum(sc))
   2320 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2321 	}
   2322 
   2323 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2324 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2325 	else {
   2326 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2327 		    sc->sc_nvm_wordsize);
   2328 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2329 			aprint_verbose("iNVM");
   2330 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2331 			aprint_verbose("FLASH(HW)");
   2332 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2333 			aprint_verbose("FLASH");
   2334 		else {
   2335 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2336 				eetype = "SPI";
   2337 			else
   2338 				eetype = "MicroWire";
   2339 			aprint_verbose("(%d address bits) %s EEPROM",
   2340 			    sc->sc_nvm_addrbits, eetype);
   2341 		}
   2342 	}
   2343 	wm_nvm_version(sc);
   2344 	aprint_verbose("\n");
   2345 
   2346 	/*
    2347 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2348 	 * might be incorrect.
   2349 	 */
   2350 	wm_gmii_setup_phytype(sc, 0, 0);
   2351 
   2352 	/* Reset the chip to a known state. */
   2353 	wm_reset(sc);
   2354 
   2355 	/* Check for I21[01] PLL workaround */
   2356 	if (sc->sc_type == WM_T_I210)
   2357 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2358 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2359 		/* NVM image release 3.25 has a workaround */
   2360 		if ((sc->sc_nvm_ver_major < 3)
   2361 		    || ((sc->sc_nvm_ver_major == 3)
   2362 			&& (sc->sc_nvm_ver_minor < 25))) {
   2363 			aprint_verbose_dev(sc->sc_dev,
   2364 			    "ROM image version %d.%d is older than 3.25\n",
   2365 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2366 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2367 		}
   2368 	}
   2369 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2370 		wm_pll_workaround_i210(sc);
   2371 
   2372 	wm_get_wakeup(sc);
   2373 
   2374 	/* Non-AMT based hardware can now take control from firmware */
   2375 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2376 		wm_get_hw_control(sc);
   2377 
   2378 	/*
    2379 	 * Read the Ethernet address from the EEPROM, unless it was
    2380 	 * already found in the device properties.
   2381 	 */
   2382 	ea = prop_dictionary_get(dict, "mac-address");
   2383 	if (ea != NULL) {
   2384 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2385 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2386 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2387 	} else {
   2388 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2389 			aprint_error_dev(sc->sc_dev,
   2390 			    "unable to read Ethernet address\n");
   2391 			goto out;
   2392 		}
   2393 	}
   2394 
   2395 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2396 	    ether_sprintf(enaddr));
   2397 
   2398 	/*
   2399 	 * Read the config info from the EEPROM, and set up various
   2400 	 * bits in the control registers based on their contents.
   2401 	 */
   2402 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2403 	if (pn != NULL) {
   2404 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2405 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2406 	} else {
   2407 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2408 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2409 			goto out;
   2410 		}
   2411 	}
   2412 
   2413 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2414 	if (pn != NULL) {
   2415 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2416 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2417 	} else {
   2418 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2419 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2420 			goto out;
   2421 		}
   2422 	}
   2423 
   2424 	/* check for WM_F_WOL */
   2425 	switch (sc->sc_type) {
   2426 	case WM_T_82542_2_0:
   2427 	case WM_T_82542_2_1:
   2428 	case WM_T_82543:
   2429 		/* dummy? */
   2430 		eeprom_data = 0;
   2431 		apme_mask = NVM_CFG3_APME;
   2432 		break;
   2433 	case WM_T_82544:
   2434 		apme_mask = NVM_CFG2_82544_APM_EN;
   2435 		eeprom_data = cfg2;
   2436 		break;
   2437 	case WM_T_82546:
   2438 	case WM_T_82546_3:
   2439 	case WM_T_82571:
   2440 	case WM_T_82572:
   2441 	case WM_T_82573:
   2442 	case WM_T_82574:
   2443 	case WM_T_82583:
   2444 	case WM_T_80003:
   2445 	default:
   2446 		apme_mask = NVM_CFG3_APME;
   2447 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2448 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2449 		break;
   2450 	case WM_T_82575:
   2451 	case WM_T_82576:
   2452 	case WM_T_82580:
   2453 	case WM_T_I350:
   2454 	case WM_T_I354: /* XXX ok? */
   2455 	case WM_T_ICH8:
   2456 	case WM_T_ICH9:
   2457 	case WM_T_ICH10:
   2458 	case WM_T_PCH:
   2459 	case WM_T_PCH2:
   2460 	case WM_T_PCH_LPT:
   2461 	case WM_T_PCH_SPT:
   2462 		/* XXX The funcid should be checked on some devices */
   2463 		apme_mask = WUC_APME;
   2464 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2465 		break;
   2466 	}
   2467 
   2468 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2469 	if ((eeprom_data & apme_mask) != 0)
   2470 		sc->sc_flags |= WM_F_WOL;
   2471 
   2472 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2473 		/* Check NVM for autonegotiation */
   2474 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2475 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2476 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2477 		}
   2478 	}
   2479 
   2480 	/*
    2481 	 * XXX Need special handling for some multiple-port cards
    2482 	 * to disable a particular port.
   2483 	 */
   2484 
   2485 	if (sc->sc_type >= WM_T_82544) {
   2486 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2487 		if (pn != NULL) {
   2488 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2490 		} else {
   2491 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2492 				aprint_error_dev(sc->sc_dev,
   2493 				    "unable to read SWDPIN\n");
   2494 				goto out;
   2495 			}
   2496 		}
   2497 	}
   2498 
   2499 	if (cfg1 & NVM_CFG1_ILOS)
   2500 		sc->sc_ctrl |= CTRL_ILOS;
   2501 
   2502 	/*
   2503 	 * XXX
    2504 	 * This code isn't correct, because pins 2 and 3 are located in
    2505 	 * different positions on newer chips. Check all the datasheets.
    2506 	 *
    2507 	 * Until this is resolved, only do it on chips up to the 82580.
   2508 	 */
   2509 	if (sc->sc_type <= WM_T_82580) {
   2510 		if (sc->sc_type >= WM_T_82544) {
   2511 			sc->sc_ctrl |=
   2512 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2513 			    CTRL_SWDPIO_SHIFT;
   2514 			sc->sc_ctrl |=
   2515 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2516 			    CTRL_SWDPINS_SHIFT;
   2517 		} else {
   2518 			sc->sc_ctrl |=
   2519 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2520 			    CTRL_SWDPIO_SHIFT;
   2521 		}
   2522 	}
   2523 
   2524 	/* XXX For other than 82580? */
   2525 	if (sc->sc_type == WM_T_82580) {
   2526 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2527 		if (nvmword & __BIT(13))
   2528 			sc->sc_ctrl |= CTRL_ILOS;
   2529 	}
   2530 
   2531 #if 0
   2532 	if (sc->sc_type >= WM_T_82544) {
   2533 		if (cfg1 & NVM_CFG1_IPS0)
   2534 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2535 		if (cfg1 & NVM_CFG1_IPS1)
   2536 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2537 		sc->sc_ctrl_ext |=
   2538 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2539 		    CTRL_EXT_SWDPIO_SHIFT;
   2540 		sc->sc_ctrl_ext |=
   2541 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2542 		    CTRL_EXT_SWDPINS_SHIFT;
   2543 	} else {
   2544 		sc->sc_ctrl_ext |=
   2545 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2546 		    CTRL_EXT_SWDPIO_SHIFT;
   2547 	}
   2548 #endif
   2549 
   2550 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2551 #if 0
   2552 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2553 #endif
   2554 
   2555 	if (sc->sc_type == WM_T_PCH) {
   2556 		uint16_t val;
   2557 
   2558 		/* Save the NVM K1 bit setting */
   2559 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2560 
   2561 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2562 			sc->sc_nvm_k1_enabled = 1;
   2563 		else
   2564 			sc->sc_nvm_k1_enabled = 0;
   2565 	}
   2566 
   2567 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2568 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2569 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2570 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2571 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2572 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2573 		/* Copper only */
   2574 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2575 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2576 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2577 	    || (sc->sc_type == WM_T_I211)) {
   2578 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2579 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2580 		switch (link_mode) {
   2581 		case CTRL_EXT_LINK_MODE_1000KX:
   2582 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2583 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2584 			break;
   2585 		case CTRL_EXT_LINK_MODE_SGMII:
   2586 			if (wm_sgmii_uses_mdio(sc)) {
   2587 				aprint_verbose_dev(sc->sc_dev,
   2588 				    "SGMII(MDIO)\n");
   2589 				sc->sc_flags |= WM_F_SGMII;
   2590 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2591 				break;
   2592 			}
   2593 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2594 			/*FALLTHROUGH*/
   2595 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2596 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2597 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2598 				if (link_mode
   2599 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2600 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2601 					sc->sc_flags |= WM_F_SGMII;
   2602 				} else {
   2603 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2604 					aprint_verbose_dev(sc->sc_dev,
   2605 					    "SERDES\n");
   2606 				}
   2607 				break;
   2608 			}
   2609 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2610 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2611 
   2612 			/* Change current link mode setting */
   2613 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2614 			switch (sc->sc_mediatype) {
   2615 			case WM_MEDIATYPE_COPPER:
   2616 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2617 				break;
   2618 			case WM_MEDIATYPE_SERDES:
   2619 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2620 				break;
   2621 			default:
   2622 				break;
   2623 			}
   2624 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2625 			break;
   2626 		case CTRL_EXT_LINK_MODE_GMII:
   2627 		default:
   2628 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2629 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2630 			break;
   2631 		}
   2632 
    2634 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2635 			reg |= CTRL_EXT_I2C_ENA;
    2636 		else
    2637 			reg &= ~CTRL_EXT_I2C_ENA;
   2638 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2639 	} else if (sc->sc_type < WM_T_82543 ||
   2640 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2641 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2642 			aprint_error_dev(sc->sc_dev,
   2643 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2644 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2645 		}
   2646 	} else {
   2647 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2648 			aprint_error_dev(sc->sc_dev,
   2649 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2650 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2651 		}
   2652 	}
   2653 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2654 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2655 
   2656 	/* Set device properties (macflags) */
   2657 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2658 
   2659 	/* Initialize the media structures accordingly. */
   2660 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2661 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2662 	else
   2663 		wm_tbi_mediainit(sc); /* All others */
   2664 
   2665 	ifp = &sc->sc_ethercom.ec_if;
   2666 	xname = device_xname(sc->sc_dev);
   2667 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2668 	ifp->if_softc = sc;
   2669 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2670 #ifdef WM_MPSAFE
   2671 	ifp->if_extflags = IFEF_MPSAFE;
   2672 #endif
   2673 	ifp->if_ioctl = wm_ioctl;
   2674 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2675 		ifp->if_start = wm_nq_start;
   2676 		/*
   2677 		 * When the number of CPUs is one and the controller can use
    2678 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2679 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2680 		 * the other for link status changes.
   2681 		 * In this situation, wm_nq_transmit() is disadvantageous
   2682 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2683 		 */
   2684 		if (wm_is_using_multiqueue(sc))
   2685 			ifp->if_transmit = wm_nq_transmit;
   2686 	} else {
   2687 		ifp->if_start = wm_start;
   2688 		/*
    2689 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2690 		 */
   2691 		if (wm_is_using_multiqueue(sc))
   2692 			ifp->if_transmit = wm_transmit;
   2693 	}
    2694 	/* wm(4) uses wm_tick as its watchdog, not ifp->if_watchdog. */
   2695 	ifp->if_init = wm_init;
   2696 	ifp->if_stop = wm_stop;
   2697 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2698 	IFQ_SET_READY(&ifp->if_snd);
   2699 
   2700 	/* Check for jumbo frame */
   2701 	switch (sc->sc_type) {
   2702 	case WM_T_82573:
   2703 		/* XXX limited to 9234 if ASPM is disabled */
   2704 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2705 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2706 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2707 		break;
   2708 	case WM_T_82571:
   2709 	case WM_T_82572:
   2710 	case WM_T_82574:
   2711 	case WM_T_82583:
   2712 	case WM_T_82575:
   2713 	case WM_T_82576:
   2714 	case WM_T_82580:
   2715 	case WM_T_I350:
   2716 	case WM_T_I354:
   2717 	case WM_T_I210:
   2718 	case WM_T_I211:
   2719 	case WM_T_80003:
   2720 	case WM_T_ICH9:
   2721 	case WM_T_ICH10:
   2722 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2723 	case WM_T_PCH_LPT:
   2724 	case WM_T_PCH_SPT:
   2725 		/* XXX limited to 9234 */
   2726 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2727 		break;
   2728 	case WM_T_PCH:
   2729 		/* XXX limited to 4096 */
   2730 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2731 		break;
   2732 	case WM_T_82542_2_0:
   2733 	case WM_T_82542_2_1:
   2734 	case WM_T_ICH8:
   2735 		/* No support for jumbo frame */
   2736 		break;
   2737 	default:
   2738 		/* ETHER_MAX_LEN_JUMBO */
   2739 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2740 		break;
   2741 	}
   2742 
   2743 	/* If we're a i82543 or greater, we can support VLANs. */
   2744 	if (sc->sc_type >= WM_T_82543)
   2745 		sc->sc_ethercom.ec_capabilities |=
   2746 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2747 
   2748 	/*
    2749 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2750 	 * on i82543 and later.
   2751 	 */
   2752 	if (sc->sc_type >= WM_T_82543) {
   2753 		ifp->if_capabilities |=
   2754 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2755 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2756 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2757 		    IFCAP_CSUM_TCPv6_Tx |
   2758 		    IFCAP_CSUM_UDPv6_Tx;
   2759 	}
   2760 
   2761 	/*
    2762 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2763 	 *
   2764 	 *	82541GI (8086:1076) ... no
   2765 	 *	82572EI (8086:10b9) ... yes
   2766 	 */
   2767 	if (sc->sc_type >= WM_T_82571) {
   2768 		ifp->if_capabilities |=
   2769 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2770 	}
   2771 
   2772 	/*
    2773 	 * If we're an i82544 or greater (except i82547), we can do
   2774 	 * TCP segmentation offload.
   2775 	 */
   2776 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2777 		ifp->if_capabilities |= IFCAP_TSOv4;
   2778 	}
   2779 
   2780 	if (sc->sc_type >= WM_T_82571) {
   2781 		ifp->if_capabilities |= IFCAP_TSOv6;
   2782 	}
   2783 
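         	/* Default limits on Tx/Rx work processed per softint/interrupt. */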
   2784 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2785 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2786 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2787 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2788 
   2789 #ifdef WM_MPSAFE
   2790 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2791 #else
   2792 	sc->sc_core_lock = NULL;
   2793 #endif
   2794 
   2795 	/* Attach the interface. */
   2796 	error = if_initialize(ifp);
   2797 	if (error != 0) {
   2798 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2799 		    error);
   2800 		return; /* Error */
   2801 	}
   2802 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2803 	ether_ifattach(ifp, enaddr);
   2804 	if_register(ifp);
   2805 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2806 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2807 			  RND_FLAG_DEFAULT);
   2808 
   2809 #ifdef WM_EVENT_COUNTERS
   2810 	/* Attach event counters. */
   2811 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2812 	    NULL, xname, "linkintr");
   2813 
   2814 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2815 	    NULL, xname, "tx_xoff");
   2816 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2817 	    NULL, xname, "tx_xon");
   2818 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2819 	    NULL, xname, "rx_xoff");
   2820 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2821 	    NULL, xname, "rx_xon");
   2822 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2823 	    NULL, xname, "rx_macctl");
   2824 #endif /* WM_EVENT_COUNTERS */
   2825 
   2826 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2827 		pmf_class_network_register(self, ifp);
   2828 	else
   2829 		aprint_error_dev(self, "couldn't establish power handler\n");
   2830 
   2831 	sc->sc_flags |= WM_F_ATTACHED;
   2832  out:
   2833 	return;
   2834 }
   2835 
   2836 /* The detach function (ca_detach) */
   2837 static int
   2838 wm_detach(device_t self, int flags __unused)
   2839 {
   2840 	struct wm_softc *sc = device_private(self);
   2841 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2842 	int i;
   2843 
   2844 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2845 		return 0;
   2846 
    2847 	/* Stop the interface; wm_stop() also halts the callouts. */
   2848 	wm_stop(ifp, 1);
   2849 
   2850 	pmf_device_deregister(self);
   2851 
   2852 #ifdef WM_EVENT_COUNTERS
   2853 	evcnt_detach(&sc->sc_ev_linkintr);
   2854 
   2855 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2856 	evcnt_detach(&sc->sc_ev_tx_xon);
   2857 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2858 	evcnt_detach(&sc->sc_ev_rx_xon);
   2859 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2860 #endif /* WM_EVENT_COUNTERS */
   2861 
   2862 	/* Tell the firmware about the release */
   2863 	WM_CORE_LOCK(sc);
   2864 	wm_release_manageability(sc);
   2865 	wm_release_hw_control(sc);
   2866 	wm_enable_wakeup(sc);
   2867 	WM_CORE_UNLOCK(sc);
   2868 
   2869 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2870 
   2871 	/* Delete all remaining media. */
   2872 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2873 
   2874 	ether_ifdetach(ifp);
   2875 	if_detach(ifp);
   2876 	if_percpuq_destroy(sc->sc_ipq);
   2877 
   2878 	/* Unload RX dmamaps and free mbufs */
   2879 	for (i = 0; i < sc->sc_nqueues; i++) {
   2880 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2881 		mutex_enter(rxq->rxq_lock);
   2882 		wm_rxdrain(rxq);
   2883 		mutex_exit(rxq->rxq_lock);
   2884 	}
   2885 	/* Must unlock here */
   2886 
   2887 	/* Disestablish the interrupt handler */
   2888 	for (i = 0; i < sc->sc_nintrs; i++) {
   2889 		if (sc->sc_ihs[i] != NULL) {
   2890 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2891 			sc->sc_ihs[i] = NULL;
   2892 		}
   2893 	}
   2894 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2895 
   2896 	wm_free_txrx_queues(sc);
   2897 
   2898 	/* Unmap the registers */
   2899 	if (sc->sc_ss) {
   2900 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2901 		sc->sc_ss = 0;
   2902 	}
   2903 	if (sc->sc_ios) {
   2904 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2905 		sc->sc_ios = 0;
   2906 	}
   2907 	if (sc->sc_flashs) {
   2908 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2909 		sc->sc_flashs = 0;
   2910 	}
   2911 
   2912 	if (sc->sc_core_lock)
   2913 		mutex_obj_free(sc->sc_core_lock);
   2914 	if (sc->sc_ich_phymtx)
   2915 		mutex_obj_free(sc->sc_ich_phymtx);
   2916 	if (sc->sc_ich_nvmmtx)
   2917 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2918 
   2919 	return 0;
   2920 }
   2921 
   2922 static bool
   2923 wm_suspend(device_t self, const pmf_qual_t *qual)
   2924 {
   2925 	struct wm_softc *sc = device_private(self);
   2926 
   2927 	wm_release_manageability(sc);
   2928 	wm_release_hw_control(sc);
   2929 	wm_enable_wakeup(sc);
   2930 
   2931 	return true;
   2932 }
   2933 
   2934 static bool
   2935 wm_resume(device_t self, const pmf_qual_t *qual)
   2936 {
   2937 	struct wm_softc *sc = device_private(self);
   2938 
   2939 	/* Disable ASPM L0s and/or L1 for workaround */
   2940 	wm_disable_aspm(sc);
   2941 	wm_init_manageability(sc);
   2942 
   2943 	return true;
   2944 }
   2945 
   2946 /*
   2947  * wm_watchdog:		[ifnet interface function]
   2948  *
   2949  *	Watchdog timer handler.
   2950  */
   2951 static void
   2952 wm_watchdog(struct ifnet *ifp)
   2953 {
   2954 	int qid;
   2955 	struct wm_softc *sc = ifp->if_softc;
    2956 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576) */
   2957 
   2958 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2959 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2960 
   2961 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2962 	}
   2963 
   2964 	/*
    2965 	 * If any queue has hung, reset the interface.
   2966 	 */
   2967 	if (hang_queue != 0) {
   2968 		(void) wm_init(ifp);
   2969 
   2970 		/*
    2971 		 * Some upper-layer processing (e.g. ALTQ, or a single-CPU
    2972 		 * system) may still call ifp->if_start().
   2973 		 */
   2974 		/* Try to get more packets going. */
   2975 		ifp->if_start(ifp);
   2976 	}
   2977 }
   2978 
   2979 
   2980 static void
   2981 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   2982 {
   2983 
   2984 	mutex_enter(txq->txq_lock);
   2985 	if (txq->txq_watchdog &&
   2986 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   2987 		wm_watchdog_txq_locked(ifp, txq, hang);
   2988 	}
   2989 	mutex_exit(txq->txq_lock);
   2990 }
   2991 
   2992 static void
   2993 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   2994 {
   2995 	struct wm_softc *sc = ifp->if_softc;
   2996 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   2997 
   2998 	KASSERT(mutex_owned(txq->txq_lock));
   2999 
   3000 	/*
   3001 	 * Since we're using delayed interrupts, sweep up
   3002 	 * before we report an error.
   3003 	 */
   3004 	wm_txeof(txq, UINT_MAX);
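         	/*
         	 * wm_txeof() should have cleared txq_watchdog if all jobs
         	 * completed; if it is still set here, the queue is hung.
         	 */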
   3005 	if (txq->txq_watchdog)
   3006 		*hang |= __BIT(wmq->wmq_id);
   3007 
   3008 	if (txq->txq_free != WM_NTXDESC(txq)) {
   3009 #ifdef WM_DEBUG
   3010 		int i, j;
   3011 		struct wm_txsoft *txs;
   3012 #endif
   3013 		log(LOG_ERR,
   3014 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3015 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3016 		    txq->txq_next);
   3017 		ifp->if_oerrors++;
   3018 #ifdef WM_DEBUG
   3019 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   3020 		    i = WM_NEXTTXS(txq, i)) {
   3021 		    txs = &txq->txq_soft[i];
   3022 		    printf("txs %d tx %d -> %d\n",
   3023 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3024 		    for (j = txs->txs_firstdesc; ;
   3025 			j = WM_NEXTTX(txq, j)) {
   3026 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3027 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3028 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3029 				    printf("\t %#08x%08x\n",
   3030 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3031 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3032 			    } else {
   3033 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3034 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3035 					txq->txq_descs[j].wtx_addr.wa_low);
   3036 				    printf("\t %#04x%02x%02x%08x\n",
   3037 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3038 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3039 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3040 					txq->txq_descs[j].wtx_cmdlen);
   3041 			    }
   3042 			if (j == txs->txs_lastdesc)
   3043 				break;
   3044 			}
   3045 		}
   3046 #endif
   3047 	}
   3048 }
   3049 
   3050 /*
   3051  * wm_tick:
   3052  *
   3053  *	One second timer, used to check link status, sweep up
   3054  *	completed transmit jobs, etc.
   3055  */
   3056 static void
   3057 wm_tick(void *arg)
   3058 {
   3059 	struct wm_softc *sc = arg;
   3060 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3061 #ifndef WM_MPSAFE
   3062 	int s = splnet();
   3063 #endif
   3064 
   3065 	WM_CORE_LOCK(sc);
   3066 
   3067 	if (sc->sc_core_stopping) {
   3068 		WM_CORE_UNLOCK(sc);
   3069 #ifndef WM_MPSAFE
   3070 		splx(s);
   3071 #endif
   3072 		return;
   3073 	}
   3074 
   3075 	if (sc->sc_type >= WM_T_82542_2_1) {
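         		/*
         		 * These statistics registers are read-to-clear, so each
         		 * read accumulates the delta since the previous tick.
         		 */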
   3076 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3077 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3078 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3079 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3080 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3081 	}
   3082 
   3083 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3084 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3085 	    + CSR_READ(sc, WMREG_CRCERRS)
   3086 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3087 	    + CSR_READ(sc, WMREG_SYMERRC)
   3088 	    + CSR_READ(sc, WMREG_RXERRC)
   3089 	    + CSR_READ(sc, WMREG_SEC)
   3090 	    + CSR_READ(sc, WMREG_CEXTERR)
   3091 	    + CSR_READ(sc, WMREG_RLEC);
   3092 	/*
    3093 	 * WMREG_RNBC is incremented when no receive buffers are available
    3094 	 * in host memory. It is not a count of dropped packets, because the
    3095 	 * Ethernet controller can still receive packets in that case as long
    3096 	 * as there is space in the PHY's FIFO.
    3097 	 *
    3098 	 * To count WMREG_RNBC events, use a dedicated EVCNT rather than
    3099 	 * if_iqdrops.
   3100 	 */
   3101 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3102 
   3103 	if (sc->sc_flags & WM_F_HAS_MII)
   3104 		mii_tick(&sc->sc_mii);
   3105 	else if ((sc->sc_type >= WM_T_82575)
   3106 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3107 		wm_serdes_tick(sc);
   3108 	else
   3109 		wm_tbi_tick(sc);
   3110 
   3111 	WM_CORE_UNLOCK(sc);
   3112 
   3113 	wm_watchdog(ifp);
   3114 
   3115 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3116 }
   3117 
   3118 static int
   3119 wm_ifflags_cb(struct ethercom *ec)
   3120 {
   3121 	struct ifnet *ifp = &ec->ec_if;
   3122 	struct wm_softc *sc = ifp->if_softc;
   3123 	int rc = 0;
   3124 
   3125 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3126 		device_xname(sc->sc_dev), __func__));
   3127 
   3128 	WM_CORE_LOCK(sc);
   3129 
   3130 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3131 	sc->sc_if_flags = ifp->if_flags;
   3132 
   3133 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3134 		rc = ENETRESET;
   3135 		goto out;
   3136 	}
   3137 
   3138 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3139 		wm_set_filter(sc);
   3140 
   3141 	wm_set_vlan(sc);
   3142 
   3143 out:
   3144 	WM_CORE_UNLOCK(sc);
   3145 
   3146 	return rc;
   3147 }
   3148 
   3149 /*
   3150  * wm_ioctl:		[ifnet interface function]
   3151  *
   3152  *	Handle control requests from the operator.
   3153  */
   3154 static int
   3155 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3156 {
   3157 	struct wm_softc *sc = ifp->if_softc;
   3158 	struct ifreq *ifr = (struct ifreq *) data;
   3159 	struct ifaddr *ifa = (struct ifaddr *)data;
   3160 	struct sockaddr_dl *sdl;
   3161 	int s, error;
   3162 
   3163 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3164 		device_xname(sc->sc_dev), __func__));
   3165 
   3166 #ifndef WM_MPSAFE
   3167 	s = splnet();
   3168 #endif
   3169 	switch (cmd) {
   3170 	case SIOCSIFMEDIA:
   3171 	case SIOCGIFMEDIA:
   3172 		WM_CORE_LOCK(sc);
   3173 		/* Flow control requires full-duplex mode. */
   3174 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3175 		    (ifr->ifr_media & IFM_FDX) == 0)
   3176 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3177 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3178 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3179 				/* We can do both TXPAUSE and RXPAUSE. */
   3180 				ifr->ifr_media |=
   3181 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3182 			}
   3183 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3184 		}
   3185 		WM_CORE_UNLOCK(sc);
   3186 #ifdef WM_MPSAFE
   3187 		s = splnet();
   3188 #endif
   3189 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3190 #ifdef WM_MPSAFE
   3191 		splx(s);
   3192 #endif
   3193 		break;
   3194 	case SIOCINITIFADDR:
   3195 		WM_CORE_LOCK(sc);
   3196 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3197 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3198 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3199 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3200 			/* unicast address is first multicast entry */
   3201 			wm_set_filter(sc);
   3202 			error = 0;
   3203 			WM_CORE_UNLOCK(sc);
   3204 			break;
   3205 		}
   3206 		WM_CORE_UNLOCK(sc);
   3207 		/*FALLTHROUGH*/
   3208 	default:
   3209 #ifdef WM_MPSAFE
   3210 		s = splnet();
   3211 #endif
   3212 		/* It may call wm_start, so unlock here */
   3213 		error = ether_ioctl(ifp, cmd, data);
   3214 #ifdef WM_MPSAFE
   3215 		splx(s);
   3216 #endif
   3217 		if (error != ENETRESET)
   3218 			break;
   3219 
   3220 		error = 0;
   3221 
   3222 		if (cmd == SIOCSIFCAP) {
   3223 			error = (*ifp->if_init)(ifp);
   3224 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3225 			;
   3226 		else if (ifp->if_flags & IFF_RUNNING) {
   3227 			/*
   3228 			 * Multicast list has changed; set the hardware filter
   3229 			 * accordingly.
   3230 			 */
   3231 			WM_CORE_LOCK(sc);
   3232 			wm_set_filter(sc);
   3233 			WM_CORE_UNLOCK(sc);
   3234 		}
   3235 		break;
   3236 	}
   3237 
   3238 #ifndef WM_MPSAFE
   3239 	splx(s);
   3240 #endif
   3241 	return error;
   3242 }
   3243 
   3244 /* MAC address related */
   3245 
   3246 /*
    3247  * Get the offset of the MAC address and return it.
    3248  * On error, return offset 0.
   3249  */
   3250 static uint16_t
   3251 wm_check_alt_mac_addr(struct wm_softc *sc)
   3252 {
   3253 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3254 	uint16_t offset = NVM_OFF_MACADDR;
   3255 
   3256 	/* Try to read alternative MAC address pointer */
   3257 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3258 		return 0;
   3259 
    3260 	/* Check whether the pointer is valid. */
   3261 	if ((offset == 0x0000) || (offset == 0xffff))
   3262 		return 0;
   3263 
   3264 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3265 	/*
    3266 	 * Check whether the alternative MAC address is valid. Some cards
    3267 	 * have a non-0xffff pointer but do not actually use an alternative
    3268 	 * MAC address.
    3269 	 *
    3270 	 * A valid unicast address must have the multicast (group) bit clear.
   3271 	 */
   3272 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3273 		if (((myea[0] & 0xff) & 0x01) == 0)
   3274 			return offset; /* Found */
   3275 
   3276 	/* Not found */
   3277 	return 0;
   3278 }
   3279 
   3280 static int
   3281 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3282 {
   3283 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3284 	uint16_t offset = NVM_OFF_MACADDR;
   3285 	int do_invert = 0;
   3286 
   3287 	switch (sc->sc_type) {
   3288 	case WM_T_82580:
   3289 	case WM_T_I350:
   3290 	case WM_T_I354:
   3291 		/* EEPROM Top Level Partitioning */
   3292 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3293 		break;
   3294 	case WM_T_82571:
   3295 	case WM_T_82575:
   3296 	case WM_T_82576:
   3297 	case WM_T_80003:
   3298 	case WM_T_I210:
   3299 	case WM_T_I211:
   3300 		offset = wm_check_alt_mac_addr(sc);
   3301 		if (offset == 0)
   3302 			if ((sc->sc_funcid & 0x01) == 1)
   3303 				do_invert = 1;
   3304 		break;
   3305 	default:
   3306 		if ((sc->sc_funcid & 0x01) == 1)
   3307 			do_invert = 1;
   3308 		break;
   3309 	}
   3310 
   3311 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3312 		goto bad;
   3313 
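         	/* Each 16-bit NVM word stores two MAC bytes, low byte first. */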
   3314 	enaddr[0] = myea[0] & 0xff;
   3315 	enaddr[1] = myea[0] >> 8;
   3316 	enaddr[2] = myea[1] & 0xff;
   3317 	enaddr[3] = myea[1] >> 8;
   3318 	enaddr[4] = myea[2] & 0xff;
   3319 	enaddr[5] = myea[2] >> 8;
   3320 
   3321 	/*
   3322 	 * Toggle the LSB of the MAC address on the second port
   3323 	 * of some dual port cards.
   3324 	 */
   3325 	if (do_invert != 0)
   3326 		enaddr[5] ^= 1;
   3327 
   3328 	return 0;
   3329 
   3330  bad:
   3331 	return -1;
   3332 }
   3333 
   3334 /*
   3335  * wm_set_ral:
   3336  *
    3337  *	Set an entry in the receive address list.
   3338  */
   3339 static void
   3340 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3341 {
   3342 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3343 	uint32_t wlock_mac;
   3344 	int rv;
   3345 
   3346 	if (enaddr != NULL) {
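         		/*
         		 * RAL holds the low 32 bits of the address; RAH holds
         		 * the high 16 bits plus the Address Valid bit.
         		 */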
   3347 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3348 		    (enaddr[3] << 24);
   3349 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3350 		ral_hi |= RAL_AV;
   3351 	} else {
   3352 		ral_lo = 0;
   3353 		ral_hi = 0;
   3354 	}
   3355 
   3356 	switch (sc->sc_type) {
   3357 	case WM_T_82542_2_0:
   3358 	case WM_T_82542_2_1:
   3359 	case WM_T_82543:
   3360 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3361 		CSR_WRITE_FLUSH(sc);
   3362 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3363 		CSR_WRITE_FLUSH(sc);
   3364 		break;
   3365 	case WM_T_PCH2:
   3366 	case WM_T_PCH_LPT:
   3367 	case WM_T_PCH_SPT:
   3368 		if (idx == 0) {
   3369 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3370 			CSR_WRITE_FLUSH(sc);
   3371 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3372 			CSR_WRITE_FLUSH(sc);
   3373 			return;
   3374 		}
   3375 		if (sc->sc_type != WM_T_PCH2) {
   3376 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3377 			    FWSM_WLOCK_MAC);
   3378 			addrl = WMREG_SHRAL(idx - 1);
   3379 			addrh = WMREG_SHRAH(idx - 1);
   3380 		} else {
   3381 			wlock_mac = 0;
   3382 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3383 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3384 		}
   3385 
   3386 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3387 			rv = wm_get_swflag_ich8lan(sc);
   3388 			if (rv != 0)
   3389 				return;
   3390 			CSR_WRITE(sc, addrl, ral_lo);
   3391 			CSR_WRITE_FLUSH(sc);
   3392 			CSR_WRITE(sc, addrh, ral_hi);
   3393 			CSR_WRITE_FLUSH(sc);
   3394 			wm_put_swflag_ich8lan(sc);
   3395 		}
   3396 
   3397 		break;
   3398 	default:
   3399 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3400 		CSR_WRITE_FLUSH(sc);
   3401 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3402 		CSR_WRITE_FLUSH(sc);
   3403 		break;
   3404 	}
   3405 }
   3406 
   3407 /*
   3408  * wm_mchash:
   3409  *
    3410  *	Compute the hash of the multicast address for the multicast
    3411  *	filter (4096 bits, or 1024 bits on ICH/PCH variants).
   3412  */
   3413 static uint32_t
   3414 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3415 {
   3416 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3417 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3418 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3419 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3420 	uint32_t hash;
   3421 
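         	/*
         	 * The hash is built from the low bits of the last two address
         	 * bytes; sc_mchash_type selects which bit window the hardware
         	 * compares (the shift tables above).
         	 */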
   3422 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3423 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3424 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3425 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3426 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3427 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3428 		return (hash & 0x3ff);
   3429 	}
   3430 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3431 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3432 
   3433 	return (hash & 0xfff);
   3434 }
   3435 
   3436 /*
   3437  * wm_set_filter:
   3438  *
   3439  *	Set up the receive filter.
   3440  */
   3441 static void
   3442 wm_set_filter(struct wm_softc *sc)
   3443 {
   3444 	struct ethercom *ec = &sc->sc_ethercom;
   3445 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3446 	struct ether_multi *enm;
   3447 	struct ether_multistep step;
   3448 	bus_addr_t mta_reg;
   3449 	uint32_t hash, reg, bit;
   3450 	int i, size, ralmax;
   3451 
   3452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3453 		device_xname(sc->sc_dev), __func__));
   3454 
   3455 	if (sc->sc_type >= WM_T_82544)
   3456 		mta_reg = WMREG_CORDOVA_MTA;
   3457 	else
   3458 		mta_reg = WMREG_MTA;
   3459 
   3460 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3461 
   3462 	if (ifp->if_flags & IFF_BROADCAST)
   3463 		sc->sc_rctl |= RCTL_BAM;
   3464 	if (ifp->if_flags & IFF_PROMISC) {
   3465 		sc->sc_rctl |= RCTL_UPE;
   3466 		goto allmulti;
   3467 	}
   3468 
   3469 	/*
   3470 	 * Set the station address in the first RAL slot, and
   3471 	 * clear the remaining slots.
   3472 	 */
   3473 	if (sc->sc_type == WM_T_ICH8)
    3474 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3475 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3476 	    || (sc->sc_type == WM_T_PCH))
   3477 		size = WM_RAL_TABSIZE_ICH8;
   3478 	else if (sc->sc_type == WM_T_PCH2)
   3479 		size = WM_RAL_TABSIZE_PCH2;
   3480 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3481 		size = WM_RAL_TABSIZE_PCH_LPT;
   3482 	else if (sc->sc_type == WM_T_82575)
   3483 		size = WM_RAL_TABSIZE_82575;
   3484 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3485 		size = WM_RAL_TABSIZE_82576;
   3486 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3487 		size = WM_RAL_TABSIZE_I350;
   3488 	else
   3489 		size = WM_RAL_TABSIZE;
   3490 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3491 
   3492 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3493 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3494 		switch (i) {
   3495 		case 0:
   3496 			/* We can use all entries */
   3497 			ralmax = size;
   3498 			break;
   3499 		case 1:
   3500 			/* Only RAR[0] */
   3501 			ralmax = 1;
   3502 			break;
   3503 		default:
   3504 			/* available SHRA + RAR[0] */
   3505 			ralmax = i + 1;
   3506 		}
   3507 	} else
   3508 		ralmax = size;
   3509 	for (i = 1; i < size; i++) {
   3510 		if (i < ralmax)
   3511 			wm_set_ral(sc, NULL, i);
   3512 	}
   3513 
   3514 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3515 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3516 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3517 	    || (sc->sc_type == WM_T_PCH_SPT))
   3518 		size = WM_ICH8_MC_TABSIZE;
   3519 	else
   3520 		size = WM_MC_TABSIZE;
   3521 	/* Clear out the multicast table. */
   3522 	for (i = 0; i < size; i++) {
   3523 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3524 		CSR_WRITE_FLUSH(sc);
   3525 	}
   3526 
   3527 	ETHER_LOCK(ec);
   3528 	ETHER_FIRST_MULTI(step, ec, enm);
   3529 	while (enm != NULL) {
   3530 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3531 			ETHER_UNLOCK(ec);
   3532 			/*
   3533 			 * We must listen to a range of multicast addresses.
   3534 			 * For now, just accept all multicasts, rather than
   3535 			 * trying to set only those filter bits needed to match
   3536 			 * the range.  (At this time, the only use of address
   3537 			 * ranges is for IP multicast routing, for which the
   3538 			 * range is big enough to require all bits set.)
   3539 			 */
   3540 			goto allmulti;
   3541 		}
   3542 
   3543 		hash = wm_mchash(sc, enm->enm_addrlo);
   3544 
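         		/*
         		 * The upper hash bits select one 32-bit MTA register;
         		 * the low five bits select a bit within it.
         		 */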
   3545 		reg = (hash >> 5);
   3546 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3547 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3548 		    || (sc->sc_type == WM_T_PCH2)
   3549 		    || (sc->sc_type == WM_T_PCH_LPT)
   3550 		    || (sc->sc_type == WM_T_PCH_SPT))
   3551 			reg &= 0x1f;
   3552 		else
   3553 			reg &= 0x7f;
   3554 		bit = hash & 0x1f;
   3555 
   3556 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3557 		hash |= 1U << bit;
   3558 
   3559 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3560 			/*
   3561 			 * 82544 Errata 9: Certain register cannot be written
   3562 			 * with particular alignments in PCI-X bus operation
   3563 			 * (FCAH, MTA and VFTA).
   3564 			 */
   3565 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3566 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3567 			CSR_WRITE_FLUSH(sc);
   3568 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3569 			CSR_WRITE_FLUSH(sc);
   3570 		} else {
   3571 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 		}
   3574 
   3575 		ETHER_NEXT_MULTI(step, enm);
   3576 	}
   3577 	ETHER_UNLOCK(ec);
   3578 
   3579 	ifp->if_flags &= ~IFF_ALLMULTI;
   3580 	goto setit;
   3581 
   3582  allmulti:
   3583 	ifp->if_flags |= IFF_ALLMULTI;
   3584 	sc->sc_rctl |= RCTL_MPE;
   3585 
   3586  setit:
   3587 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3588 }
   3589 
   3590 /* Reset and init related */
   3591 
   3592 static void
   3593 wm_set_vlan(struct wm_softc *sc)
   3594 {
   3595 
   3596 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3597 		device_xname(sc->sc_dev), __func__));
   3598 
   3599 	/* Deal with VLAN enables. */
   3600 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3601 		sc->sc_ctrl |= CTRL_VME;
   3602 	else
   3603 		sc->sc_ctrl &= ~CTRL_VME;
   3604 
   3605 	/* Write the control registers. */
   3606 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3607 }
   3608 
   3609 static void
   3610 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3611 {
   3612 	uint32_t gcr;
   3613 	pcireg_t ctrl2;
   3614 
   3615 	gcr = CSR_READ(sc, WMREG_GCR);
   3616 
    3617 	/* Only take action if the timeout value is at its default of 0 */
   3618 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3619 		goto out;
   3620 
   3621 	if ((gcr & GCR_CAP_VER2) == 0) {
   3622 		gcr |= GCR_CMPL_TMOUT_10MS;
   3623 		goto out;
   3624 	}
   3625 
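         	/*
         	 * Capability version 2 devices take a 16ms completion timeout
         	 * via the PCIe DCSR2 register instead.
         	 */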
   3626 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3627 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3628 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3629 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3630 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3631 
   3632 out:
   3633 	/* Disable completion timeout resend */
   3634 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3635 
   3636 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3637 }
   3638 
   3639 void
   3640 wm_get_auto_rd_done(struct wm_softc *sc)
   3641 {
   3642 	int i;
   3643 
    3644 	/* Wait for eeprom to reload */
   3645 	switch (sc->sc_type) {
   3646 	case WM_T_82571:
   3647 	case WM_T_82572:
   3648 	case WM_T_82573:
   3649 	case WM_T_82574:
   3650 	case WM_T_82583:
   3651 	case WM_T_82575:
   3652 	case WM_T_82576:
   3653 	case WM_T_82580:
   3654 	case WM_T_I350:
   3655 	case WM_T_I354:
   3656 	case WM_T_I210:
   3657 	case WM_T_I211:
   3658 	case WM_T_80003:
   3659 	case WM_T_ICH8:
   3660 	case WM_T_ICH9:
   3661 		for (i = 0; i < 10; i++) {
   3662 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3663 				break;
   3664 			delay(1000);
   3665 		}
   3666 		if (i == 10) {
   3667 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3668 			    "complete\n", device_xname(sc->sc_dev));
   3669 		}
   3670 		break;
   3671 	default:
   3672 		break;
   3673 	}
   3674 }
   3675 
   3676 void
   3677 wm_lan_init_done(struct wm_softc *sc)
   3678 {
   3679 	uint32_t reg = 0;
   3680 	int i;
   3681 
   3682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3683 		device_xname(sc->sc_dev), __func__));
   3684 
   3685 	/* Wait for eeprom to reload */
   3686 	switch (sc->sc_type) {
   3687 	case WM_T_ICH10:
   3688 	case WM_T_PCH:
   3689 	case WM_T_PCH2:
   3690 	case WM_T_PCH_LPT:
   3691 	case WM_T_PCH_SPT:
   3692 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3693 			reg = CSR_READ(sc, WMREG_STATUS);
   3694 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3695 				break;
   3696 			delay(100);
   3697 		}
   3698 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3699 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3700 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3701 		}
   3702 		break;
   3703 	default:
   3704 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3705 		    __func__);
   3706 		break;
   3707 	}
   3708 
   3709 	reg &= ~STATUS_LAN_INIT_DONE;
   3710 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3711 }
   3712 
   3713 void
   3714 wm_get_cfg_done(struct wm_softc *sc)
   3715 {
   3716 	int mask;
   3717 	uint32_t reg;
   3718 	int i;
   3719 
   3720 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3721 		device_xname(sc->sc_dev), __func__));
   3722 
   3723 	/* Wait for eeprom to reload */
   3724 	switch (sc->sc_type) {
   3725 	case WM_T_82542_2_0:
   3726 	case WM_T_82542_2_1:
   3727 		/* null */
   3728 		break;
   3729 	case WM_T_82543:
   3730 	case WM_T_82544:
   3731 	case WM_T_82540:
   3732 	case WM_T_82545:
   3733 	case WM_T_82545_3:
   3734 	case WM_T_82546:
   3735 	case WM_T_82546_3:
   3736 	case WM_T_82541:
   3737 	case WM_T_82541_2:
   3738 	case WM_T_82547:
   3739 	case WM_T_82547_2:
   3740 	case WM_T_82573:
   3741 	case WM_T_82574:
   3742 	case WM_T_82583:
   3743 		/* generic */
   3744 		delay(10*1000);
   3745 		break;
   3746 	case WM_T_80003:
   3747 	case WM_T_82571:
   3748 	case WM_T_82572:
   3749 	case WM_T_82575:
   3750 	case WM_T_82576:
   3751 	case WM_T_82580:
   3752 	case WM_T_I350:
   3753 	case WM_T_I354:
   3754 	case WM_T_I210:
   3755 	case WM_T_I211:
   3756 		if (sc->sc_type == WM_T_82571) {
   3757 			/* Only 82571 shares port 0 */
   3758 			mask = EEMNGCTL_CFGDONE_0;
   3759 		} else
   3760 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3761 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3762 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3763 				break;
   3764 			delay(1000);
   3765 		}
   3766 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3767 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3768 				device_xname(sc->sc_dev), __func__));
   3769 		}
   3770 		break;
   3771 	case WM_T_ICH8:
   3772 	case WM_T_ICH9:
   3773 	case WM_T_ICH10:
   3774 	case WM_T_PCH:
   3775 	case WM_T_PCH2:
   3776 	case WM_T_PCH_LPT:
   3777 	case WM_T_PCH_SPT:
   3778 		delay(10*1000);
   3779 		if (sc->sc_type >= WM_T_ICH10)
   3780 			wm_lan_init_done(sc);
   3781 		else
   3782 			wm_get_auto_rd_done(sc);
   3783 
   3784 		reg = CSR_READ(sc, WMREG_STATUS);
   3785 		if ((reg & STATUS_PHYRA) != 0)
   3786 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3787 		break;
   3788 	default:
   3789 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3790 		    __func__);
   3791 		break;
   3792 	}
   3793 }
   3794 
   3795 void
   3796 wm_phy_post_reset(struct wm_softc *sc)
   3797 {
   3798 	uint32_t reg;
   3799 
   3800 	/* This function is only for ICH8 and newer. */
   3801 	if (sc->sc_type < WM_T_ICH8)
   3802 		return;
   3803 
   3804 	if (wm_phy_resetisblocked(sc)) {
   3805 		/* XXX */
   3806 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3807 		return;
   3808 	}
   3809 
   3810 	/* Allow time for h/w to get to quiescent state after reset */
   3811 	delay(10*1000);
   3812 
   3813 	/* Perform any necessary post-reset workarounds */
   3814 	if (sc->sc_type == WM_T_PCH)
   3815 		wm_hv_phy_workaround_ich8lan(sc);
   3816 	if (sc->sc_type == WM_T_PCH2)
   3817 		wm_lv_phy_workaround_ich8lan(sc);
   3818 
   3819 	/* Clear the host wakeup bit after lcd reset */
   3820 	if (sc->sc_type >= WM_T_PCH) {
   3821 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3822 		    BM_PORT_GEN_CFG);
   3823 		reg &= ~BM_WUC_HOST_WU_BIT;
   3824 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3825 		    BM_PORT_GEN_CFG, reg);
   3826 	}
   3827 
   3828 	/* Configure the LCD with the extended configuration region in NVM */
   3829 	wm_init_lcd_from_nvm(sc);
   3830 
   3831 	/* Configure the LCD with the OEM bits in NVM */
   3832 }
   3833 
   3834 /* Only for PCH and newer */
   3835 static void
   3836 wm_write_smbus_addr(struct wm_softc *sc)
   3837 {
   3838 	uint32_t strap, freq;
   3839 	uint32_t phy_data;
   3840 
   3841 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3842 		device_xname(sc->sc_dev), __func__));
   3843 
   3844 	strap = CSR_READ(sc, WMREG_STRAP);
   3845 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3846 
   3847 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3848 
   3849 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3850 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3851 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3852 
   3853 	if (sc->sc_phytype == WMPHY_I217) {
   3854 		/* Restore SMBus frequency */
    3855 		if (freq--) {
   3856 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3857 			    | HV_SMB_ADDR_FREQ_HIGH);
   3858 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3859 			    HV_SMB_ADDR_FREQ_LOW);
   3860 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3861 			    HV_SMB_ADDR_FREQ_HIGH);
   3862 		} else {
   3863 			DPRINTF(WM_DEBUG_INIT,
   3864 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3865 				device_xname(sc->sc_dev), __func__));
   3866 		}
   3867 	}
   3868 
   3869 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3870 }
   3871 
   3872 void
   3873 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3874 {
   3875 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3876 	uint16_t phy_page = 0;
   3877 
   3878 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3879 		device_xname(sc->sc_dev), __func__));
   3880 
   3881 	switch (sc->sc_type) {
   3882 	case WM_T_ICH8:
   3883 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3884 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3885 			return;
   3886 
   3887 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3888 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3889 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3890 			break;
   3891 		}
   3892 		/* FALLTHROUGH */
   3893 	case WM_T_PCH:
   3894 	case WM_T_PCH2:
   3895 	case WM_T_PCH_LPT:
   3896 	case WM_T_PCH_SPT:
   3897 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3898 		break;
   3899 	default:
   3900 		return;
   3901 	}
   3902 
   3903 	sc->phy.acquire(sc);
   3904 
   3905 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3906 	if ((reg & sw_cfg_mask) == 0)
   3907 		goto release;
   3908 
   3909 	/*
   3910 	 * Make sure HW does not configure LCD from PHY extended configuration
   3911 	 * before SW configuration
   3912 	 */
   3913 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3914 	if ((sc->sc_type < WM_T_PCH2)
   3915 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3916 		goto release;
   3917 
   3918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3919 		device_xname(sc->sc_dev), __func__));
   3920 	/* word_addr is in DWORD */
   3921 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3922 
   3923 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3924 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3925 
   3926 	if (((sc->sc_type == WM_T_PCH)
   3927 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3928 	    || (sc->sc_type > WM_T_PCH)) {
   3929 		/*
   3930 		 * HW configures the SMBus address and LEDs when the OEM and
   3931 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3932 		 * are cleared, SW will configure them instead.
   3933 		 */
   3934 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3935 			device_xname(sc->sc_dev), __func__));
   3936 		wm_write_smbus_addr(sc);
   3937 
   3938 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3939 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3940 	}
   3941 
   3942 	/* Configure LCD from extended configuration region. */
   3943 	for (i = 0; i < cnf_size; i++) {
   3944 		uint16_t reg_data, reg_addr;
   3945 
   3946 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3947 			goto release;
   3948 
   3949 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3950 			goto release;
   3951 
   3952 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3953 			phy_page = reg_data;
   3954 
   3955 		reg_addr &= IGPHY_MAXREGADDR;
   3956 		reg_addr |= phy_page;
   3957 
   3958 		sc->phy.release(sc); /* XXX */
   3959 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3960 		sc->phy.acquire(sc); /* XXX */
   3961 	}
   3962 
   3963 release:
   3964 	sc->phy.release(sc);
   3965 	return;
   3966 }
   3967 
   3968 
   3969 /* Init hardware bits */
   3970 void
   3971 wm_initialize_hardware_bits(struct wm_softc *sc)
   3972 {
   3973 	uint32_t tarc0, tarc1, reg;
   3974 
   3975 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3976 		device_xname(sc->sc_dev), __func__));
   3977 
   3978 	/* For 82571 variant, 80003 and ICHs */
   3979 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3980 	    || (sc->sc_type >= WM_T_80003)) {
   3981 
   3982 		/* Transmit Descriptor Control 0 */
   3983 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3984 		reg |= TXDCTL_COUNT_DESC;
   3985 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3986 
   3987 		/* Transmit Descriptor Control 1 */
   3988 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3989 		reg |= TXDCTL_COUNT_DESC;
   3990 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3991 
   3992 		/* TARC0 */
   3993 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3994 		switch (sc->sc_type) {
   3995 		case WM_T_82571:
   3996 		case WM_T_82572:
   3997 		case WM_T_82573:
   3998 		case WM_T_82574:
   3999 		case WM_T_82583:
   4000 		case WM_T_80003:
   4001 			/* Clear bits 30..27 */
   4002 			tarc0 &= ~__BITS(30, 27);
   4003 			break;
   4004 		default:
   4005 			break;
   4006 		}
   4007 
   4008 		switch (sc->sc_type) {
   4009 		case WM_T_82571:
   4010 		case WM_T_82572:
   4011 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4012 
   4013 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4014 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4015 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4016 			/* 8257[12] Errata No.7 */
    4017 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4018 
   4019 			/* TARC1 bit 28 */
   4020 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4021 				tarc1 &= ~__BIT(28);
   4022 			else
   4023 				tarc1 |= __BIT(28);
   4024 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4025 
   4026 			/*
   4027 			 * 8257[12] Errata No.13
    4028 			 * Disable Dynamic Clock Gating.
   4029 			 */
   4030 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4031 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4032 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4033 			break;
   4034 		case WM_T_82573:
   4035 		case WM_T_82574:
   4036 		case WM_T_82583:
   4037 			if ((sc->sc_type == WM_T_82574)
   4038 			    || (sc->sc_type == WM_T_82583))
   4039 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4040 
   4041 			/* Extended Device Control */
   4042 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4043 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4044 			reg |= __BIT(22);	/* Set bit 22 */
   4045 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4046 
   4047 			/* Device Control */
   4048 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4049 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4050 
   4051 			/* PCIe Control Register */
   4052 			/*
   4053 			 * 82573 Errata (unknown).
   4054 			 *
   4055 			 * 82574 Errata 25 and 82583 Errata 12
   4056 			 * "Dropped Rx Packets":
    4057 			 *   Fixed in NVM image version 2.1.4 and newer.
   4058 			 */
   4059 			reg = CSR_READ(sc, WMREG_GCR);
   4060 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4061 			CSR_WRITE(sc, WMREG_GCR, reg);
   4062 
   4063 			if ((sc->sc_type == WM_T_82574)
   4064 			    || (sc->sc_type == WM_T_82583)) {
   4065 				/*
   4066 				 * Document says this bit must be set for
   4067 				 * proper operation.
   4068 				 */
   4069 				reg = CSR_READ(sc, WMREG_GCR);
   4070 				reg |= __BIT(22);
   4071 				CSR_WRITE(sc, WMREG_GCR, reg);
   4072 
   4073 				/*
    4074 				 * Apply the workaround for the hardware
    4075 				 * erratum documented in the errata docs.
    4076 				 * It fixes an issue where error-prone or
    4077 				 * unreliable PCIe completions occur,
    4078 				 * particularly with ASPM enabled. Without
    4079 				 * the fix, the issue can cause Tx timeouts.
   4080 				 */
   4081 				reg = CSR_READ(sc, WMREG_GCR2);
   4082 				reg |= __BIT(0);
   4083 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4084 			}
   4085 			break;
   4086 		case WM_T_80003:
   4087 			/* TARC0 */
   4088 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4089 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4090 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4091 
   4092 			/* TARC1 bit 28 */
   4093 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4094 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4095 				tarc1 &= ~__BIT(28);
   4096 			else
   4097 				tarc1 |= __BIT(28);
   4098 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4099 			break;
   4100 		case WM_T_ICH8:
   4101 		case WM_T_ICH9:
   4102 		case WM_T_ICH10:
   4103 		case WM_T_PCH:
   4104 		case WM_T_PCH2:
   4105 		case WM_T_PCH_LPT:
   4106 		case WM_T_PCH_SPT:
   4107 			/* TARC0 */
   4108 			if (sc->sc_type == WM_T_ICH8) {
   4109 				/* Set TARC0 bits 29 and 28 */
   4110 				tarc0 |= __BITS(29, 28);
   4111 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4112 				tarc0 |= __BIT(29);
   4113 				/*
    4114 				 * Drop bit 28. From Linux.
   4115 				 * See I218/I219 spec update
   4116 				 * "5. Buffer Overrun While the I219 is
   4117 				 * Processing DMA Transactions"
   4118 				 */
   4119 				tarc0 &= ~__BIT(28);
   4120 			}
   4121 			/* Set TARC0 bits 23,24,26,27 */
   4122 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4123 
   4124 			/* CTRL_EXT */
   4125 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4126 			reg |= __BIT(22);	/* Set bit 22 */
   4127 			/*
   4128 			 * Enable PHY low-power state when MAC is at D3
   4129 			 * w/o WoL
   4130 			 */
   4131 			if (sc->sc_type >= WM_T_PCH)
   4132 				reg |= CTRL_EXT_PHYPDEN;
   4133 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4134 
   4135 			/* TARC1 */
   4136 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4137 			/* bit 28 */
   4138 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4139 				tarc1 &= ~__BIT(28);
   4140 			else
   4141 				tarc1 |= __BIT(28);
   4142 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4143 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4144 
   4145 			/* Device Status */
   4146 			if (sc->sc_type == WM_T_ICH8) {
   4147 				reg = CSR_READ(sc, WMREG_STATUS);
   4148 				reg &= ~__BIT(31);
   4149 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4151 			}
   4152 
   4153 			/* IOSFPC */
   4154 			if (sc->sc_type == WM_T_PCH_SPT) {
   4155 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4156 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4157 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4158 			}
   4159 			/*
    4160 			 * Work around a descriptor data corruption issue with
    4161 			 * NFSv2 UDP traffic by simply disabling the NFS
    4162 			 * filtering capability.
   4163 			 */
   4164 			reg = CSR_READ(sc, WMREG_RFCTL);
   4165 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4166 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4167 			break;
   4168 		default:
   4169 			break;
   4170 		}
   4171 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4172 
   4173 		switch (sc->sc_type) {
   4174 		/*
   4175 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4176 		 * Avoid RSS Hash Value bug.
   4177 		 */
   4178 		case WM_T_82571:
   4179 		case WM_T_82572:
   4180 		case WM_T_82573:
   4181 		case WM_T_80003:
   4182 		case WM_T_ICH8:
   4183 			reg = CSR_READ(sc, WMREG_RFCTL);
   4184 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4185 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4186 			break;
   4187 		case WM_T_82574:
    4188 			/* Use extended Rx descriptors. */
   4189 			reg = CSR_READ(sc, WMREG_RFCTL);
   4190 			reg |= WMREG_RFCTL_EXSTEN;
   4191 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4192 			break;
   4193 		default:
   4194 			break;
   4195 		}
   4196 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4197 		/*
   4198 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4199 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4200 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4201 		 * Correctly by the Device"
   4202 		 *
   4203 		 * I354(C2000) Errata AVR53:
   4204 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4205 		 * Hang"
   4206 		 */
   4207 		reg = CSR_READ(sc, WMREG_RFCTL);
   4208 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4209 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4210 	}
   4211 }
   4212 
   4213 static uint32_t
   4214 wm_rxpbs_adjust_82580(uint32_t val)
   4215 {
   4216 	uint32_t rv = 0;
   4217 
   4218 	if (val < __arraycount(wm_82580_rxpbs_table))
   4219 		rv = wm_82580_rxpbs_table[val];
   4220 
   4221 	return rv;
   4222 }
   4223 
   4224 /*
   4225  * wm_reset_phy:
   4226  *
   4227  *	generic PHY reset function.
   4228  *	Same as e1000_phy_hw_reset_generic()
   4229  */
   4230 static void
   4231 wm_reset_phy(struct wm_softc *sc)
   4232 {
   4233 	uint32_t reg;
   4234 
   4235 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4236 		device_xname(sc->sc_dev), __func__));
   4237 	if (wm_phy_resetisblocked(sc))
   4238 		return;
   4239 
   4240 	sc->phy.acquire(sc);
   4241 
   4242 	reg = CSR_READ(sc, WMREG_CTRL);
   4243 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4244 	CSR_WRITE_FLUSH(sc);
   4245 
   4246 	delay(sc->phy.reset_delay_us);
   4247 
   4248 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4249 	CSR_WRITE_FLUSH(sc);
   4250 
   4251 	delay(150);
   4252 
   4253 	sc->phy.release(sc);
   4254 
   4255 	wm_get_cfg_done(sc);
   4256 	wm_phy_post_reset(sc);
   4257 }
   4258 
   4259 /*
   4260  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4261  * so it is enough to check sc->sc_queue[0] only.
   4262  */
   4263 static void
   4264 wm_flush_desc_rings(struct wm_softc *sc)
   4265 {
   4266 	pcireg_t preg;
   4267 	uint32_t reg;
   4268 	struct wm_txqueue *txq;
   4269 	wiseman_txdesc_t *txd;
   4270 	int nexttx;
   4271 	uint32_t rctl;
   4272 
   4273 	/* First, disable MULR fix in FEXTNVM11 */
   4274 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4275 	reg |= FEXTNVM11_DIS_MULRFIX;
   4276 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4277 
   4278 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4279 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4280 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4281 		return;
   4282 
   4283 	/* TX */
   4284 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4285 	    device_xname(sc->sc_dev), preg, reg);
   4286 	reg = CSR_READ(sc, WMREG_TCTL);
   4287 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4288 
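         	/*
         	 * Queue one dummy TX descriptor (512 bytes, IFCS) and advance
         	 * the tail pointer so the hardware can drain the pending
         	 * descriptors.
         	 */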
   4289 	txq = &sc->sc_queue[0].wmq_txq;
   4290 	nexttx = txq->txq_next;
   4291 	txd = &txq->txq_descs[nexttx];
   4292 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4293 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4294 	txd->wtx_fields.wtxu_status = 0;
   4295 	txd->wtx_fields.wtxu_options = 0;
   4296 	txd->wtx_fields.wtxu_vlan = 0;
   4297 
   4298 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4299 	    BUS_SPACE_BARRIER_WRITE);
   4300 
   4301 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4302 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4303 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4304 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4305 	delay(250);
   4306 
   4307 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4308 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4309 		return;
   4310 
   4311 	/* RX */
   4312 	printf("%s: Need RX flush (reg = %08x)\n",
   4313 	    device_xname(sc->sc_dev), preg);
   4314 	rctl = CSR_READ(sc, WMREG_RCTL);
   4315 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4316 	CSR_WRITE_FLUSH(sc);
   4317 	delay(150);
   4318 
   4319 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4320 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4321 	reg &= 0xffffc000;
   4322 	/*
   4323 	 * update thresholds: prefetch threshold to 31, host threshold
   4324 	 * to 1 and make sure the granularity is "descriptors" and not
   4325 	 * "cache lines"
   4326 	 */
   4327 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4328 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4329 
   4330 	/*
   4331 	 * momentarily enable the RX ring for the changes to take
   4332 	 * effect
   4333 	 */
   4334 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4335 	CSR_WRITE_FLUSH(sc);
   4336 	delay(150);
   4337 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4338 }
   4339 
   4340 /*
   4341  * wm_reset:
   4342  *
   4343  *	Reset the i82542 chip.
   4344  */
   4345 static void
   4346 wm_reset(struct wm_softc *sc)
   4347 {
   4348 	int phy_reset = 0;
   4349 	int i, error = 0;
   4350 	uint32_t reg;
   4351 	uint16_t kmreg;
   4352 	int rv;
   4353 
   4354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4355 		device_xname(sc->sc_dev), __func__));
   4356 	KASSERT(sc->sc_type != 0);
   4357 
   4358 	/*
   4359 	 * Allocate on-chip memory according to the MTU size.
   4360 	 * The Packet Buffer Allocation register must be written
   4361 	 * before the chip is reset.
   4362 	 */
   4363 	switch (sc->sc_type) {
   4364 	case WM_T_82547:
   4365 	case WM_T_82547_2:
   4366 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4367 		    PBA_22K : PBA_30K;
   4368 		for (i = 0; i < sc->sc_nqueues; i++) {
   4369 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4370 			txq->txq_fifo_head = 0;
   4371 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4372 			txq->txq_fifo_size =
   4373 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4374 			txq->txq_fifo_stall = 0;
   4375 		}
   4376 		break;
   4377 	case WM_T_82571:
   4378 	case WM_T_82572:
    4379 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4380 	case WM_T_80003:
   4381 		sc->sc_pba = PBA_32K;
   4382 		break;
   4383 	case WM_T_82573:
   4384 		sc->sc_pba = PBA_12K;
   4385 		break;
   4386 	case WM_T_82574:
   4387 	case WM_T_82583:
   4388 		sc->sc_pba = PBA_20K;
   4389 		break;
   4390 	case WM_T_82576:
   4391 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4392 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4393 		break;
   4394 	case WM_T_82580:
   4395 	case WM_T_I350:
   4396 	case WM_T_I354:
   4397 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4398 		break;
   4399 	case WM_T_I210:
   4400 	case WM_T_I211:
   4401 		sc->sc_pba = PBA_34K;
   4402 		break;
   4403 	case WM_T_ICH8:
   4404 		/* Workaround for a bit corruption issue in FIFO memory */
   4405 		sc->sc_pba = PBA_8K;
   4406 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4407 		break;
   4408 	case WM_T_ICH9:
   4409 	case WM_T_ICH10:
   4410 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4411 		    PBA_14K : PBA_10K;
   4412 		break;
   4413 	case WM_T_PCH:
   4414 	case WM_T_PCH2:
   4415 	case WM_T_PCH_LPT:
   4416 	case WM_T_PCH_SPT:
   4417 		sc->sc_pba = PBA_26K;
   4418 		break;
   4419 	default:
   4420 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4421 		    PBA_40K : PBA_48K;
   4422 		break;
   4423 	}
   4424 	/*
   4425 	 * Only old or non-multiqueue devices have the PBA register
   4426 	 * XXX Need special handling for 82575.
   4427 	 */
   4428 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4429 	    || (sc->sc_type == WM_T_82575))
   4430 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4431 
   4432 	/* Prevent the PCI-E bus from sticking */
   4433 	if (sc->sc_flags & WM_F_PCIE) {
   4434 		int timeout = 800;
   4435 
   4436 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4437 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4438 
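         		/* Wait up to 80ms (800 * 100us) for the master to idle. */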
   4439 		while (timeout--) {
   4440 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4441 			    == 0)
   4442 				break;
   4443 			delay(100);
   4444 		}
   4445 		if (timeout == 0)
   4446 			device_printf(sc->sc_dev,
   4447 			    "failed to disable busmastering\n");
   4448 	}
   4449 
   4450 	/* Set the completion timeout for interface */
   4451 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4452 	    || (sc->sc_type == WM_T_82580)
   4453 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4454 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4455 		wm_set_pcie_completion_timeout(sc);
   4456 
   4457 	/* Clear interrupt */
   4458 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4459 	if (wm_is_using_msix(sc)) {
   4460 		if (sc->sc_type != WM_T_82574) {
   4461 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4462 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4463 		} else {
   4464 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4465 		}
   4466 	}
   4467 
   4468 	/* Stop the transmit and receive processes. */
   4469 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4470 	sc->sc_rctl &= ~RCTL_EN;
   4471 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4472 	CSR_WRITE_FLUSH(sc);
   4473 
   4474 	/* XXX set_tbi_sbp_82543() */
   4475 
   4476 	delay(10*1000);
   4477 
   4478 	/* Must acquire the MDIO ownership before MAC reset */
   4479 	switch (sc->sc_type) {
   4480 	case WM_T_82573:
   4481 	case WM_T_82574:
   4482 	case WM_T_82583:
   4483 		error = wm_get_hw_semaphore_82573(sc);
   4484 		break;
   4485 	default:
   4486 		break;
   4487 	}
   4488 
   4489 	/*
   4490 	 * 82541 Errata 29? & 82547 Errata 28?
   4491 	 * See also the description about PHY_RST bit in CTRL register
   4492 	 * in 8254x_GBe_SDM.pdf.
   4493 	 */
   4494 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4495 		CSR_WRITE(sc, WMREG_CTRL,
   4496 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4497 		CSR_WRITE_FLUSH(sc);
   4498 		delay(5000);
   4499 	}
   4500 
   4501 	switch (sc->sc_type) {
   4502 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4503 	case WM_T_82541:
   4504 	case WM_T_82541_2:
   4505 	case WM_T_82547:
   4506 	case WM_T_82547_2:
   4507 		/*
   4508 		 * On some chipsets, a reset through a memory-mapped write
   4509 		 * cycle can cause the chip to reset before completing the
    4510 		 * write cycle.  This causes a major headache that can be
   4511 		 * avoided by issuing the reset via indirect register writes
   4512 		 * through I/O space.
   4513 		 *
   4514 		 * So, if we successfully mapped the I/O BAR at attach time,
   4515 		 * use that.  Otherwise, try our luck with a memory-mapped
   4516 		 * reset.
   4517 		 */
   4518 		if (sc->sc_flags & WM_F_IOH_VALID)
   4519 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4520 		else
   4521 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4522 		break;
   4523 	case WM_T_82545_3:
   4524 	case WM_T_82546_3:
   4525 		/* Use the shadow control register on these chips. */
   4526 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4527 		break;
   4528 	case WM_T_80003:
   4529 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4530 		sc->phy.acquire(sc);
   4531 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4532 		sc->phy.release(sc);
   4533 		break;
   4534 	case WM_T_ICH8:
   4535 	case WM_T_ICH9:
   4536 	case WM_T_ICH10:
   4537 	case WM_T_PCH:
   4538 	case WM_T_PCH2:
   4539 	case WM_T_PCH_LPT:
   4540 	case WM_T_PCH_SPT:
   4541 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4542 		if (wm_phy_resetisblocked(sc) == false) {
   4543 			/*
   4544 			 * Gate automatic PHY configuration by hardware on
   4545 			 * non-managed 82579
   4546 			 */
   4547 			if ((sc->sc_type == WM_T_PCH2)
   4548 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4549 				== 0))
   4550 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4551 
   4552 			reg |= CTRL_PHY_RESET;
   4553 			phy_reset = 1;
   4554 		} else
    4555 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4556 		sc->phy.acquire(sc);
   4557 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4558 		/* Don't insert a completion barrier during reset */
   4559 		delay(20*1000);
   4560 		mutex_exit(sc->sc_ich_phymtx);
   4561 		break;
   4562 	case WM_T_82580:
   4563 	case WM_T_I350:
   4564 	case WM_T_I354:
   4565 	case WM_T_I210:
   4566 	case WM_T_I211:
   4567 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4568 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4569 			CSR_WRITE_FLUSH(sc);
   4570 		delay(5000);
   4571 		break;
   4572 	case WM_T_82542_2_0:
   4573 	case WM_T_82542_2_1:
   4574 	case WM_T_82543:
   4575 	case WM_T_82540:
   4576 	case WM_T_82545:
   4577 	case WM_T_82546:
   4578 	case WM_T_82571:
   4579 	case WM_T_82572:
   4580 	case WM_T_82573:
   4581 	case WM_T_82574:
   4582 	case WM_T_82575:
   4583 	case WM_T_82576:
   4584 	case WM_T_82583:
   4585 	default:
   4586 		/* Everything else can safely use the documented method. */
   4587 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4588 		break;
   4589 	}
   4590 
   4591 	/* Must release the MDIO ownership after MAC reset */
   4592 	switch (sc->sc_type) {
   4593 	case WM_T_82573:
   4594 	case WM_T_82574:
   4595 	case WM_T_82583:
   4596 		if (error == 0)
   4597 			wm_put_hw_semaphore_82573(sc);
   4598 		break;
   4599 	default:
   4600 		break;
   4601 	}
   4602 
   4603 	if (phy_reset != 0)
   4604 		wm_get_cfg_done(sc);
   4605 
   4606 	/* reload EEPROM */
   4607 	switch (sc->sc_type) {
   4608 	case WM_T_82542_2_0:
   4609 	case WM_T_82542_2_1:
   4610 	case WM_T_82543:
   4611 	case WM_T_82544:
   4612 		delay(10);
   4613 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4614 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4615 		CSR_WRITE_FLUSH(sc);
   4616 		delay(2000);
   4617 		break;
   4618 	case WM_T_82540:
   4619 	case WM_T_82545:
   4620 	case WM_T_82545_3:
   4621 	case WM_T_82546:
   4622 	case WM_T_82546_3:
   4623 		delay(5*1000);
   4624 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4625 		break;
   4626 	case WM_T_82541:
   4627 	case WM_T_82541_2:
   4628 	case WM_T_82547:
   4629 	case WM_T_82547_2:
   4630 		delay(20000);
   4631 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4632 		break;
   4633 	case WM_T_82571:
   4634 	case WM_T_82572:
   4635 	case WM_T_82573:
   4636 	case WM_T_82574:
   4637 	case WM_T_82583:
   4638 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4639 			delay(10);
   4640 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4641 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4642 			CSR_WRITE_FLUSH(sc);
   4643 		}
   4644 		/* check EECD_EE_AUTORD */
   4645 		wm_get_auto_rd_done(sc);
   4646 		/*
    4647 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
   4648 		 * is set.
   4649 		 */
   4650 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4651 		    || (sc->sc_type == WM_T_82583))
   4652 			delay(25*1000);
   4653 		break;
   4654 	case WM_T_82575:
   4655 	case WM_T_82576:
   4656 	case WM_T_82580:
   4657 	case WM_T_I350:
   4658 	case WM_T_I354:
   4659 	case WM_T_I210:
   4660 	case WM_T_I211:
   4661 	case WM_T_80003:
   4662 		/* check EECD_EE_AUTORD */
   4663 		wm_get_auto_rd_done(sc);
   4664 		break;
   4665 	case WM_T_ICH8:
   4666 	case WM_T_ICH9:
   4667 	case WM_T_ICH10:
   4668 	case WM_T_PCH:
   4669 	case WM_T_PCH2:
   4670 	case WM_T_PCH_LPT:
   4671 	case WM_T_PCH_SPT:
   4672 		break;
   4673 	default:
   4674 		panic("%s: unknown type\n", __func__);
   4675 	}
   4676 
   4677 	/* Check whether EEPROM is present or not */
   4678 	switch (sc->sc_type) {
   4679 	case WM_T_82575:
   4680 	case WM_T_82576:
   4681 	case WM_T_82580:
   4682 	case WM_T_I350:
   4683 	case WM_T_I354:
   4684 	case WM_T_ICH8:
   4685 	case WM_T_ICH9:
   4686 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4687 			/* Not found */
   4688 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4689 			if (sc->sc_type == WM_T_82575)
   4690 				wm_reset_init_script_82575(sc);
   4691 		}
   4692 		break;
   4693 	default:
   4694 		break;
   4695 	}
   4696 
   4697 	if (phy_reset != 0)
   4698 		wm_phy_post_reset(sc);
   4699 
   4700 	if ((sc->sc_type == WM_T_82580)
   4701 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4702 		/* clear global device reset status bit */
   4703 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4704 	}
   4705 
   4706 	/* Clear any pending interrupt events. */
   4707 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4708 	reg = CSR_READ(sc, WMREG_ICR);
   4709 	if (wm_is_using_msix(sc)) {
   4710 		if (sc->sc_type != WM_T_82574) {
   4711 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4712 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4713 		} else
   4714 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4715 	}
   4716 
   4717 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4718 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4719 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4720 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4721 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4722 		reg |= KABGTXD_BGSQLBIAS;
   4723 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4724 	}
   4725 
   4726 	/* reload sc_ctrl */
   4727 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4728 
   4729 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4730 		wm_set_eee_i350(sc);
   4731 
   4732 	/*
   4733 	 * For PCH, this write will make sure that any noise will be detected
   4734 	 * as a CRC error and be dropped rather than show up as a bad packet
   4735 	 * to the DMA engine
   4736 	 */
   4737 	if (sc->sc_type == WM_T_PCH)
   4738 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4739 
   4740 	if (sc->sc_type >= WM_T_82544)
   4741 		CSR_WRITE(sc, WMREG_WUC, 0);
   4742 
   4743 	wm_reset_mdicnfg_82580(sc);
   4744 
   4745 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4746 		wm_pll_workaround_i210(sc);
   4747 
   4748 	if (sc->sc_type == WM_T_80003) {
   4749 		/* default to TRUE to enable the MDIC W/A */
   4750 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4751 
   4752 		rv = wm_kmrn_readreg(sc,
   4753 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4754 		if (rv == 0) {
   4755 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4756 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4757 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4758 			else
   4759 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4760 		}
   4761 	}
   4762 }
   4763 
   4764 /*
   4765  * wm_add_rxbuf:
   4766  *
    4767  *	Add a receive buffer to the indicated descriptor.
   4768  */
   4769 static int
   4770 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4771 {
   4772 	struct wm_softc *sc = rxq->rxq_sc;
   4773 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4774 	struct mbuf *m;
   4775 	int error;
   4776 
   4777 	KASSERT(mutex_owned(rxq->rxq_lock));
   4778 
   4779 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4780 	if (m == NULL)
   4781 		return ENOBUFS;
   4782 
   4783 	MCLGET(m, M_DONTWAIT);
   4784 	if ((m->m_flags & M_EXT) == 0) {
   4785 		m_freem(m);
   4786 		return ENOBUFS;
   4787 	}
   4788 
   4789 	if (rxs->rxs_mbuf != NULL)
   4790 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4791 
   4792 	rxs->rxs_mbuf = m;
   4793 
   4794 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4795 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4796 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4797 	if (error) {
   4798 		/* XXX XXX XXX */
   4799 		aprint_error_dev(sc->sc_dev,
   4800 		    "unable to load rx DMA map %d, error = %d\n",
   4801 		    idx, error);
   4802 		panic("wm_add_rxbuf");
   4803 	}
   4804 
   4805 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4806 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4807 
   4808 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4809 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4810 			wm_init_rxdesc(rxq, idx);
   4811 	} else
   4812 		wm_init_rxdesc(rxq, idx);
   4813 
   4814 	return 0;
   4815 }
   4816 
   4817 /*
   4818  * wm_rxdrain:
   4819  *
   4820  *	Drain the receive queue.
   4821  */
   4822 static void
   4823 wm_rxdrain(struct wm_rxqueue *rxq)
   4824 {
   4825 	struct wm_softc *sc = rxq->rxq_sc;
   4826 	struct wm_rxsoft *rxs;
   4827 	int i;
   4828 
   4829 	KASSERT(mutex_owned(rxq->rxq_lock));
   4830 
   4831 	for (i = 0; i < WM_NRXDESC; i++) {
   4832 		rxs = &rxq->rxq_soft[i];
   4833 		if (rxs->rxs_mbuf != NULL) {
   4834 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4835 			m_freem(rxs->rxs_mbuf);
   4836 			rxs->rxs_mbuf = NULL;
   4837 		}
   4838 	}
   4839 }
   4840 
   4841 
   4842 /*
   4843  * XXX copy from FreeBSD's sys/net/rss_config.c
    4844  * XXX copied from FreeBSD's sys/net/rss_config.c
   4845 /*
   4846  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4847  * effectiveness may be limited by algorithm choice and available entropy
   4848  * during the boot.
   4849  *
   4850  * XXXRW: And that we don't randomize it yet!
   4851  *
   4852  * This is the default Microsoft RSS specification key which is also
   4853  * the Chelsio T5 firmware default key.
   4854  */
   4855 #define RSS_KEYSIZE 40
   4856 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4857 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4858 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4859 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4860 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4861 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4862 };
   4863 
   4864 /*
   4865  * Caller must pass an array of size sizeof(rss_key).
   4866  *
   4867  * XXX
    4868  * As if_ixgbe may use this function, it should not be an
    4869  * if_wm-specific function.
   4870  */
   4871 static void
   4872 wm_rss_getkey(uint8_t *key)
   4873 {
   4874 
   4875 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4876 }
   4877 
   4878 /*
    4879  * Set up registers for RSS.
    4880  *
    4881  * XXX VMDq is not yet supported.
   4882  */
   4883 static void
   4884 wm_init_rss(struct wm_softc *sc)
   4885 {
   4886 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4887 	int i;
   4888 
   4889 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4890 
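         	/*
         	 * Fill the redirection table round-robin: entry i steers hash
         	 * bucket i to queue (i % sc_nqueues), so with e.g. four queues
         	 * the buckets map to queues 0, 1, 2, 3, 0, 1, ...
         	 */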
   4891 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4892 		int qid, reta_ent;
   4893 
   4894 		qid  = i % sc->sc_nqueues;
    4895 		switch (sc->sc_type) {
   4896 		case WM_T_82574:
   4897 			reta_ent = __SHIFTIN(qid,
   4898 			    RETA_ENT_QINDEX_MASK_82574);
   4899 			break;
   4900 		case WM_T_82575:
   4901 			reta_ent = __SHIFTIN(qid,
   4902 			    RETA_ENT_QINDEX1_MASK_82575);
   4903 			break;
   4904 		default:
   4905 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4906 			break;
   4907 		}
   4908 
   4909 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4910 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4911 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4912 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4913 	}
   4914 
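         	/* Load the 40-byte key into the ten 32-bit RSSRK registers. */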
   4915 	wm_rss_getkey((uint8_t *)rss_key);
   4916 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4917 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4918 
   4919 	if (sc->sc_type == WM_T_82574)
   4920 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4921 	else
   4922 		mrqc = MRQC_ENABLE_RSS_MQ;
   4923 
   4924 	/*
   4925 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4926 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4927 	 */
   4928 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4929 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4930 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4931 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4932 
   4933 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4934 }
   4935 
   4936 /*
    4937  * Adjust the TX and RX queue numbers which the system actually uses.
    4938  *
    4939  * The numbers are affected by the following parameters:
    4940  *     - The number of hardware queues
   4941  *     - The number of MSI-X vectors (= "nvectors" argument)
   4942  *     - ncpu
   4943  */
   4944 static void
   4945 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4946 {
   4947 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4948 
   4949 	if (nvectors < 2) {
   4950 		sc->sc_nqueues = 1;
   4951 		return;
   4952 	}
   4953 
    4954 	switch (sc->sc_type) {
   4955 	case WM_T_82572:
   4956 		hw_ntxqueues = 2;
   4957 		hw_nrxqueues = 2;
   4958 		break;
   4959 	case WM_T_82574:
   4960 		hw_ntxqueues = 2;
   4961 		hw_nrxqueues = 2;
   4962 		break;
   4963 	case WM_T_82575:
   4964 		hw_ntxqueues = 4;
   4965 		hw_nrxqueues = 4;
   4966 		break;
   4967 	case WM_T_82576:
   4968 		hw_ntxqueues = 16;
   4969 		hw_nrxqueues = 16;
   4970 		break;
   4971 	case WM_T_82580:
   4972 	case WM_T_I350:
   4973 	case WM_T_I354:
   4974 		hw_ntxqueues = 8;
   4975 		hw_nrxqueues = 8;
   4976 		break;
   4977 	case WM_T_I210:
   4978 		hw_ntxqueues = 4;
   4979 		hw_nrxqueues = 4;
   4980 		break;
   4981 	case WM_T_I211:
   4982 		hw_ntxqueues = 2;
   4983 		hw_nrxqueues = 2;
   4984 		break;
   4985 		/*
    4986 		 * As the following Ethernet controllers do not support
    4987 		 * MSI-X, this driver does not use multiqueue on them.
   4988 		 *     - WM_T_80003
   4989 		 *     - WM_T_ICH8
   4990 		 *     - WM_T_ICH9
   4991 		 *     - WM_T_ICH10
   4992 		 *     - WM_T_PCH
   4993 		 *     - WM_T_PCH2
   4994 		 *     - WM_T_PCH_LPT
   4995 		 */
   4996 	default:
   4997 		hw_ntxqueues = 1;
   4998 		hw_nrxqueues = 1;
   4999 		break;
   5000 	}
   5001 
   5002 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   5003 
   5004 	/*
    5005 	 * Since using more queues than MSI-X vectors cannot improve
    5006 	 * scaling, limit the number of queues actually used.
   5007 	 */
   5008 	if (nvectors < hw_nqueues + 1) {
   5009 		sc->sc_nqueues = nvectors - 1;
   5010 	} else {
   5011 		sc->sc_nqueues = hw_nqueues;
   5012 	}
   5013 
   5014 	/*
    5015 	 * Likewise, since using more queues than CPUs cannot improve
    5016 	 * scaling, limit the number of queues actually used.
   5017 	 */
   5018 	if (ncpu < sc->sc_nqueues)
   5019 		sc->sc_nqueues = ncpu;
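
         	/*
         	 * Worked example: an 82576 (16 hardware queues) given
         	 * nvectors = 5 on an 8-CPU system ends up with
         	 * min(16, 5 - 1, 8) = 4 queues, leaving one MSI-X vector
         	 * for the link interrupt.
         	 */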
   5020 }
   5021 
   5022 static inline bool
   5023 wm_is_using_msix(struct wm_softc *sc)
   5024 {
   5025 
   5026 	return (sc->sc_nintrs > 1);
   5027 }
   5028 
   5029 static inline bool
   5030 wm_is_using_multiqueue(struct wm_softc *sc)
   5031 {
   5032 
   5033 	return (sc->sc_nqueues > 1);
   5034 }
   5035 
   5036 static int
   5037 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5038 {
   5039 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5040 	wmq->wmq_id = qidx;
   5041 	wmq->wmq_intr_idx = intr_idx;
   5042 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5043 #ifdef WM_MPSAFE
   5044 	    | SOFTINT_MPSAFE
   5045 #endif
   5046 	    , wm_handle_queue, wmq);
   5047 	if (wmq->wmq_si != NULL)
   5048 		return 0;
   5049 
   5050 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5051 	    wmq->wmq_id);
   5052 
   5053 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5054 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5055 	return ENOMEM;
   5056 }
   5057 
   5058 /*
   5059  * Both single interrupt MSI and INTx can use this function.
   5060  */
   5061 static int
   5062 wm_setup_legacy(struct wm_softc *sc)
   5063 {
   5064 	pci_chipset_tag_t pc = sc->sc_pc;
   5065 	const char *intrstr = NULL;
   5066 	char intrbuf[PCI_INTRSTR_LEN];
   5067 	int error;
   5068 
   5069 	error = wm_alloc_txrx_queues(sc);
   5070 	if (error) {
   5071 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5072 		    error);
   5073 		return ENOMEM;
   5074 	}
   5075 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5076 	    sizeof(intrbuf));
   5077 #ifdef WM_MPSAFE
   5078 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5079 #endif
   5080 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5081 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5082 	if (sc->sc_ihs[0] == NULL) {
    5083 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5084 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5085 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5086 		return ENOMEM;
   5087 	}
   5088 
   5089 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5090 	sc->sc_nintrs = 1;
   5091 
   5092 	return wm_softint_establish(sc, 0, 0);
   5093 }
   5094 
   5095 static int
   5096 wm_setup_msix(struct wm_softc *sc)
   5097 {
   5098 	void *vih;
   5099 	kcpuset_t *affinity;
   5100 	int qidx, error, intr_idx, txrx_established;
   5101 	pci_chipset_tag_t pc = sc->sc_pc;
   5102 	const char *intrstr = NULL;
   5103 	char intrbuf[PCI_INTRSTR_LEN];
   5104 	char intr_xname[INTRDEVNAMEBUF];
   5105 
   5106 	if (sc->sc_nqueues < ncpu) {
   5107 		/*
   5108 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5109 		 * interrupts starts from CPU#1.
   5110 		 */
   5111 		sc->sc_affinity_offset = 1;
   5112 	} else {
   5113 		/*
    5114 		 * In this case, this device uses all CPUs, so for
    5115 		 * readability align cpu_index with the MSI-X vector number.
   5116 		 */
   5117 		sc->sc_affinity_offset = 0;
   5118 	}
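
         	/*
         	 * For example, with four queues on an eight-CPU system the
         	 * TX/RX vectors established below land on CPU1..CPU4, while
         	 * the link interrupt keeps its default affinity.
         	 */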
   5119 
   5120 	error = wm_alloc_txrx_queues(sc);
   5121 	if (error) {
   5122 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5123 		    error);
   5124 		return ENOMEM;
   5125 	}
   5126 
   5127 	kcpuset_create(&affinity, false);
   5128 	intr_idx = 0;
   5129 
   5130 	/*
   5131 	 * TX and RX
   5132 	 */
   5133 	txrx_established = 0;
   5134 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5135 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5136 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5137 
   5138 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5139 		    sizeof(intrbuf));
   5140 #ifdef WM_MPSAFE
   5141 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5142 		    PCI_INTR_MPSAFE, true);
   5143 #endif
   5144 		memset(intr_xname, 0, sizeof(intr_xname));
   5145 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5146 		    device_xname(sc->sc_dev), qidx);
   5147 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5148 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5149 		if (vih == NULL) {
   5150 			aprint_error_dev(sc->sc_dev,
   5151 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5152 			    intrstr ? " at " : "",
   5153 			    intrstr ? intrstr : "");
   5154 
   5155 			goto fail;
   5156 		}
   5157 		kcpuset_zero(affinity);
   5158 		/* Round-robin affinity */
   5159 		kcpuset_set(affinity, affinity_to);
   5160 		error = interrupt_distribute(vih, affinity, NULL);
   5161 		if (error == 0) {
   5162 			aprint_normal_dev(sc->sc_dev,
   5163 			    "for TX and RX interrupting at %s affinity to %u\n",
   5164 			    intrstr, affinity_to);
   5165 		} else {
   5166 			aprint_normal_dev(sc->sc_dev,
   5167 			    "for TX and RX interrupting at %s\n", intrstr);
   5168 		}
   5169 		sc->sc_ihs[intr_idx] = vih;
   5170 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5171 			goto fail;
   5172 		txrx_established++;
   5173 		intr_idx++;
   5174 	}
   5175 
   5176 	/*
   5177 	 * LINK
   5178 	 */
   5179 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5180 	    sizeof(intrbuf));
   5181 #ifdef WM_MPSAFE
   5182 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5183 #endif
   5184 	memset(intr_xname, 0, sizeof(intr_xname));
   5185 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5186 	    device_xname(sc->sc_dev));
   5187 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5188 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5189 	if (vih == NULL) {
   5190 		aprint_error_dev(sc->sc_dev,
   5191 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5192 		    intrstr ? " at " : "",
   5193 		    intrstr ? intrstr : "");
   5194 
   5195 		goto fail;
   5196 	}
    5197 	/* Keep the default affinity for the LINK interrupt */
   5198 	aprint_normal_dev(sc->sc_dev,
   5199 	    "for LINK interrupting at %s\n", intrstr);
   5200 	sc->sc_ihs[intr_idx] = vih;
   5201 	sc->sc_link_intr_idx = intr_idx;
   5202 
   5203 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5204 	kcpuset_destroy(affinity);
   5205 	return 0;
   5206 
   5207  fail:
   5208 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5209 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5210 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5211 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5212 	}
   5213 
   5214 	kcpuset_destroy(affinity);
   5215 	return ENOMEM;
   5216 }
   5217 
   5218 static void
   5219 wm_unset_stopping_flags(struct wm_softc *sc)
   5220 {
   5221 	int i;
   5222 
   5223 	KASSERT(WM_CORE_LOCKED(sc));
   5224 
   5225 	/*
   5226 	 * must unset stopping flags in ascending order.
   5227 	 */
    5228 	for (i = 0; i < sc->sc_nqueues; i++) {
   5229 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5230 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5231 
   5232 		mutex_enter(txq->txq_lock);
   5233 		txq->txq_stopping = false;
   5234 		mutex_exit(txq->txq_lock);
   5235 
   5236 		mutex_enter(rxq->rxq_lock);
   5237 		rxq->rxq_stopping = false;
   5238 		mutex_exit(rxq->rxq_lock);
   5239 	}
   5240 
   5241 	sc->sc_core_stopping = false;
   5242 }
   5243 
   5244 static void
   5245 wm_set_stopping_flags(struct wm_softc *sc)
   5246 {
   5247 	int i;
   5248 
   5249 	KASSERT(WM_CORE_LOCKED(sc));
   5250 
   5251 	sc->sc_core_stopping = true;
   5252 
   5253 	/*
   5254 	 * must set stopping flags in ascending order.
   5255 	 */
    5256 	for (i = 0; i < sc->sc_nqueues; i++) {
   5257 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5258 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5259 
   5260 		mutex_enter(rxq->rxq_lock);
   5261 		rxq->rxq_stopping = true;
   5262 		mutex_exit(rxq->rxq_lock);
   5263 
   5264 		mutex_enter(txq->txq_lock);
   5265 		txq->txq_stopping = true;
   5266 		mutex_exit(txq->txq_lock);
   5267 	}
   5268 }
   5269 
   5270 /*
   5271  * write interrupt interval value to ITR or EITR
   5272  */
   5273 static void
   5274 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5275 {
   5276 
   5277 	if (!wmq->wmq_set_itr)
   5278 		return;
   5279 
   5280 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5281 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5282 
   5283 		/*
    5284 		 * The 82575 doesn't have the CNT_INGR field,
    5285 		 * so overwrite the counter field in software.
   5286 		 */
   5287 		if (sc->sc_type == WM_T_82575)
   5288 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5289 		else
   5290 			eitr |= EITR_CNT_INGR;
   5291 
   5292 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5293 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5294 		/*
    5295 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    5296 		 * the multiqueue function with MSI-X.
   5297 		 */
   5298 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5299 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5300 	} else {
   5301 		KASSERT(wmq->wmq_id == 0);
   5302 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5303 	}
   5304 
   5305 	wmq->wmq_set_itr = false;
   5306 }
   5307 
   5308 /*
   5309  * TODO
    5310  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5311  * but it does not fit wm(4) well, so AIM stays disabled until we find
    5312  * an appropriate ITR calculation.
   5313  */
   5314 /*
    5315  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5316  * write to the register; this function does not write ITR/EITR itself.
   5317  */
   5318 static void
   5319 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5320 {
   5321 #ifdef NOTYET
   5322 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5323 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5324 	uint32_t avg_size = 0;
   5325 	uint32_t new_itr;
   5326 
   5327 	if (rxq->rxq_packets)
   5328 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5329 	if (txq->txq_packets)
   5330 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5331 
   5332 	if (avg_size == 0) {
   5333 		new_itr = 450; /* restore default value */
   5334 		goto out;
   5335 	}
   5336 
   5337 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5338 	avg_size += 24;
   5339 
   5340 	/* Don't starve jumbo frames */
   5341 	avg_size = min(avg_size, 3000);
   5342 
   5343 	/* Give a little boost to mid-size frames */
   5344 	if ((avg_size > 300) && (avg_size < 1200))
   5345 		new_itr = avg_size / 3;
   5346 	else
   5347 		new_itr = avg_size / 2;
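
         	/*
         	 * e.g. an average frame of 576 bytes (600 after the +24
         	 * adjustment) falls in the mid-size band and yields
         	 * new_itr = 600 / 3 = 200 before the *4 scaling below.
         	 */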
   5348 
   5349 out:
   5350 	/*
    5351 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5352 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5353 	 */
   5354 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5355 		new_itr *= 4;
   5356 
   5357 	if (new_itr != wmq->wmq_itr) {
   5358 		wmq->wmq_itr = new_itr;
   5359 		wmq->wmq_set_itr = true;
   5360 	} else
   5361 		wmq->wmq_set_itr = false;
   5362 
   5363 	rxq->rxq_packets = 0;
   5364 	rxq->rxq_bytes = 0;
   5365 	txq->txq_packets = 0;
   5366 	txq->txq_bytes = 0;
   5367 #endif
   5368 }
   5369 
   5370 /*
   5371  * wm_init:		[ifnet interface function]
   5372  *
   5373  *	Initialize the interface.
   5374  */
   5375 static int
   5376 wm_init(struct ifnet *ifp)
   5377 {
   5378 	struct wm_softc *sc = ifp->if_softc;
   5379 	int ret;
   5380 
   5381 	WM_CORE_LOCK(sc);
   5382 	ret = wm_init_locked(ifp);
   5383 	WM_CORE_UNLOCK(sc);
   5384 
   5385 	return ret;
   5386 }
   5387 
   5388 static int
   5389 wm_init_locked(struct ifnet *ifp)
   5390 {
   5391 	struct wm_softc *sc = ifp->if_softc;
   5392 	int i, j, trynum, error = 0;
   5393 	uint32_t reg;
   5394 
   5395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5396 		device_xname(sc->sc_dev), __func__));
   5397 	KASSERT(WM_CORE_LOCKED(sc));
   5398 
   5399 	/*
    5400 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5401 	 * There is a small but measurable benefit to avoiding the adjustment
   5402 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5403 	 * on such platforms.  One possibility is that the DMA itself is
   5404 	 * slightly more efficient if the front of the entire packet (instead
   5405 	 * of the front of the headers) is aligned.
   5406 	 *
   5407 	 * Note we must always set align_tweak to 0 if we are using
   5408 	 * jumbo frames.
   5409 	 */
   5410 #ifdef __NO_STRICT_ALIGNMENT
   5411 	sc->sc_align_tweak = 0;
   5412 #else
   5413 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5414 		sc->sc_align_tweak = 0;
   5415 	else
   5416 		sc->sc_align_tweak = 2;
   5417 #endif /* __NO_STRICT_ALIGNMENT */
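
         	/*
         	 * The two-byte tweak shifts the 14-byte Ethernet header so that
         	 * the IP header behind it starts on a 32-bit boundary
         	 * (2 + 14 = 16); jumbo-sized frames leave no room in the mbuf
         	 * cluster for the extra two bytes, hence align_tweak = 0 above.
         	 */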
   5418 
   5419 	/* Cancel any pending I/O. */
   5420 	wm_stop_locked(ifp, 0);
   5421 
   5422 	/* update statistics before reset */
   5423 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5424 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5425 
   5426 	/* PCH_SPT hardware workaround */
   5427 	if (sc->sc_type == WM_T_PCH_SPT)
   5428 		wm_flush_desc_rings(sc);
   5429 
   5430 	/* Reset the chip to a known state. */
   5431 	wm_reset(sc);
   5432 
   5433 	/*
   5434 	 * AMT based hardware can now take control from firmware
   5435 	 * Do this after reset.
   5436 	 */
   5437 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5438 		wm_get_hw_control(sc);
   5439 
   5440 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5441 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5442 		wm_legacy_irq_quirk_spt(sc);
   5443 
   5444 	/* Init hardware bits */
   5445 	wm_initialize_hardware_bits(sc);
   5446 
   5447 	/* Reset the PHY. */
   5448 	if (sc->sc_flags & WM_F_HAS_MII)
   5449 		wm_gmii_reset(sc);
   5450 
   5451 	/* Calculate (E)ITR value */
   5452 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5453 		/*
    5454 		 * This is for NEWQUEUE's EITR (except for the 82575).
    5455 		 * The 82575's EITR should be set to the same throttling value
    5456 		 * as other old controllers' ITR because the interrupt/sec
    5457 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5458 		 *
    5459 		 * The 82574's EITR should be set to the same value as its ITR.
    5460 		 *
    5461 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5462 		 * in contrast to the ITR throttling value.
   5463 		 */
   5464 		sc->sc_itr_init = 450;
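         		/* 450 means 1,000,000 / 450 ~= 2222 interrupts/sec. */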
   5465 	} else if (sc->sc_type >= WM_T_82543) {
   5466 		/*
   5467 		 * Set up the interrupt throttling register (units of 256ns)
   5468 		 * Note that a footnote in Intel's documentation says this
   5469 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5470 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5471 		 * that that is also true for the 1024ns units of the other
   5472 		 * interrupt-related timer registers -- so, really, we ought
   5473 		 * to divide this value by 4 when the link speed is low.
   5474 		 *
   5475 		 * XXX implement this division at link speed change!
   5476 		 */
   5477 
   5478 		/*
   5479 		 * For N interrupts/sec, set this value to:
   5480 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5481 		 * absolute and packet timer values to this value
   5482 		 * divided by 4 to get "simple timer" behavior.
   5483 		 */
   5484 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5485 	}
   5486 
   5487 	error = wm_init_txrx_queues(sc);
   5488 	if (error)
   5489 		goto out;
   5490 
   5491 	/*
   5492 	 * Clear out the VLAN table -- we don't use it (yet).
   5493 	 */
   5494 	CSR_WRITE(sc, WMREG_VET, 0);
   5495 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5496 		trynum = 10; /* Due to hw errata */
   5497 	else
   5498 		trynum = 1;
   5499 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5500 		for (j = 0; j < trynum; j++)
   5501 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5502 
   5503 	/*
   5504 	 * Set up flow-control parameters.
   5505 	 *
   5506 	 * XXX Values could probably stand some tuning.
   5507 	 */
   5508 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5509 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5510 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5511 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5512 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5513 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5514 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5515 	}
   5516 
   5517 	sc->sc_fcrtl = FCRTL_DFLT;
   5518 	if (sc->sc_type < WM_T_82543) {
   5519 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5520 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5521 	} else {
   5522 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5523 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5524 	}
   5525 
   5526 	if (sc->sc_type == WM_T_80003)
   5527 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5528 	else
   5529 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5530 
   5531 	/* Writes the control register. */
   5532 	wm_set_vlan(sc);
   5533 
   5534 	if (sc->sc_flags & WM_F_HAS_MII) {
   5535 		uint16_t kmreg;
   5536 
   5537 		switch (sc->sc_type) {
   5538 		case WM_T_80003:
   5539 		case WM_T_ICH8:
   5540 		case WM_T_ICH9:
   5541 		case WM_T_ICH10:
   5542 		case WM_T_PCH:
   5543 		case WM_T_PCH2:
   5544 		case WM_T_PCH_LPT:
   5545 		case WM_T_PCH_SPT:
   5546 			/*
    5547 			 * Set the MAC to wait the maximum time between each
    5548 			 * iteration and increase the max iterations when
    5549 			 * polling the PHY; this fixes erroneous timeouts at
   5550 			 * 10Mbps.
   5551 			 */
   5552 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5553 			    0xFFFF);
   5554 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5555 			    &kmreg);
   5556 			kmreg |= 0x3F;
   5557 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5558 			    kmreg);
   5559 			break;
   5560 		default:
   5561 			break;
   5562 		}
   5563 
   5564 		if (sc->sc_type == WM_T_80003) {
   5565 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5566 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5567 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5568 
   5569 			/* Bypass RX and TX FIFO's */
   5570 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5571 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5572 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5573 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5574 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5575 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5576 		}
   5577 	}
   5578 #if 0
   5579 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5580 #endif
   5581 
   5582 	/* Set up checksum offload parameters. */
   5583 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5584 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5585 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5586 		reg |= RXCSUM_IPOFL;
   5587 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5588 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5589 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5590 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5591 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5592 
   5593 	/* Set registers about MSI-X */
   5594 	if (wm_is_using_msix(sc)) {
   5595 		uint32_t ivar;
   5596 		struct wm_queue *wmq;
   5597 		int qid, qintr_idx;
   5598 
   5599 		if (sc->sc_type == WM_T_82575) {
   5600 			/* Interrupt control */
   5601 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5602 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5604 
   5605 			/* TX and RX */
   5606 			for (i = 0; i < sc->sc_nqueues; i++) {
   5607 				wmq = &sc->sc_queue[i];
   5608 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5609 				    EITR_TX_QUEUE(wmq->wmq_id)
   5610 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5611 			}
   5612 			/* Link status */
   5613 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5614 			    EITR_OTHER);
   5615 		} else if (sc->sc_type == WM_T_82574) {
   5616 			/* Interrupt control */
   5617 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5618 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5619 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5620 
   5621 			/*
    5622 			 * Work around an issue with spurious interrupts
    5623 			 * in MSI-X mode.
    5624 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5625 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5626 			 */
   5627 			reg = CSR_READ(sc, WMREG_RFCTL);
   5628 			reg |= WMREG_RFCTL_ACKDIS;
   5629 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5630 
   5631 			ivar = 0;
   5632 			/* TX and RX */
   5633 			for (i = 0; i < sc->sc_nqueues; i++) {
   5634 				wmq = &sc->sc_queue[i];
   5635 				qid = wmq->wmq_id;
   5636 				qintr_idx = wmq->wmq_intr_idx;
   5637 
   5638 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5639 				    IVAR_TX_MASK_Q_82574(qid));
   5640 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5641 				    IVAR_RX_MASK_Q_82574(qid));
   5642 			}
   5643 			/* Link status */
   5644 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5645 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5646 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5647 		} else {
   5648 			/* Interrupt control */
   5649 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5650 			    | GPIE_EIAME | GPIE_PBA);
   5651 
   5652 			switch (sc->sc_type) {
   5653 			case WM_T_82580:
   5654 			case WM_T_I350:
   5655 			case WM_T_I354:
   5656 			case WM_T_I210:
   5657 			case WM_T_I211:
   5658 				/* TX and RX */
   5659 				for (i = 0; i < sc->sc_nqueues; i++) {
   5660 					wmq = &sc->sc_queue[i];
   5661 					qid = wmq->wmq_id;
   5662 					qintr_idx = wmq->wmq_intr_idx;
   5663 
   5664 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5665 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5666 					ivar |= __SHIFTIN((qintr_idx
   5667 						| IVAR_VALID),
   5668 					    IVAR_TX_MASK_Q(qid));
   5669 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5670 					ivar |= __SHIFTIN((qintr_idx
   5671 						| IVAR_VALID),
   5672 					    IVAR_RX_MASK_Q(qid));
   5673 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5674 				}
   5675 				break;
   5676 			case WM_T_82576:
   5677 				/* TX and RX */
   5678 				for (i = 0; i < sc->sc_nqueues; i++) {
   5679 					wmq = &sc->sc_queue[i];
   5680 					qid = wmq->wmq_id;
   5681 					qintr_idx = wmq->wmq_intr_idx;
   5682 
   5683 					ivar = CSR_READ(sc,
   5684 					    WMREG_IVAR_Q_82576(qid));
   5685 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5686 					ivar |= __SHIFTIN((qintr_idx
   5687 						| IVAR_VALID),
   5688 					    IVAR_TX_MASK_Q_82576(qid));
   5689 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5690 					ivar |= __SHIFTIN((qintr_idx
   5691 						| IVAR_VALID),
   5692 					    IVAR_RX_MASK_Q_82576(qid));
   5693 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5694 					    ivar);
   5695 				}
   5696 				break;
   5697 			default:
   5698 				break;
   5699 			}
   5700 
   5701 			/* Link status */
   5702 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5703 			    IVAR_MISC_OTHER);
   5704 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5705 		}
   5706 
   5707 		if (wm_is_using_multiqueue(sc)) {
   5708 			wm_init_rss(sc);
   5709 
   5710 			/*
    5711 			 * NOTE: Receive Full-Packet Checksum Offload
    5712 			 * is mutually exclusive with Multiqueue.  However,
    5713 			 * this is not the same as TCP/IP checksums, which
    5714 			 * still work.
    5715 			 */
   5716 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5717 			reg |= RXCSUM_PCSD;
   5718 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5719 		}
   5720 	}
   5721 
   5722 	/* Set up the interrupt registers. */
   5723 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5724 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5725 	    ICR_RXO | ICR_RXT0;
   5726 	if (wm_is_using_msix(sc)) {
   5727 		uint32_t mask;
   5728 		struct wm_queue *wmq;
   5729 
   5730 		switch (sc->sc_type) {
   5731 		case WM_T_82574:
   5732 			mask = 0;
   5733 			for (i = 0; i < sc->sc_nqueues; i++) {
   5734 				wmq = &sc->sc_queue[i];
   5735 				mask |= ICR_TXQ(wmq->wmq_id);
   5736 				mask |= ICR_RXQ(wmq->wmq_id);
   5737 			}
   5738 			mask |= ICR_OTHER;
   5739 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5740 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5741 			break;
   5742 		default:
   5743 			if (sc->sc_type == WM_T_82575) {
   5744 				mask = 0;
   5745 				for (i = 0; i < sc->sc_nqueues; i++) {
   5746 					wmq = &sc->sc_queue[i];
   5747 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5748 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5749 				}
   5750 				mask |= EITR_OTHER;
   5751 			} else {
   5752 				mask = 0;
   5753 				for (i = 0; i < sc->sc_nqueues; i++) {
   5754 					wmq = &sc->sc_queue[i];
   5755 					mask |= 1 << wmq->wmq_intr_idx;
   5756 				}
   5757 				mask |= 1 << sc->sc_link_intr_idx;
   5758 			}
   5759 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5760 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5761 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5762 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5763 			break;
   5764 		}
   5765 	} else
   5766 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5767 
   5768 	/* Set up the inter-packet gap. */
   5769 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5770 
   5771 	if (sc->sc_type >= WM_T_82543) {
   5772 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5773 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5774 			wm_itrs_writereg(sc, wmq);
   5775 		}
   5776 		/*
    5777 		 * Link interrupts occur much less often than TX
    5778 		 * and RX interrupts, so we don't tune the
    5779 		 * EINTR(WM_MSIX_LINKINTR_IDX) value the way
    5780 		 * FreeBSD's if_igb does.
   5781 		 */
   5782 	}
   5783 
   5784 	/* Set the VLAN ethernetype. */
   5785 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5786 
   5787 	/*
   5788 	 * Set up the transmit control register; we start out with
    5789 	 * a collision distance suitable for FDX, but update it when
   5790 	 * we resolve the media type.
   5791 	 */
   5792 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5793 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5794 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5795 	if (sc->sc_type >= WM_T_82571)
   5796 		sc->sc_tctl |= TCTL_MULR;
   5797 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5798 
   5799 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5800 		/* Write TDT after TCTL.EN is set. See the document. */
    5801 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5802 	}
   5803 
   5804 	if (sc->sc_type == WM_T_80003) {
   5805 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5806 		reg &= ~TCTL_EXT_GCEX_MASK;
   5807 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5808 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5809 	}
   5810 
   5811 	/* Set the media. */
   5812 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5813 		goto out;
   5814 
   5815 	/* Configure for OS presence */
   5816 	wm_init_manageability(sc);
   5817 
   5818 	/*
   5819 	 * Set up the receive control register; we actually program
   5820 	 * the register when we set the receive filter.  Use multicast
   5821 	 * address offset type 0.
   5822 	 *
   5823 	 * Only the i82544 has the ability to strip the incoming
   5824 	 * CRC, so we don't enable that feature.
   5825 	 */
   5826 	sc->sc_mchash_type = 0;
   5827 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5828 	    | RCTL_MO(sc->sc_mchash_type);
   5829 
   5830 	/*
    5831 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5832 	 */
   5833 	if (sc->sc_type == WM_T_82574)
   5834 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5835 
   5836 	/*
   5837 	 * The I350 has a bug where it always strips the CRC whether
    5838 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5839 	 */
   5840 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5841 	    || (sc->sc_type == WM_T_I210))
   5842 		sc->sc_rctl |= RCTL_SECRC;
   5843 
   5844 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5845 	    && (ifp->if_mtu > ETHERMTU)) {
   5846 		sc->sc_rctl |= RCTL_LPE;
   5847 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5848 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5849 	}
   5850 
   5851 	if (MCLBYTES == 2048) {
   5852 		sc->sc_rctl |= RCTL_2k;
   5853 	} else {
   5854 		if (sc->sc_type >= WM_T_82543) {
   5855 			switch (MCLBYTES) {
   5856 			case 4096:
   5857 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5858 				break;
   5859 			case 8192:
   5860 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5861 				break;
   5862 			case 16384:
   5863 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5864 				break;
   5865 			default:
   5866 				panic("wm_init: MCLBYTES %d unsupported",
   5867 				    MCLBYTES);
   5868 				break;
   5869 			}
   5870 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5871 	}
   5872 
   5873 	/* Enable ECC */
   5874 	switch (sc->sc_type) {
   5875 	case WM_T_82571:
   5876 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5877 		reg |= PBA_ECC_CORR_EN;
   5878 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5879 		break;
   5880 	case WM_T_PCH_LPT:
   5881 	case WM_T_PCH_SPT:
   5882 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5883 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5884 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5885 
   5886 		sc->sc_ctrl |= CTRL_MEHE;
   5887 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5888 		break;
   5889 	default:
   5890 		break;
   5891 	}
   5892 
   5893 	/*
   5894 	 * Set the receive filter.
   5895 	 *
   5896 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5897 	 * the setting of RCTL.EN in wm_set_filter()
   5898 	 */
   5899 	wm_set_filter(sc);
   5900 
    5901 	/* On 82575 and later, set RDT only if RX is enabled */
   5902 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5903 		int qidx;
   5904 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5905 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5906 			for (i = 0; i < WM_NRXDESC; i++) {
   5907 				mutex_enter(rxq->rxq_lock);
   5908 				wm_init_rxdesc(rxq, i);
   5909 				mutex_exit(rxq->rxq_lock);
   5910 
   5911 			}
   5912 		}
   5913 	}
   5914 
   5915 	wm_unset_stopping_flags(sc);
   5916 
   5917 	/* Start the one second link check clock. */
   5918 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5919 
   5920 	/* ...all done! */
   5921 	ifp->if_flags |= IFF_RUNNING;
   5922 	ifp->if_flags &= ~IFF_OACTIVE;
   5923 
   5924  out:
   5925 	sc->sc_if_flags = ifp->if_flags;
   5926 	if (error)
   5927 		log(LOG_ERR, "%s: interface not running\n",
   5928 		    device_xname(sc->sc_dev));
   5929 	return error;
   5930 }
   5931 
   5932 /*
   5933  * wm_stop:		[ifnet interface function]
   5934  *
   5935  *	Stop transmission on the interface.
   5936  */
   5937 static void
   5938 wm_stop(struct ifnet *ifp, int disable)
   5939 {
   5940 	struct wm_softc *sc = ifp->if_softc;
   5941 
   5942 	WM_CORE_LOCK(sc);
   5943 	wm_stop_locked(ifp, disable);
   5944 	WM_CORE_UNLOCK(sc);
   5945 }
   5946 
   5947 static void
   5948 wm_stop_locked(struct ifnet *ifp, int disable)
   5949 {
   5950 	struct wm_softc *sc = ifp->if_softc;
   5951 	struct wm_txsoft *txs;
   5952 	int i, qidx;
   5953 
   5954 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5955 		device_xname(sc->sc_dev), __func__));
   5956 	KASSERT(WM_CORE_LOCKED(sc));
   5957 
   5958 	wm_set_stopping_flags(sc);
   5959 
   5960 	/* Stop the one second clock. */
   5961 	callout_stop(&sc->sc_tick_ch);
   5962 
   5963 	/* Stop the 82547 Tx FIFO stall check timer. */
   5964 	if (sc->sc_type == WM_T_82547)
   5965 		callout_stop(&sc->sc_txfifo_ch);
   5966 
   5967 	if (sc->sc_flags & WM_F_HAS_MII) {
   5968 		/* Down the MII. */
   5969 		mii_down(&sc->sc_mii);
   5970 	} else {
   5971 #if 0
   5972 		/* Should we clear PHY's status properly? */
   5973 		wm_reset(sc);
   5974 #endif
   5975 	}
   5976 
   5977 	/* Stop the transmit and receive processes. */
   5978 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5979 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5980 	sc->sc_rctl &= ~RCTL_EN;
   5981 
   5982 	/*
   5983 	 * Clear the interrupt mask to ensure the device cannot assert its
   5984 	 * interrupt line.
   5985 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5986 	 * service any currently pending or shared interrupt.
   5987 	 */
   5988 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5989 	sc->sc_icr = 0;
   5990 	if (wm_is_using_msix(sc)) {
   5991 		if (sc->sc_type != WM_T_82574) {
   5992 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5993 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5994 		} else
   5995 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5996 	}
   5997 
   5998 	/* Release any queued transmit buffers. */
   5999 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6000 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6001 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6002 		mutex_enter(txq->txq_lock);
   6003 		txq->txq_watchdog = false; /* ensure watchdog disabled */
   6004 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6005 			txs = &txq->txq_soft[i];
   6006 			if (txs->txs_mbuf != NULL) {
   6007 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6008 				m_freem(txs->txs_mbuf);
   6009 				txs->txs_mbuf = NULL;
   6010 			}
   6011 		}
   6012 		mutex_exit(txq->txq_lock);
   6013 	}
   6014 
   6015 	/* Mark the interface as down and cancel the watchdog timer. */
   6016 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6017 
   6018 	if (disable) {
   6019 		for (i = 0; i < sc->sc_nqueues; i++) {
   6020 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6021 			mutex_enter(rxq->rxq_lock);
   6022 			wm_rxdrain(rxq);
   6023 			mutex_exit(rxq->rxq_lock);
   6024 		}
   6025 	}
   6026 
   6027 #if 0 /* notyet */
   6028 	if (sc->sc_type >= WM_T_82544)
   6029 		CSR_WRITE(sc, WMREG_WUC, 0);
   6030 #endif
   6031 }
   6032 
   6033 static void
   6034 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6035 {
   6036 	struct mbuf *m;
   6037 	int i;
   6038 
   6039 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6040 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6041 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6042 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6043 		    m->m_data, m->m_len, m->m_flags);
   6044 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6045 	    i, i == 1 ? "" : "s");
   6046 }
   6047 
   6048 /*
   6049  * wm_82547_txfifo_stall:
   6050  *
   6051  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6052  *	reset the FIFO pointers, and restart packet transmission.
   6053  */
   6054 static void
   6055 wm_82547_txfifo_stall(void *arg)
   6056 {
   6057 	struct wm_softc *sc = arg;
   6058 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6059 
   6060 	mutex_enter(txq->txq_lock);
   6061 
   6062 	if (txq->txq_stopping)
   6063 		goto out;
   6064 
   6065 	if (txq->txq_fifo_stall) {
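         		/*
         		 * The FIFO has drained when the descriptor ring is empty
         		 * (TDT == TDH) and both the live and the saved FIFO
         		 * head/tail register pairs match.
         		 */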
   6066 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6067 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6068 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6069 			/*
   6070 			 * Packets have drained.  Stop transmitter, reset
   6071 			 * FIFO pointers, restart transmitter, and kick
   6072 			 * the packet queue.
   6073 			 */
   6074 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6075 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6076 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6077 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6078 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6079 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6080 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6081 			CSR_WRITE_FLUSH(sc);
   6082 
   6083 			txq->txq_fifo_head = 0;
   6084 			txq->txq_fifo_stall = 0;
   6085 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6086 		} else {
   6087 			/*
   6088 			 * Still waiting for packets to drain; try again in
   6089 			 * another tick.
   6090 			 */
   6091 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6092 		}
   6093 	}
   6094 
   6095 out:
   6096 	mutex_exit(txq->txq_lock);
   6097 }
   6098 
   6099 /*
   6100  * wm_82547_txfifo_bugchk:
   6101  *
   6102  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6103  *	prevent enqueueing a packet that would wrap around the end
    6104  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6105  *
   6106  *	We do this by checking the amount of space before the end
   6107  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6108  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6109  *	the internal FIFO pointers to the beginning, and restart
   6110  *	transmission on the interface.
   6111  */
   6112 #define	WM_FIFO_HDR		0x10
   6113 #define	WM_82547_PAD_LEN	0x3e0
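         /*
          * Worked example of the check below, with illustrative (not the
          * driver's actual) numbers: if space is 0x200 bytes, a 1514-byte
          * frame rounds up to len = 0x600, and since 0x600 >=
          * WM_82547_PAD_LEN (0x3e0) + 0x200 the packet is held back until
          * the FIFO drains.
          */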
   6114 static int
   6115 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6116 {
   6117 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6118 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6119 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6120 
   6121 	/* Just return if already stalled. */
   6122 	if (txq->txq_fifo_stall)
   6123 		return 1;
   6124 
   6125 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6126 		/* Stall only occurs in half-duplex mode. */
   6127 		goto send_packet;
   6128 	}
   6129 
   6130 	if (len >= WM_82547_PAD_LEN + space) {
   6131 		txq->txq_fifo_stall = 1;
   6132 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6133 		return 1;
   6134 	}
   6135 
   6136  send_packet:
   6137 	txq->txq_fifo_head += len;
   6138 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6139 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6140 
   6141 	return 0;
   6142 }
   6143 
   6144 static int
   6145 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6146 {
   6147 	int error;
   6148 
   6149 	/*
   6150 	 * Allocate the control data structures, and create and load the
   6151 	 * DMA map for it.
   6152 	 *
   6153 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6154 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6155 	 * both sets within the same 4G segment.
   6156 	 */
   6157 	if (sc->sc_type < WM_T_82544)
   6158 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6159 	else
   6160 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6161 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6162 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6163 	else
   6164 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6165 
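         	/*
         	 * The 0x100000000ULL boundary argument below keeps the
         	 * allocation from crossing a 4GB line, enforcing the
         	 * same-segment requirement described above.
         	 */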
   6166 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6167 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6168 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6169 		aprint_error_dev(sc->sc_dev,
   6170 		    "unable to allocate TX control data, error = %d\n",
   6171 		    error);
   6172 		goto fail_0;
   6173 	}
   6174 
   6175 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6176 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6177 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6178 		aprint_error_dev(sc->sc_dev,
   6179 		    "unable to map TX control data, error = %d\n", error);
   6180 		goto fail_1;
   6181 	}
   6182 
   6183 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6184 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6185 		aprint_error_dev(sc->sc_dev,
   6186 		    "unable to create TX control data DMA map, error = %d\n",
   6187 		    error);
   6188 		goto fail_2;
   6189 	}
   6190 
   6191 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6192 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6193 		aprint_error_dev(sc->sc_dev,
   6194 		    "unable to load TX control data DMA map, error = %d\n",
   6195 		    error);
   6196 		goto fail_3;
   6197 	}
   6198 
   6199 	return 0;
   6200 
   6201  fail_3:
   6202 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6203  fail_2:
   6204 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6205 	    WM_TXDESCS_SIZE(txq));
   6206  fail_1:
   6207 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6208  fail_0:
   6209 	return error;
   6210 }
   6211 
   6212 static void
   6213 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6214 {
   6215 
   6216 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6217 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6218 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6219 	    WM_TXDESCS_SIZE(txq));
   6220 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6221 }
   6222 
   6223 static int
   6224 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6225 {
   6226 	int error;
   6227 	size_t rxq_descs_size;
   6228 
   6229 	/*
   6230 	 * Allocate the control data structures, and create and load the
   6231 	 * DMA map for it.
   6232 	 *
   6233 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6234 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6235 	 * both sets within the same 4G segment.
   6236 	 */
   6237 	rxq->rxq_ndesc = WM_NRXDESC;
   6238 	if (sc->sc_type == WM_T_82574)
   6239 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6240 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6241 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6242 	else
   6243 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6244 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6245 
   6246 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6247 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6248 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6249 		aprint_error_dev(sc->sc_dev,
   6250 		    "unable to allocate RX control data, error = %d\n",
   6251 		    error);
   6252 		goto fail_0;
   6253 	}
   6254 
   6255 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6256 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6257 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6258 		aprint_error_dev(sc->sc_dev,
   6259 		    "unable to map RX control data, error = %d\n", error);
   6260 		goto fail_1;
   6261 	}
   6262 
   6263 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6264 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6265 		aprint_error_dev(sc->sc_dev,
   6266 		    "unable to create RX control data DMA map, error = %d\n",
   6267 		    error);
   6268 		goto fail_2;
   6269 	}
   6270 
   6271 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6272 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6273 		aprint_error_dev(sc->sc_dev,
   6274 		    "unable to load RX control data DMA map, error = %d\n",
   6275 		    error);
   6276 		goto fail_3;
   6277 	}
   6278 
   6279 	return 0;
   6280 
   6281  fail_3:
   6282 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6283  fail_2:
   6284 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6285 	    rxq_descs_size);
   6286  fail_1:
   6287 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6288  fail_0:
   6289 	return error;
   6290 }
   6291 
   6292 static void
   6293 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6294 {
   6295 
   6296 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6297 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6298 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6299 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6300 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6301 }
   6302 
   6303 
   6304 static int
   6305 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6306 {
   6307 	int i, error;
   6308 
   6309 	/* Create the transmit buffer DMA maps. */
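         	/*
         	 * The 82547 variants use a shorter job queue; cf. the
         	 * Tx FIFO workaround in wm_82547_txfifo_bugchk().
         	 */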
   6310 	WM_TXQUEUELEN(txq) =
   6311 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6312 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6313 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6314 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6315 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6316 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6317 			aprint_error_dev(sc->sc_dev,
   6318 			    "unable to create Tx DMA map %d, error = %d\n",
   6319 			    i, error);
   6320 			goto fail;
   6321 		}
   6322 	}
   6323 
   6324 	return 0;
   6325 
   6326  fail:
   6327 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6328 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6329 			bus_dmamap_destroy(sc->sc_dmat,
   6330 			    txq->txq_soft[i].txs_dmamap);
   6331 	}
   6332 	return error;
   6333 }
   6334 
   6335 static void
   6336 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6337 {
   6338 	int i;
   6339 
   6340 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6341 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6342 			bus_dmamap_destroy(sc->sc_dmat,
   6343 			    txq->txq_soft[i].txs_dmamap);
   6344 	}
   6345 }
   6346 
   6347 static int
   6348 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6349 {
   6350 	int i, error;
   6351 
   6352 	/* Create the receive buffer DMA maps. */
   6353 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6354 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6355 			    MCLBYTES, 0, 0,
   6356 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6357 			aprint_error_dev(sc->sc_dev,
    6358 			    "unable to create Rx DMA map %d, error = %d\n",
   6359 			    i, error);
   6360 			goto fail;
   6361 		}
   6362 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6363 	}
   6364 
   6365 	return 0;
   6366 
   6367  fail:
   6368 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6369 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6370 			bus_dmamap_destroy(sc->sc_dmat,
   6371 			    rxq->rxq_soft[i].rxs_dmamap);
   6372 	}
   6373 	return error;
   6374 }
   6375 
   6376 static void
   6377 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6378 {
   6379 	int i;
   6380 
   6381 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6382 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6383 			bus_dmamap_destroy(sc->sc_dmat,
   6384 			    rxq->rxq_soft[i].rxs_dmamap);
   6385 	}
   6386 }
   6387 
   6388 /*
    6389  * wm_alloc_txrx_queues:
   6390  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6391  */
   6392 static int
   6393 wm_alloc_txrx_queues(struct wm_softc *sc)
   6394 {
   6395 	int i, error, tx_done, rx_done;
   6396 
   6397 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6398 	    KM_SLEEP);
   6399 	if (sc->sc_queue == NULL) {
    6400 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6401 		error = ENOMEM;
   6402 		goto fail_0;
   6403 	}
   6404 
   6405 	/*
   6406 	 * For transmission
   6407 	 */
   6408 	error = 0;
   6409 	tx_done = 0;
   6410 	for (i = 0; i < sc->sc_nqueues; i++) {
   6411 #ifdef WM_EVENT_COUNTERS
   6412 		int j;
   6413 		const char *xname;
   6414 #endif
   6415 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6416 		txq->txq_sc = sc;
   6417 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6418 
   6419 		error = wm_alloc_tx_descs(sc, txq);
   6420 		if (error)
   6421 			break;
   6422 		error = wm_alloc_tx_buffer(sc, txq);
   6423 		if (error) {
   6424 			wm_free_tx_descs(sc, txq);
   6425 			break;
   6426 		}
   6427 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6428 		if (txq->txq_interq == NULL) {
   6429 			wm_free_tx_descs(sc, txq);
   6430 			wm_free_tx_buffer(sc, txq);
   6431 			error = ENOMEM;
   6432 			break;
   6433 		}
   6434 
   6435 #ifdef WM_EVENT_COUNTERS
   6436 		xname = device_xname(sc->sc_dev);
   6437 
   6438 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6439 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6440 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6441 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6442 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6443 
   6444 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6445 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6446 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6447 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6448 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6449 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6450 
   6451 		for (j = 0; j < WM_NTXSEGS; j++) {
   6452 			snprintf(txq->txq_txseg_evcnt_names[j],
   6453 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6454 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6455 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6456 		}
   6457 
   6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6459 
   6460 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6461 #endif /* WM_EVENT_COUNTERS */
   6462 
   6463 		tx_done++;
   6464 	}
   6465 	if (error)
   6466 		goto fail_1;
   6467 
   6468 	/*
    6469 	 * For receive
   6470 	 */
   6471 	error = 0;
   6472 	rx_done = 0;
   6473 	for (i = 0; i < sc->sc_nqueues; i++) {
   6474 #ifdef WM_EVENT_COUNTERS
   6475 		const char *xname;
   6476 #endif
   6477 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6478 		rxq->rxq_sc = sc;
   6479 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6480 
   6481 		error = wm_alloc_rx_descs(sc, rxq);
   6482 		if (error)
   6483 			break;
   6484 
   6485 		error = wm_alloc_rx_buffer(sc, rxq);
   6486 		if (error) {
   6487 			wm_free_rx_descs(sc, rxq);
   6488 			break;
   6489 		}
   6490 
   6491 #ifdef WM_EVENT_COUNTERS
   6492 		xname = device_xname(sc->sc_dev);
   6493 
   6494 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6495 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
   6496 
   6497 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6498 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6499 #endif /* WM_EVENT_COUNTERS */
   6500 
   6501 		rx_done++;
   6502 	}
   6503 	if (error)
   6504 		goto fail_2;
   6505 
   6506 	return 0;
   6507 
   6508  fail_2:
   6509 	for (i = 0; i < rx_done; i++) {
   6510 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6511 		wm_free_rx_buffer(sc, rxq);
   6512 		wm_free_rx_descs(sc, rxq);
   6513 		if (rxq->rxq_lock)
   6514 			mutex_obj_free(rxq->rxq_lock);
   6515 	}
   6516  fail_1:
   6517 	for (i = 0; i < tx_done; i++) {
   6518 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6519 		pcq_destroy(txq->txq_interq);
   6520 		wm_free_tx_buffer(sc, txq);
   6521 		wm_free_tx_descs(sc, txq);
   6522 		if (txq->txq_lock)
   6523 			mutex_obj_free(txq->txq_lock);
   6524 	}
   6525 
   6526 	kmem_free(sc->sc_queue,
   6527 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6528  fail_0:
   6529 	return error;
   6530 }
   6531 
   6532 /*
    6533  * wm_free_txrx_queues:
   6534  *	Free {tx,rx}descs and {tx,rx} buffers
   6535  */
   6536 static void
   6537 wm_free_txrx_queues(struct wm_softc *sc)
   6538 {
   6539 	int i;
   6540 
   6541 	for (i = 0; i < sc->sc_nqueues; i++) {
   6542 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6543 
   6544 #ifdef WM_EVENT_COUNTERS
   6545 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6546 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
   6547 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6548 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6549 #endif /* WM_EVENT_COUNTERS */
   6550 
   6551 		wm_free_rx_buffer(sc, rxq);
   6552 		wm_free_rx_descs(sc, rxq);
   6553 		if (rxq->rxq_lock)
   6554 			mutex_obj_free(rxq->rxq_lock);
   6555 	}
   6556 
   6557 	for (i = 0; i < sc->sc_nqueues; i++) {
   6558 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6559 		struct mbuf *m;
   6560 #ifdef WM_EVENT_COUNTERS
   6561 		int j;
   6562 
   6563 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6564 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6565 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6566 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6567 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6568 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6569 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6570 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6571 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6572 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6573 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6574 
   6575 		for (j = 0; j < WM_NTXSEGS; j++)
   6576 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6577 
   6578 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6579 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6580 #endif /* WM_EVENT_COUNTERS */
   6581 
   6582 		/* drain txq_interq */
   6583 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6584 			m_freem(m);
   6585 		pcq_destroy(txq->txq_interq);
   6586 
   6587 		wm_free_tx_buffer(sc, txq);
   6588 		wm_free_tx_descs(sc, txq);
   6589 		if (txq->txq_lock)
   6590 			mutex_obj_free(txq->txq_lock);
   6591 	}
   6592 
   6593 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6594 }
   6595 
   6596 static void
   6597 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6598 {
   6599 
   6600 	KASSERT(mutex_owned(txq->txq_lock));
   6601 
   6602 	/* Initialize the transmit descriptor ring. */
   6603 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6604 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6605 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6606 	txq->txq_free = WM_NTXDESC(txq);
   6607 	txq->txq_next = 0;
   6608 }
   6609 
   6610 static void
   6611 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6612     struct wm_txqueue *txq)
   6613 {
   6614 
   6615 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6616 		device_xname(sc->sc_dev), __func__));
   6617 	KASSERT(mutex_owned(txq->txq_lock));
   6618 
   6619 	if (sc->sc_type < WM_T_82543) {
   6620 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6621 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6622 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6623 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6624 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6625 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6626 	} else {
   6627 		int qid = wmq->wmq_id;
   6628 
   6629 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6630 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6631 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6632 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6633 
   6634 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6635 			/*
   6636 			 * Don't write TDT before TCTL.EN is set.
    6637 			 * See the documentation.
   6638 			 */
   6639 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6640 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6641 			    | TXDCTL_WTHRESH(0));
   6642 		else {
   6643 			/* XXX should update with AIM? */
   6644 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6645 			if (sc->sc_type >= WM_T_82540) {
    6646 				/* TADV should be set to the same value as TIDV */
   6647 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6648 			}
   6649 
   6650 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6651 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6652 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6653 		}
   6654 	}
   6655 }
   6656 
   6657 static void
   6658 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6659 {
   6660 	int i;
   6661 
   6662 	KASSERT(mutex_owned(txq->txq_lock));
   6663 
   6664 	/* Initialize the transmit job descriptors. */
   6665 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6666 		txq->txq_soft[i].txs_mbuf = NULL;
   6667 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6668 	txq->txq_snext = 0;
   6669 	txq->txq_sdirty = 0;
   6670 }
   6671 
   6672 static void
   6673 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6674     struct wm_txqueue *txq)
   6675 {
   6676 
   6677 	KASSERT(mutex_owned(txq->txq_lock));
   6678 
   6679 	/*
   6680 	 * Set up some register offsets that are different between
   6681 	 * the i82542 and the i82543 and later chips.
   6682 	 */
   6683 	if (sc->sc_type < WM_T_82543)
   6684 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6685 	else
   6686 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6687 
   6688 	wm_init_tx_descs(sc, txq);
   6689 	wm_init_tx_regs(sc, wmq, txq);
   6690 	wm_init_tx_buffer(sc, txq);
   6691 
   6692 	txq->txq_watchdog = false;
   6693 }
   6694 
   6695 static void
   6696 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6697     struct wm_rxqueue *rxq)
   6698 {
   6699 
   6700 	KASSERT(mutex_owned(rxq->rxq_lock));
   6701 
   6702 	/*
   6703 	 * Initialize the receive descriptor and receive job
   6704 	 * descriptor rings.
   6705 	 */
   6706 	if (sc->sc_type < WM_T_82543) {
   6707 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6708 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6709 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6710 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6711 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6712 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6713 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6714 
   6715 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6716 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6717 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6718 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6719 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6720 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6721 	} else {
   6722 		int qid = wmq->wmq_id;
   6723 
   6724 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6725 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6726 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6727 
   6728 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
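         			/*
         			 * SRRCTL expresses the Rx buffer size in units
         			 * of 1 << SRRCTL_BSIZEPKT_SHIFT bytes, so
         			 * MCLBYTES must be a multiple of that unit.
         			 */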
   6729 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6730 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6731 
   6732 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6733 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6734 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6735 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6736 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6737 			    | RXDCTL_WTHRESH(1));
   6738 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6739 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6740 		} else {
   6741 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6742 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6743 			/* XXX should update with AIM? */
   6744 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6745 			/* RADV MUST be set to the same value as RDTR */
   6746 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6747 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6748 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6749 		}
   6750 	}
   6751 }
   6752 
   6753 static int
   6754 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6755 {
   6756 	struct wm_rxsoft *rxs;
   6757 	int error, i;
   6758 
   6759 	KASSERT(mutex_owned(rxq->rxq_lock));
   6760 
   6761 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6762 		rxs = &rxq->rxq_soft[i];
   6763 		if (rxs->rxs_mbuf == NULL) {
   6764 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6765 				log(LOG_ERR, "%s: unable to allocate or map "
   6766 				    "rx buffer %d, error = %d\n",
   6767 				    device_xname(sc->sc_dev), i, error);
   6768 				/*
   6769 				 * XXX Should attempt to run with fewer receive
   6770 				 * XXX buffers instead of just failing.
   6771 				 */
   6772 				wm_rxdrain(rxq);
   6773 				return ENOMEM;
   6774 			}
   6775 		} else {
   6776 			/*
   6777 			 * For 82575 and 82576, the RX descriptors must be
   6778 			 * initialized after the setting of RCTL.EN in
   6779 			 * wm_set_filter()
   6780 			 */
   6781 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6782 				wm_init_rxdesc(rxq, i);
   6783 		}
   6784 	}
   6785 	rxq->rxq_ptr = 0;
   6786 	rxq->rxq_discard = 0;
   6787 	WM_RXCHAIN_RESET(rxq);
   6788 
   6789 	return 0;
   6790 }
   6791 
   6792 static int
   6793 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6794     struct wm_rxqueue *rxq)
   6795 {
   6796 
   6797 	KASSERT(mutex_owned(rxq->rxq_lock));
   6798 
   6799 	/*
   6800 	 * Set up some register offsets that are different between
   6801 	 * the i82542 and the i82543 and later chips.
   6802 	 */
   6803 	if (sc->sc_type < WM_T_82543)
   6804 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6805 	else
   6806 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6807 
   6808 	wm_init_rx_regs(sc, wmq, rxq);
   6809 	return wm_init_rx_buffer(sc, rxq);
   6810 }
   6811 
   6812 /*
    6813  * wm_init_txrx_queues:
   6814  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6815  */
   6816 static int
   6817 wm_init_txrx_queues(struct wm_softc *sc)
   6818 {
   6819 	int i, error = 0;
   6820 
   6821 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6822 		device_xname(sc->sc_dev), __func__));
   6823 
   6824 	for (i = 0; i < sc->sc_nqueues; i++) {
   6825 		struct wm_queue *wmq = &sc->sc_queue[i];
   6826 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6827 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6828 
    6829 		/*
    6830 		 * TODO
    6831 		 * Currently, a constant value is used instead of AIM (adaptive
    6832 		 * interrupt moderation).  Furthermore, the interrupt interval of
    6833 		 * multiqueue, which uses polling mode, is shorter than the default.
    6834 		 * More tuning and AIM are required.
    6835 		 */
   6836 		if (wm_is_using_multiqueue(sc))
   6837 			wmq->wmq_itr = 50;
   6838 		else
   6839 			wmq->wmq_itr = sc->sc_itr_init;
   6840 		wmq->wmq_set_itr = true;
   6841 
   6842 		mutex_enter(txq->txq_lock);
   6843 		wm_init_tx_queue(sc, wmq, txq);
   6844 		mutex_exit(txq->txq_lock);
   6845 
   6846 		mutex_enter(rxq->rxq_lock);
   6847 		error = wm_init_rx_queue(sc, wmq, rxq);
   6848 		mutex_exit(rxq->rxq_lock);
   6849 		if (error)
   6850 			break;
   6851 	}
   6852 
   6853 	return error;
   6854 }
   6855 
   6856 /*
   6857  * wm_tx_offload:
   6858  *
   6859  *	Set up TCP/IP checksumming parameters for the
   6860  *	specified packet.
   6861  */
   6862 static int
   6863 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6864     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6865 {
   6866 	struct mbuf *m0 = txs->txs_mbuf;
   6867 	struct livengood_tcpip_ctxdesc *t;
   6868 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6869 	uint32_t ipcse;
   6870 	struct ether_header *eh;
   6871 	int offset, iphl;
   6872 	uint8_t fields;
   6873 
   6874 	/*
   6875 	 * XXX It would be nice if the mbuf pkthdr had offset
   6876 	 * fields for the protocol headers.
   6877 	 */
   6878 
   6879 	eh = mtod(m0, struct ether_header *);
   6880 	switch (htons(eh->ether_type)) {
   6881 	case ETHERTYPE_IP:
   6882 	case ETHERTYPE_IPV6:
   6883 		offset = ETHER_HDR_LEN;
   6884 		break;
   6885 
   6886 	case ETHERTYPE_VLAN:
   6887 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6888 		break;
   6889 
   6890 	default:
   6891 		/*
   6892 		 * Don't support this protocol or encapsulation.
   6893 		 */
   6894 		*fieldsp = 0;
   6895 		*cmdp = 0;
   6896 		return 0;
   6897 	}
   6898 
   6899 	if ((m0->m_pkthdr.csum_flags &
   6900 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6901 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6902 	} else {
   6903 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6904 	}
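         	/*
         	 * IPCSE is the ending byte offset of the IP checksum
         	 * calculation, i.e. the last byte of the IP header.
         	 */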
   6905 	ipcse = offset + iphl - 1;
   6906 
   6907 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6908 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6909 	seg = 0;
   6910 	fields = 0;
   6911 
   6912 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6913 		int hlen = offset + iphl;
   6914 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6915 
   6916 		if (__predict_false(m0->m_len <
   6917 				    (hlen + sizeof(struct tcphdr)))) {
   6918 			/*
   6919 			 * TCP/IP headers are not in the first mbuf; we need
   6920 			 * to do this the slow and painful way.  Let's just
   6921 			 * hope this doesn't happen very often.
   6922 			 */
   6923 			struct tcphdr th;
   6924 
   6925 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6926 
   6927 			m_copydata(m0, hlen, sizeof(th), &th);
   6928 			if (v4) {
   6929 				struct ip ip;
   6930 
   6931 				m_copydata(m0, offset, sizeof(ip), &ip);
   6932 				ip.ip_len = 0;
   6933 				m_copyback(m0,
   6934 				    offset + offsetof(struct ip, ip_len),
   6935 				    sizeof(ip.ip_len), &ip.ip_len);
   6936 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6937 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6938 			} else {
   6939 				struct ip6_hdr ip6;
   6940 
   6941 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6942 				ip6.ip6_plen = 0;
   6943 				m_copyback(m0,
   6944 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6945 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6946 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6947 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6948 			}
   6949 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6950 			    sizeof(th.th_sum), &th.th_sum);
   6951 
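         			/* th_off is in units of 32-bit words. */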
   6952 			hlen += th.th_off << 2;
   6953 		} else {
   6954 			/*
   6955 			 * TCP/IP headers are in the first mbuf; we can do
   6956 			 * this the easy way.
   6957 			 */
   6958 			struct tcphdr *th;
   6959 
   6960 			if (v4) {
   6961 				struct ip *ip =
   6962 				    (void *)(mtod(m0, char *) + offset);
   6963 				th = (void *)(mtod(m0, char *) + hlen);
   6964 
   6965 				ip->ip_len = 0;
   6966 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6967 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6968 			} else {
   6969 				struct ip6_hdr *ip6 =
   6970 				    (void *)(mtod(m0, char *) + offset);
   6971 				th = (void *)(mtod(m0, char *) + hlen);
   6972 
   6973 				ip6->ip6_plen = 0;
   6974 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6975 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6976 			}
   6977 			hlen += th->th_off << 2;
   6978 		}
   6979 
   6980 		if (v4) {
   6981 			WM_Q_EVCNT_INCR(txq, txtso);
   6982 			cmdlen |= WTX_TCPIP_CMD_IP;
   6983 		} else {
   6984 			WM_Q_EVCNT_INCR(txq, txtso6);
   6985 			ipcse = 0;
   6986 		}
   6987 		cmd |= WTX_TCPIP_CMD_TSE;
   6988 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6989 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6990 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6991 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6992 	}
   6993 
   6994 	/*
   6995 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6996 	 * offload feature, if we load the context descriptor, we
   6997 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6998 	 */
   6999 
   7000 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7001 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7002 	    WTX_TCPIP_IPCSE(ipcse);
   7003 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7004 		WM_Q_EVCNT_INCR(txq, txipsum);
   7005 		fields |= WTX_IXSM;
   7006 	}
   7007 
   7008 	offset += iphl;
   7009 
   7010 	if (m0->m_pkthdr.csum_flags &
   7011 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7012 		WM_Q_EVCNT_INCR(txq, txtusum);
   7013 		fields |= WTX_TXSM;
   7014 		tucs = WTX_TCPIP_TUCSS(offset) |
   7015 		    WTX_TCPIP_TUCSO(offset +
   7016 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7017 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7018 	} else if ((m0->m_pkthdr.csum_flags &
   7019 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7020 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7021 		fields |= WTX_TXSM;
   7022 		tucs = WTX_TCPIP_TUCSS(offset) |
   7023 		    WTX_TCPIP_TUCSO(offset +
   7024 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7025 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7026 	} else {
   7027 		/* Just initialize it to a valid TCP context. */
   7028 		tucs = WTX_TCPIP_TUCSS(offset) |
   7029 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7030 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7031 	}
   7032 
    7033 	/*
    7034 	 * We don't have to write the context descriptor for every packet,
    7035 	 * except on the 82574.  On the 82574, the context descriptor must
    7036 	 * be written for every packet when two descriptor queues are used.
    7037 	 * Writing a context descriptor for every packet adds overhead,
    7038 	 * but it does not cause problems.
    7039 	 */
   7040 	/* Fill in the context descriptor. */
   7041 	t = (struct livengood_tcpip_ctxdesc *)
   7042 	    &txq->txq_descs[txq->txq_next];
   7043 	t->tcpip_ipcs = htole32(ipcs);
   7044 	t->tcpip_tucs = htole32(tucs);
   7045 	t->tcpip_cmdlen = htole32(cmdlen);
   7046 	t->tcpip_seg = htole32(seg);
   7047 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7048 
   7049 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7050 	txs->txs_ndesc++;
   7051 
   7052 	*cmdp = cmd;
   7053 	*fieldsp = fields;
   7054 
   7055 	return 0;
   7056 }
   7057 
   7058 static inline int
   7059 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7060 {
   7061 	struct wm_softc *sc = ifp->if_softc;
   7062 	u_int cpuid = cpu_index(curcpu());
   7063 
    7064 	/*
    7065 	 * Currently, a simple round-robin distribution strategy.
    7066 	 * TODO:
    7067 	 * distribute by flow ID (the RSS hash value).
    7068 	 */
    7069 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7070 }
   7071 
   7072 /*
   7073  * wm_start:		[ifnet interface function]
   7074  *
   7075  *	Start packet transmission on the interface.
   7076  */
   7077 static void
   7078 wm_start(struct ifnet *ifp)
   7079 {
   7080 	struct wm_softc *sc = ifp->if_softc;
   7081 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7082 
   7083 #ifdef WM_MPSAFE
   7084 	KASSERT(if_is_mpsafe(ifp));
   7085 #endif
   7086 	/*
   7087 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7088 	 */
   7089 
   7090 	mutex_enter(txq->txq_lock);
   7091 	if (!txq->txq_stopping)
   7092 		wm_start_locked(ifp);
   7093 	mutex_exit(txq->txq_lock);
   7094 }
   7095 
   7096 static void
   7097 wm_start_locked(struct ifnet *ifp)
   7098 {
   7099 	struct wm_softc *sc = ifp->if_softc;
   7100 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7101 
   7102 	wm_send_common_locked(ifp, txq, false);
   7103 }
   7104 
   7105 static int
   7106 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7107 {
   7108 	int qid;
   7109 	struct wm_softc *sc = ifp->if_softc;
   7110 	struct wm_txqueue *txq;
   7111 
   7112 	qid = wm_select_txqueue(ifp, m);
   7113 	txq = &sc->sc_queue[qid].wmq_txq;
   7114 
   7115 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7116 		m_freem(m);
   7117 		WM_Q_EVCNT_INCR(txq, txdrop);
   7118 		return ENOBUFS;
   7119 	}
   7120 
   7121 	/*
   7122 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7123 	 */
   7124 	ifp->if_obytes += m->m_pkthdr.len;
   7125 	if (m->m_flags & M_MCAST)
   7126 		ifp->if_omcasts++;
   7127 
   7128 	if (mutex_tryenter(txq->txq_lock)) {
   7129 		if (!txq->txq_stopping)
   7130 			wm_transmit_locked(ifp, txq);
   7131 		mutex_exit(txq->txq_lock);
   7132 	}
   7133 
   7134 	return 0;
   7135 }
   7136 
   7137 static void
   7138 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7139 {
   7140 
   7141 	wm_send_common_locked(ifp, txq, true);
   7142 }
   7143 
   7144 static void
   7145 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7146     bool is_transmit)
   7147 {
   7148 	struct wm_softc *sc = ifp->if_softc;
   7149 	struct mbuf *m0;
   7150 	struct wm_txsoft *txs;
   7151 	bus_dmamap_t dmamap;
   7152 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7153 	bus_addr_t curaddr;
   7154 	bus_size_t seglen, curlen;
   7155 	uint32_t cksumcmd;
   7156 	uint8_t cksumfields;
   7157 
   7158 	KASSERT(mutex_owned(txq->txq_lock));
   7159 
   7160 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7161 		return;
   7162 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7163 		return;
   7164 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7165 		return;
   7166 
   7167 	/* Remember the previous number of free descriptors. */
   7168 	ofree = txq->txq_free;
   7169 
   7170 	/*
   7171 	 * Loop through the send queue, setting up transmit descriptors
   7172 	 * until we drain the queue, or use up all available transmit
   7173 	 * descriptors.
   7174 	 */
   7175 	for (;;) {
   7176 		m0 = NULL;
   7177 
   7178 		/* Get a work queue entry. */
   7179 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7180 			wm_txeof(txq, UINT_MAX);
   7181 			if (txq->txq_sfree == 0) {
   7182 				DPRINTF(WM_DEBUG_TX,
   7183 				    ("%s: TX: no free job descriptors\n",
   7184 					device_xname(sc->sc_dev)));
   7185 				WM_Q_EVCNT_INCR(txq, txsstall);
   7186 				break;
   7187 			}
   7188 		}
   7189 
   7190 		/* Grab a packet off the queue. */
   7191 		if (is_transmit)
   7192 			m0 = pcq_get(txq->txq_interq);
   7193 		else
   7194 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7195 		if (m0 == NULL)
   7196 			break;
   7197 
   7198 		DPRINTF(WM_DEBUG_TX,
   7199 		    ("%s: TX: have packet to transmit: %p\n",
   7200 		    device_xname(sc->sc_dev), m0));
   7201 
   7202 		txs = &txq->txq_soft[txq->txq_snext];
   7203 		dmamap = txs->txs_dmamap;
   7204 
   7205 		use_tso = (m0->m_pkthdr.csum_flags &
   7206 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7207 
   7208 		/*
   7209 		 * So says the Linux driver:
   7210 		 * The controller does a simple calculation to make sure
   7211 		 * there is enough room in the FIFO before initiating the
   7212 		 * DMA for each buffer.  The calc is:
   7213 		 *	4 = ceil(buffer len / MSS)
   7214 		 * To make sure we don't overrun the FIFO, adjust the max
   7215 		 * buffer len if the MSS drops.
   7216 		 */
   7217 		dmamap->dm_maxsegsz =
   7218 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7219 		    ? m0->m_pkthdr.segsz << 2
   7220 		    : WTX_MAX_LEN;
   7221 
   7222 		/*
   7223 		 * Load the DMA map.  If this fails, the packet either
   7224 		 * didn't fit in the allotted number of segments, or we
   7225 		 * were short on resources.  For the too-many-segments
   7226 		 * case, we simply report an error and drop the packet,
   7227 		 * since we can't sanely copy a jumbo packet to a single
   7228 		 * buffer.
   7229 		 */
   7230 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7231 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7232 		if (error) {
   7233 			if (error == EFBIG) {
   7234 				WM_Q_EVCNT_INCR(txq, txdrop);
   7235 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7236 				    "DMA segments, dropping...\n",
   7237 				    device_xname(sc->sc_dev));
   7238 				wm_dump_mbuf_chain(sc, m0);
   7239 				m_freem(m0);
   7240 				continue;
   7241 			}
   7242 			/*  Short on resources, just stop for now. */
   7243 			DPRINTF(WM_DEBUG_TX,
   7244 			    ("%s: TX: dmamap load failed: %d\n",
   7245 			    device_xname(sc->sc_dev), error));
   7246 			break;
   7247 		}
   7248 
   7249 		segs_needed = dmamap->dm_nsegs;
   7250 		if (use_tso) {
   7251 			/* For sentinel descriptor; see below. */
   7252 			segs_needed++;
   7253 		}
   7254 
   7255 		/*
   7256 		 * Ensure we have enough descriptors free to describe
   7257 		 * the packet.  Note, we always reserve one descriptor
   7258 		 * at the end of the ring due to the semantics of the
   7259 		 * TDT register, plus one more in the event we need
   7260 		 * to load offload context.
   7261 		 */
   7262 		if (segs_needed > txq->txq_free - 2) {
   7263 			/*
   7264 			 * Not enough free descriptors to transmit this
   7265 			 * packet.  We haven't committed anything yet,
   7266 			 * so just unload the DMA map, put the packet
    7267 			 * back on the queue, and punt.  Notify the upper
   7268 			 * layer that there are no more slots left.
   7269 			 */
   7270 			DPRINTF(WM_DEBUG_TX,
   7271 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7272 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7273 			    segs_needed, txq->txq_free - 1));
   7274 			if (!is_transmit)
   7275 				ifp->if_flags |= IFF_OACTIVE;
   7276 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7277 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7278 			WM_Q_EVCNT_INCR(txq, txdstall);
   7279 			break;
   7280 		}
   7281 
   7282 		/*
   7283 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7284 		 * once we know we can transmit the packet, since we
   7285 		 * do some internal FIFO space accounting here.
   7286 		 */
   7287 		if (sc->sc_type == WM_T_82547 &&
   7288 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7289 			DPRINTF(WM_DEBUG_TX,
   7290 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7291 			    device_xname(sc->sc_dev)));
   7292 			if (!is_transmit)
   7293 				ifp->if_flags |= IFF_OACTIVE;
   7294 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7295 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7296 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7297 			break;
   7298 		}
   7299 
   7300 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7301 
   7302 		DPRINTF(WM_DEBUG_TX,
   7303 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7304 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7305 
   7306 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7307 
   7308 		/*
   7309 		 * Store a pointer to the packet so that we can free it
   7310 		 * later.
   7311 		 *
   7312 		 * Initially, we consider the number of descriptors the
   7313 		 * packet uses the number of DMA segments.  This may be
   7314 		 * incremented by 1 if we do checksum offload (a descriptor
   7315 		 * is used to set the checksum context).
   7316 		 */
   7317 		txs->txs_mbuf = m0;
   7318 		txs->txs_firstdesc = txq->txq_next;
   7319 		txs->txs_ndesc = segs_needed;
   7320 
   7321 		/* Set up offload parameters for this packet. */
   7322 		if (m0->m_pkthdr.csum_flags &
   7323 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7324 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7325 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7326 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7327 					  &cksumfields) != 0) {
   7328 				/* Error message already displayed. */
   7329 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7330 				continue;
   7331 			}
   7332 		} else {
   7333 			cksumcmd = 0;
   7334 			cksumfields = 0;
   7335 		}
   7336 
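         		/*
         		 * WTX_CMD_IDE delays the Tx interrupt; WTX_CMD_IFCS
         		 * has the chip insert the Ethernet FCS.
         		 */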
   7337 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7338 
   7339 		/* Sync the DMA map. */
   7340 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7341 		    BUS_DMASYNC_PREWRITE);
   7342 
   7343 		/* Initialize the transmit descriptor. */
   7344 		for (nexttx = txq->txq_next, seg = 0;
   7345 		     seg < dmamap->dm_nsegs; seg++) {
   7346 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7347 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7348 			     seglen != 0;
   7349 			     curaddr += curlen, seglen -= curlen,
   7350 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7351 				curlen = seglen;
   7352 
   7353 				/*
   7354 				 * So says the Linux driver:
   7355 				 * Work around for premature descriptor
   7356 				 * write-backs in TSO mode.  Append a
   7357 				 * 4-byte sentinel descriptor.
   7358 				 */
   7359 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7360 				    curlen > 8)
   7361 					curlen -= 4;
   7362 
   7363 				wm_set_dma_addr(
   7364 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7365 				txq->txq_descs[nexttx].wtx_cmdlen
   7366 				    = htole32(cksumcmd | curlen);
   7367 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7368 				    = 0;
   7369 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7370 				    = cksumfields;
   7371 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7372 				lasttx = nexttx;
   7373 
   7374 				DPRINTF(WM_DEBUG_TX,
   7375 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7376 				     "len %#04zx\n",
   7377 				    device_xname(sc->sc_dev), nexttx,
   7378 				    (uint64_t)curaddr, curlen));
   7379 			}
   7380 		}
   7381 
   7382 		KASSERT(lasttx != -1);
   7383 
   7384 		/*
   7385 		 * Set up the command byte on the last descriptor of
   7386 		 * the packet.  If we're in the interrupt delay window,
   7387 		 * delay the interrupt.
   7388 		 */
   7389 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7390 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7391 
   7392 		/*
   7393 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7394 		 * up the descriptor to encapsulate the packet for us.
   7395 		 *
   7396 		 * This is only valid on the last descriptor of the packet.
   7397 		 */
   7398 		if (vlan_has_tag(m0)) {
   7399 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7400 			    htole32(WTX_CMD_VLE);
   7401 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7402 			    = htole16(vlan_get_tag(m0));
   7403 		}
   7404 
   7405 		txs->txs_lastdesc = lasttx;
   7406 
   7407 		DPRINTF(WM_DEBUG_TX,
   7408 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7409 		    device_xname(sc->sc_dev),
   7410 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7411 
   7412 		/* Sync the descriptors we're using. */
   7413 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7414 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7415 
   7416 		/* Give the packet to the chip. */
   7417 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7418 
   7419 		DPRINTF(WM_DEBUG_TX,
   7420 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7421 
   7422 		DPRINTF(WM_DEBUG_TX,
   7423 		    ("%s: TX: finished transmitting packet, job %d\n",
   7424 		    device_xname(sc->sc_dev), txq->txq_snext));
   7425 
   7426 		/* Advance the tx pointer. */
   7427 		txq->txq_free -= txs->txs_ndesc;
   7428 		txq->txq_next = nexttx;
   7429 
   7430 		txq->txq_sfree--;
   7431 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7432 
   7433 		/* Pass the packet to any BPF listeners. */
   7434 		bpf_mtap(ifp, m0);
   7435 	}
   7436 
   7437 	if (m0 != NULL) {
   7438 		if (!is_transmit)
   7439 			ifp->if_flags |= IFF_OACTIVE;
   7440 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7441 		WM_Q_EVCNT_INCR(txq, txdrop);
   7442 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7443 			__func__));
   7444 		m_freem(m0);
   7445 	}
   7446 
   7447 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7448 		/* No more slots; notify upper layer. */
   7449 		if (!is_transmit)
   7450 			ifp->if_flags |= IFF_OACTIVE;
   7451 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7452 	}
   7453 
   7454 	if (txq->txq_free != ofree) {
   7455 		/* Set a watchdog timer in case the chip flakes out. */
   7456 		txq->txq_lastsent = time_uptime;
   7457 		txq->txq_watchdog = true;
   7458 	}
   7459 }
   7460 
   7461 /*
   7462  * wm_nq_tx_offload:
   7463  *
   7464  *	Set up TCP/IP checksumming parameters for the
   7465  *	specified packet, for NEWQUEUE devices
   7466  */
   7467 static int
   7468 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7469     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7470 {
   7471 	struct mbuf *m0 = txs->txs_mbuf;
   7472 	uint32_t vl_len, mssidx, cmdc;
   7473 	struct ether_header *eh;
   7474 	int offset, iphl;
   7475 
   7476 	/*
   7477 	 * XXX It would be nice if the mbuf pkthdr had offset
   7478 	 * fields for the protocol headers.
   7479 	 */
   7480 	*cmdlenp = 0;
   7481 	*fieldsp = 0;
   7482 
   7483 	eh = mtod(m0, struct ether_header *);
   7484 	switch (htons(eh->ether_type)) {
   7485 	case ETHERTYPE_IP:
   7486 	case ETHERTYPE_IPV6:
   7487 		offset = ETHER_HDR_LEN;
   7488 		break;
   7489 
   7490 	case ETHERTYPE_VLAN:
   7491 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7492 		break;
   7493 
   7494 	default:
   7495 		/* Don't support this protocol or encapsulation. */
   7496 		*do_csum = false;
   7497 		return 0;
   7498 	}
   7499 	*do_csum = true;
   7500 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7501 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7502 
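         	/*
         	 * vl_len packs the MAC header length, the IP header length
         	 * and the VLAN tag into a single context-descriptor word.
         	 */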
   7503 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7504 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7505 
   7506 	if ((m0->m_pkthdr.csum_flags &
   7507 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7508 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7509 	} else {
   7510 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7511 	}
   7512 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7513 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7514 
   7515 	if (vlan_has_tag(m0)) {
   7516 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7517 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7518 		*cmdlenp |= NQTX_CMD_VLE;
   7519 	}
   7520 
   7521 	mssidx = 0;
   7522 
   7523 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7524 		int hlen = offset + iphl;
   7525 		int tcp_hlen;
   7526 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7527 
   7528 		if (__predict_false(m0->m_len <
   7529 				    (hlen + sizeof(struct tcphdr)))) {
   7530 			/*
   7531 			 * TCP/IP headers are not in the first mbuf; we need
   7532 			 * to do this the slow and painful way.  Let's just
   7533 			 * hope this doesn't happen very often.
   7534 			 */
   7535 			struct tcphdr th;
   7536 
   7537 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7538 
   7539 			m_copydata(m0, hlen, sizeof(th), &th);
   7540 			if (v4) {
   7541 				struct ip ip;
   7542 
   7543 				m_copydata(m0, offset, sizeof(ip), &ip);
   7544 				ip.ip_len = 0;
   7545 				m_copyback(m0,
   7546 				    offset + offsetof(struct ip, ip_len),
   7547 				    sizeof(ip.ip_len), &ip.ip_len);
   7548 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7549 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7550 			} else {
   7551 				struct ip6_hdr ip6;
   7552 
   7553 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7554 				ip6.ip6_plen = 0;
   7555 				m_copyback(m0,
   7556 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7557 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7558 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7559 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7560 			}
   7561 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7562 			    sizeof(th.th_sum), &th.th_sum);
   7563 
   7564 			tcp_hlen = th.th_off << 2;
   7565 		} else {
   7566 			/*
   7567 			 * TCP/IP headers are in the first mbuf; we can do
   7568 			 * this the easy way.
   7569 			 */
   7570 			struct tcphdr *th;
   7571 
   7572 			if (v4) {
   7573 				struct ip *ip =
   7574 				    (void *)(mtod(m0, char *) + offset);
   7575 				th = (void *)(mtod(m0, char *) + hlen);
   7576 
   7577 				ip->ip_len = 0;
   7578 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7579 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7580 			} else {
   7581 				struct ip6_hdr *ip6 =
   7582 				    (void *)(mtod(m0, char *) + offset);
   7583 				th = (void *)(mtod(m0, char *) + hlen);
   7584 
   7585 				ip6->ip6_plen = 0;
   7586 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7587 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7588 			}
   7589 			tcp_hlen = th->th_off << 2;
   7590 		}
   7591 		hlen += tcp_hlen;
   7592 		*cmdlenp |= NQTX_CMD_TSE;
   7593 
   7594 		if (v4) {
   7595 			WM_Q_EVCNT_INCR(txq, txtso);
   7596 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7597 		} else {
   7598 			WM_Q_EVCNT_INCR(txq, txtso6);
   7599 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7600 		}
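         		/*
         		 * With TSE, PAYLEN covers only the TCP payload; the
         		 * L2-L4 headers are excluded.
         		 */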
   7601 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7602 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7603 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7604 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7605 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7606 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7607 	} else {
   7608 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7609 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7610 	}
   7611 
   7612 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7613 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7614 		cmdc |= NQTXC_CMD_IP4;
   7615 	}
   7616 
   7617 	if (m0->m_pkthdr.csum_flags &
   7618 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7619 		WM_Q_EVCNT_INCR(txq, txtusum);
   7620 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7621 			cmdc |= NQTXC_CMD_TCP;
   7622 		} else {
   7623 			cmdc |= NQTXC_CMD_UDP;
   7624 		}
   7625 		cmdc |= NQTXC_CMD_IP4;
   7626 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7627 	}
   7628 	if (m0->m_pkthdr.csum_flags &
   7629 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7630 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7631 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7632 			cmdc |= NQTXC_CMD_TCP;
   7633 		} else {
   7634 			cmdc |= NQTXC_CMD_UDP;
   7635 		}
   7636 		cmdc |= NQTXC_CMD_IP6;
   7637 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7638 	}
   7639 
    7640 	/*
    7641 	 * We don't have to write a context descriptor for every packet on
    7642 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7643 	 * I354, I210 and I211.  For these controllers it is enough to
    7644 	 * write one context descriptor per Tx queue.
    7645 	 * Writing a context descriptor for every packet adds overhead,
    7646 	 * but it does not cause problems.
    7647 	 */
   7648 	/* Fill in the context descriptor. */
   7649 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7650 	    htole32(vl_len);
   7651 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7652 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7653 	    htole32(cmdc);
   7654 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7655 	    htole32(mssidx);
   7656 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7657 	DPRINTF(WM_DEBUG_TX,
   7658 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7659 	    txq->txq_next, 0, vl_len));
   7660 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7661 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7662 	txs->txs_ndesc++;
   7663 	return 0;
   7664 }
   7665 
   7666 /*
   7667  * wm_nq_start:		[ifnet interface function]
   7668  *
   7669  *	Start packet transmission on the interface for NEWQUEUE devices
   7670  */
   7671 static void
   7672 wm_nq_start(struct ifnet *ifp)
   7673 {
   7674 	struct wm_softc *sc = ifp->if_softc;
   7675 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7676 
   7677 #ifdef WM_MPSAFE
   7678 	KASSERT(if_is_mpsafe(ifp));
   7679 #endif
   7680 	/*
   7681 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7682 	 */
   7683 
   7684 	mutex_enter(txq->txq_lock);
   7685 	if (!txq->txq_stopping)
   7686 		wm_nq_start_locked(ifp);
   7687 	mutex_exit(txq->txq_lock);
   7688 }
   7689 
   7690 static void
   7691 wm_nq_start_locked(struct ifnet *ifp)
   7692 {
   7693 	struct wm_softc *sc = ifp->if_softc;
   7694 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7695 
   7696 	wm_nq_send_common_locked(ifp, txq, false);
   7697 }
   7698 
   7699 static int
   7700 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7701 {
   7702 	int qid;
   7703 	struct wm_softc *sc = ifp->if_softc;
   7704 	struct wm_txqueue *txq;
   7705 
   7706 	qid = wm_select_txqueue(ifp, m);
   7707 	txq = &sc->sc_queue[qid].wmq_txq;
   7708 
   7709 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7710 		m_freem(m);
   7711 		WM_Q_EVCNT_INCR(txq, txdrop);
   7712 		return ENOBUFS;
   7713 	}
   7714 
   7715 	/*
   7716 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7717 	 */
   7718 	ifp->if_obytes += m->m_pkthdr.len;
   7719 	if (m->m_flags & M_MCAST)
   7720 		ifp->if_omcasts++;
   7721 
   7722 	/*
    7723 	 * There are two situations in which this mutex_tryenter() can
    7724 	 * fail at run time:
    7725 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7726 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    7727 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7728 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7729 	 * In case (2), the last packet is likewise dequeued by
    7730 	 * wm_deferred_start_locked(), so it does not get stuck either.
   7731 	 */
   7732 	if (mutex_tryenter(txq->txq_lock)) {
   7733 		if (!txq->txq_stopping)
   7734 			wm_nq_transmit_locked(ifp, txq);
   7735 		mutex_exit(txq->txq_lock);
   7736 	}
   7737 
   7738 	return 0;
   7739 }
   7740 
   7741 static void
   7742 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7743 {
   7744 
   7745 	wm_nq_send_common_locked(ifp, txq, true);
   7746 }
   7747 
   7748 static void
   7749 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7750     bool is_transmit)
   7751 {
   7752 	struct wm_softc *sc = ifp->if_softc;
   7753 	struct mbuf *m0;
   7754 	struct wm_txsoft *txs;
   7755 	bus_dmamap_t dmamap;
   7756 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7757 	bool do_csum, sent;
   7758 
   7759 	KASSERT(mutex_owned(txq->txq_lock));
   7760 
   7761 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7762 		return;
   7763 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7764 		return;
   7765 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7766 		return;
   7767 
   7768 	sent = false;
   7769 
   7770 	/*
   7771 	 * Loop through the send queue, setting up transmit descriptors
   7772 	 * until we drain the queue, or use up all available transmit
   7773 	 * descriptors.
   7774 	 */
   7775 	for (;;) {
   7776 		m0 = NULL;
   7777 
   7778 		/* Get a work queue entry. */
   7779 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7780 			wm_txeof(txq, UINT_MAX);
   7781 			if (txq->txq_sfree == 0) {
   7782 				DPRINTF(WM_DEBUG_TX,
   7783 				    ("%s: TX: no free job descriptors\n",
   7784 					device_xname(sc->sc_dev)));
   7785 				WM_Q_EVCNT_INCR(txq, txsstall);
   7786 				break;
   7787 			}
   7788 		}
   7789 
   7790 		/* Grab a packet off the queue. */
   7791 		if (is_transmit)
   7792 			m0 = pcq_get(txq->txq_interq);
   7793 		else
   7794 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7795 		if (m0 == NULL)
   7796 			break;
   7797 
   7798 		DPRINTF(WM_DEBUG_TX,
   7799 		    ("%s: TX: have packet to transmit: %p\n",
   7800 		    device_xname(sc->sc_dev), m0));
   7801 
   7802 		txs = &txq->txq_soft[txq->txq_snext];
   7803 		dmamap = txs->txs_dmamap;
   7804 
   7805 		/*
   7806 		 * Load the DMA map.  If this fails, the packet either
   7807 		 * didn't fit in the allotted number of segments, or we
   7808 		 * were short on resources.  For the too-many-segments
   7809 		 * case, we simply report an error and drop the packet,
   7810 		 * since we can't sanely copy a jumbo packet to a single
   7811 		 * buffer.
   7812 		 */
   7813 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7814 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7815 		if (error) {
   7816 			if (error == EFBIG) {
   7817 				WM_Q_EVCNT_INCR(txq, txdrop);
   7818 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7819 				    "DMA segments, dropping...\n",
   7820 				    device_xname(sc->sc_dev));
   7821 				wm_dump_mbuf_chain(sc, m0);
   7822 				m_freem(m0);
   7823 				continue;
   7824 			}
   7825 			/* Short on resources, just stop for now. */
   7826 			DPRINTF(WM_DEBUG_TX,
   7827 			    ("%s: TX: dmamap load failed: %d\n",
   7828 			    device_xname(sc->sc_dev), error));
   7829 			break;
   7830 		}
   7831 
   7832 		segs_needed = dmamap->dm_nsegs;
   7833 
   7834 		/*
   7835 		 * Ensure we have enough descriptors free to describe
   7836 		 * the packet.  Note, we always reserve one descriptor
   7837 		 * at the end of the ring due to the semantics of the
   7838 		 * TDT register, plus one more in the event we need
   7839 		 * to load offload context.
   7840 		 */
   7841 		if (segs_needed > txq->txq_free - 2) {
   7842 			/*
   7843 			 * Not enough free descriptors to transmit this
   7844 			 * packet.  We haven't committed anything yet,
   7845 			 * so just unload the DMA map, put the packet
    7846 			 * back on the queue, and punt.  Notify the upper
   7847 			 * layer that there are no more slots left.
   7848 			 */
   7849 			DPRINTF(WM_DEBUG_TX,
   7850 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7851 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7852 			    segs_needed, txq->txq_free - 1));
   7853 			if (!is_transmit)
   7854 				ifp->if_flags |= IFF_OACTIVE;
   7855 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7856 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7857 			WM_Q_EVCNT_INCR(txq, txdstall);
   7858 			break;
   7859 		}
   7860 
   7861 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7862 
   7863 		DPRINTF(WM_DEBUG_TX,
   7864 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7865 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7866 
   7867 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7868 
   7869 		/*
   7870 		 * Store a pointer to the packet so that we can free it
   7871 		 * later.
   7872 		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
   7877 		 */
   7878 		txs->txs_mbuf = m0;
   7879 		txs->txs_firstdesc = txq->txq_next;
   7880 		txs->txs_ndesc = segs_needed;
   7881 
   7882 		/* Set up offload parameters for this packet. */
   7883 		uint32_t cmdlen, fields, dcmdlen;
   7884 		if (m0->m_pkthdr.csum_flags &
   7885 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7886 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7887 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7888 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7889 			    &do_csum) != 0) {
   7890 				/* Error message already displayed. */
   7891 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7892 				continue;
   7893 			}
   7894 		} else {
   7895 			do_csum = false;
   7896 			cmdlen = 0;
   7897 			fields = 0;
   7898 		}
   7899 
   7900 		/* Sync the DMA map. */
   7901 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7902 		    BUS_DMASYNC_PREWRITE);
   7903 
   7904 		/* Initialize the first transmit descriptor. */
   7905 		nexttx = txq->txq_next;
   7906 		if (!do_csum) {
   7907 			/* setup a legacy descriptor */
   7908 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7909 			    dmamap->dm_segs[0].ds_addr);
   7910 			txq->txq_descs[nexttx].wtx_cmdlen =
   7911 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7912 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7913 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7914 			if (vlan_has_tag(m0)) {
   7915 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7916 				    htole32(WTX_CMD_VLE);
   7917 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7918 				    htole16(vlan_get_tag(m0));
   7919 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7921 			}
   7922 			dcmdlen = 0;
   7923 		} else {
   7924 			/* setup an advanced data descriptor */
   7925 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7926 			    htole64(dmamap->dm_segs[0].ds_addr);
   7927 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7928 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7930 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7931 			    htole32(fields);
   7932 			DPRINTF(WM_DEBUG_TX,
   7933 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7934 			    device_xname(sc->sc_dev), nexttx,
   7935 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7936 			DPRINTF(WM_DEBUG_TX,
   7937 			    ("\t 0x%08x%08x\n", fields,
   7938 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7939 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7940 		}
   7941 
   7942 		lasttx = nexttx;
   7943 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
		 */
   7948 		for (seg = 1; seg < dmamap->dm_nsegs;
   7949 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7950 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7951 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7952 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7953 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7954 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7955 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7956 			lasttx = nexttx;
   7957 
   7958 			DPRINTF(WM_DEBUG_TX,
   7959 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7960 			     "len %#04zx\n",
   7961 			    device_xname(sc->sc_dev), nexttx,
   7962 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7963 			    dmamap->dm_segs[seg].ds_len));
   7964 		}
   7965 
   7966 		KASSERT(lasttx != -1);
   7967 
   7968 		/*
   7969 		 * Set up the command byte on the last descriptor of
   7970 		 * the packet.  If we're in the interrupt delay window,
   7971 		 * delay the interrupt.
   7972 		 */
   7973 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7974 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7975 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7976 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7977 
   7978 		txs->txs_lastdesc = lasttx;
   7979 
   7980 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7981 		    device_xname(sc->sc_dev),
   7982 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7983 
   7984 		/* Sync the descriptors we're using. */
   7985 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7986 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7987 
   7988 		/* Give the packet to the chip. */
   7989 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7990 		sent = true;
   7991 
   7992 		DPRINTF(WM_DEBUG_TX,
   7993 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7994 
   7995 		DPRINTF(WM_DEBUG_TX,
   7996 		    ("%s: TX: finished transmitting packet, job %d\n",
   7997 		    device_xname(sc->sc_dev), txq->txq_snext));
   7998 
   7999 		/* Advance the tx pointer. */
   8000 		txq->txq_free -= txs->txs_ndesc;
   8001 		txq->txq_next = nexttx;
   8002 
   8003 		txq->txq_sfree--;
   8004 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8005 
   8006 		/* Pass the packet to any BPF listeners. */
   8007 		bpf_mtap(ifp, m0);
   8008 	}
   8009 
   8010 	if (m0 != NULL) {
   8011 		if (!is_transmit)
   8012 			ifp->if_flags |= IFF_OACTIVE;
   8013 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8014 		WM_Q_EVCNT_INCR(txq, txdrop);
   8015 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8016 			__func__));
   8017 		m_freem(m0);
   8018 	}
   8019 
   8020 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8021 		/* No more slots; notify upper layer. */
   8022 		if (!is_transmit)
   8023 			ifp->if_flags |= IFF_OACTIVE;
   8024 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8025 	}
   8026 
   8027 	if (sent) {
   8028 		/* Set a watchdog timer in case the chip flakes out. */
   8029 		txq->txq_lastsent = time_uptime;
   8030 		txq->txq_watchdog = true;
   8031 	}
   8032 }
   8033 
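/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission from softint context.  Called with txq_lock
 *	held.  For queue 0 this also kicks the if_start path, which is
 *	still used by ALTQ and single-queue configurations.
 */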
   8034 static void
   8035 wm_deferred_start_locked(struct wm_txqueue *txq)
   8036 {
   8037 	struct wm_softc *sc = txq->txq_sc;
   8038 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8039 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8040 	int qid = wmq->wmq_id;
   8041 
   8042 	KASSERT(mutex_owned(txq->txq_lock));
   8043 
	/* The caller releases txq_lock. */
	if (txq->txq_stopping)
		return;
   8048 
   8049 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8051 		if (qid == 0)
   8052 			wm_nq_start_locked(ifp);
   8053 		wm_nq_transmit_locked(ifp, txq);
   8054 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8056 		if (qid == 0)
   8057 			wm_start_locked(ifp);
   8058 		wm_transmit_locked(ifp, txq);
   8059 	}
   8060 }
   8061 
   8062 /* Interrupt */
   8063 
/*
 * wm_txeof:
 *
 *	Helper; handle transmit interrupts.  Reclaim at most "limit"
 *	completed jobs; return true if more work remains.
 */
   8069 static bool
   8070 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8071 {
   8072 	struct wm_softc *sc = txq->txq_sc;
   8073 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8074 	struct wm_txsoft *txs;
   8075 	int count = 0;
   8076 	int i;
   8077 	uint8_t status;
   8078 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8079 	bool more = false;
   8080 
   8081 	KASSERT(mutex_owned(txq->txq_lock));
   8082 
   8083 	if (txq->txq_stopping)
   8084 		return false;
   8085 
   8086 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8088 	if (wmq->wmq_id == 0)
   8089 		ifp->if_flags &= ~IFF_OACTIVE;
   8090 
   8091 	/*
   8092 	 * Go through the Tx list and free mbufs for those
   8093 	 * frames which have been transmitted.
   8094 	 */
   8095 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8096 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8097 		if (limit-- == 0) {
   8098 			more = true;
   8099 			DPRINTF(WM_DEBUG_TX,
   8100 			    ("%s: TX: loop limited, job %d is not processed\n",
   8101 				device_xname(sc->sc_dev), i));
   8102 			break;
   8103 		}
   8104 
   8105 		txs = &txq->txq_soft[i];
   8106 
   8107 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8108 			device_xname(sc->sc_dev), i));
   8109 
   8110 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8111 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8112 
   8113 		status =
   8114 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8115 		if ((status & WTX_ST_DD) == 0) {
   8116 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8117 			    BUS_DMASYNC_PREREAD);
   8118 			break;
   8119 		}
   8120 
   8121 		count++;
   8122 		DPRINTF(WM_DEBUG_TX,
   8123 		    ("%s: TX: job %d done: descs %d..%d\n",
   8124 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8125 		    txs->txs_lastdesc));
   8126 
   8127 		/*
   8128 		 * XXX We should probably be using the statistics
   8129 		 * XXX registers, but I don't know if they exist
   8130 		 * XXX on chips before the i82544.
   8131 		 */
   8132 
   8133 #ifdef WM_EVENT_COUNTERS
   8134 		if (status & WTX_ST_TU)
   8135 			WM_Q_EVCNT_INCR(txq, tu);
   8136 #endif /* WM_EVENT_COUNTERS */
   8137 
   8138 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8139 			ifp->if_oerrors++;
   8140 			if (status & WTX_ST_LC)
   8141 				log(LOG_WARNING, "%s: late collision\n",
   8142 				    device_xname(sc->sc_dev));
   8143 			else if (status & WTX_ST_EC) {
   8144 				ifp->if_collisions += 16;
   8145 				log(LOG_WARNING, "%s: excessive collisions\n",
   8146 				    device_xname(sc->sc_dev));
   8147 			}
   8148 		} else
   8149 			ifp->if_opackets++;
   8150 
   8151 		txq->txq_packets++;
   8152 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8153 
   8154 		txq->txq_free += txs->txs_ndesc;
   8155 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8156 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8157 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8158 		m_freem(txs->txs_mbuf);
   8159 		txs->txs_mbuf = NULL;
   8160 	}
   8161 
   8162 	/* Update the dirty transmit buffer pointer. */
   8163 	txq->txq_sdirty = i;
   8164 	DPRINTF(WM_DEBUG_TX,
   8165 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8166 
   8167 	if (count != 0)
   8168 		rnd_add_uint32(&sc->rnd_source, count);
   8169 
   8170 	/*
   8171 	 * If there are no more pending transmissions, cancel the watchdog
   8172 	 * timer.
   8173 	 */
   8174 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8175 		txq->txq_watchdog = false;
   8176 
   8177 	return more;
   8178 }
   8179 
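/*
 * RX descriptor accessors.
 *
 * The descriptor layout depends on the chip: the 82574 uses the
 * extended format (rxq_ext_descs), chips with WM_F_NEWQUEUE use the
 * advanced format (rxq_nq_descs), and everything else uses the legacy
 * format (rxq_descs).  The wm_rxdesc_* helpers below hide those
 * differences from wm_rxeof().
 */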
   8180 static inline uint32_t
   8181 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8182 {
   8183 	struct wm_softc *sc = rxq->rxq_sc;
   8184 
   8185 	if (sc->sc_type == WM_T_82574)
   8186 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8187 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8188 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8189 	else
   8190 		return rxq->rxq_descs[idx].wrx_status;
   8191 }
   8192 
   8193 static inline uint32_t
   8194 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8195 {
   8196 	struct wm_softc *sc = rxq->rxq_sc;
   8197 
   8198 	if (sc->sc_type == WM_T_82574)
   8199 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8200 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8201 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8202 	else
   8203 		return rxq->rxq_descs[idx].wrx_errors;
   8204 }
   8205 
   8206 static inline uint16_t
   8207 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8208 {
   8209 	struct wm_softc *sc = rxq->rxq_sc;
   8210 
   8211 	if (sc->sc_type == WM_T_82574)
   8212 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8213 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8214 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8215 	else
   8216 		return rxq->rxq_descs[idx].wrx_special;
   8217 }
   8218 
   8219 static inline int
   8220 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8221 {
   8222 	struct wm_softc *sc = rxq->rxq_sc;
   8223 
   8224 	if (sc->sc_type == WM_T_82574)
   8225 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8226 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8227 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8228 	else
   8229 		return rxq->rxq_descs[idx].wrx_len;
   8230 }
   8231 
   8232 #ifdef WM_DEBUG
   8233 static inline uint32_t
   8234 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8235 {
   8236 	struct wm_softc *sc = rxq->rxq_sc;
   8237 
   8238 	if (sc->sc_type == WM_T_82574)
   8239 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8240 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8241 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8242 	else
   8243 		return 0;
   8244 }
   8245 
   8246 static inline uint8_t
   8247 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8248 {
   8249 	struct wm_softc *sc = rxq->rxq_sc;
   8250 
   8251 	if (sc->sc_type == WM_T_82574)
   8252 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8253 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8254 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8255 	else
   8256 		return 0;
   8257 }
   8258 #endif /* WM_DEBUG */
   8259 
   8260 static inline bool
   8261 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8262     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8263 {
   8264 
   8265 	if (sc->sc_type == WM_T_82574)
   8266 		return (status & ext_bit) != 0;
   8267 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8268 		return (status & nq_bit) != 0;
   8269 	else
   8270 		return (status & legacy_bit) != 0;
   8271 }
   8272 
   8273 static inline bool
   8274 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8275     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8276 {
   8277 
   8278 	if (sc->sc_type == WM_T_82574)
   8279 		return (error & ext_bit) != 0;
   8280 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8281 		return (error & nq_bit) != 0;
   8282 	else
   8283 		return (error & legacy_bit) != 0;
   8284 }
   8285 
   8286 static inline bool
   8287 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8288 {
   8289 
   8290 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8291 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8292 		return true;
   8293 	else
   8294 		return false;
   8295 }
   8296 
   8297 static inline bool
   8298 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8299 {
   8300 	struct wm_softc *sc = rxq->rxq_sc;
   8301 
   8302 	/* XXXX missing error bit for newqueue? */
   8303 	if (wm_rxdesc_is_set_error(sc, errors,
   8304 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8305 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8306 		NQRXC_ERROR_RXE)) {
   8307 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8308 			log(LOG_WARNING, "%s: symbol error\n",
   8309 			    device_xname(sc->sc_dev));
   8310 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8311 			log(LOG_WARNING, "%s: receive sequence error\n",
   8312 			    device_xname(sc->sc_dev));
   8313 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8314 			log(LOG_WARNING, "%s: CRC error\n",
   8315 			    device_xname(sc->sc_dev));
   8316 		return true;
   8317 	}
   8318 
   8319 	return false;
   8320 }
   8321 
   8322 static inline bool
   8323 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8324 {
   8325 	struct wm_softc *sc = rxq->rxq_sc;
   8326 
   8327 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8328 		NQRXC_STATUS_DD)) {
   8329 		/* We have processed all of the receive descriptors. */
   8330 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8331 		return false;
   8332 	}
   8333 
   8334 	return true;
   8335 }
   8336 
   8337 static inline bool
   8338 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8339     struct mbuf *m)
   8340 {
   8341 
   8342 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8343 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8344 		vlan_set_tag(m, le16toh(vlantag));
   8345 	}
   8346 
   8347 	return true;
   8348 }
   8349 
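/*
 * wm_rxdesc_ensure_checksum:
 *
 *	Translate the descriptor's checksum status/error bits into mbuf
 *	M_CSUM_* flags, unless the chip reported that checksumming was
 *	skipped (IXSM).
 */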
   8350 static inline void
   8351 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8352     uint32_t errors, struct mbuf *m)
   8353 {
   8354 	struct wm_softc *sc = rxq->rxq_sc;
   8355 
   8356 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8357 		if (wm_rxdesc_is_set_status(sc, status,
   8358 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8359 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8360 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8361 			if (wm_rxdesc_is_set_error(sc, errors,
   8362 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8363 				m->m_pkthdr.csum_flags |=
   8364 					M_CSUM_IPv4_BAD;
   8365 		}
   8366 		if (wm_rxdesc_is_set_status(sc, status,
   8367 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8368 			/*
   8369 			 * Note: we don't know if this was TCP or UDP,
   8370 			 * so we just set both bits, and expect the
   8371 			 * upper layers to deal.
   8372 			 */
   8373 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8374 			m->m_pkthdr.csum_flags |=
   8375 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8376 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8377 			if (wm_rxdesc_is_set_error(sc, errors,
   8378 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8379 				m->m_pkthdr.csum_flags |=
   8380 					M_CSUM_TCP_UDP_BAD;
   8381 		}
   8382 	}
   8383 }
   8384 
/*
 * wm_rxeof:
 *
 *	Helper; handle receive interrupts.  Process at most "limit"
 *	descriptors; return true if more work remains.
 */
   8390 static bool
   8391 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8392 {
   8393 	struct wm_softc *sc = rxq->rxq_sc;
   8394 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8395 	struct wm_rxsoft *rxs;
   8396 	struct mbuf *m;
   8397 	int i, len;
   8398 	int count = 0;
   8399 	uint32_t status, errors;
   8400 	uint16_t vlantag;
   8401 	bool more = false;
   8402 
   8403 	KASSERT(mutex_owned(rxq->rxq_lock));
   8404 
   8405 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8406 		if (limit-- == 0) {
   8407 			rxq->rxq_ptr = i;
   8408 			more = true;
   8409 			DPRINTF(WM_DEBUG_RX,
   8410 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8411 				device_xname(sc->sc_dev), i));
   8412 			break;
   8413 		}
   8414 
   8415 		rxs = &rxq->rxq_soft[i];
   8416 
   8417 		DPRINTF(WM_DEBUG_RX,
   8418 		    ("%s: RX: checking descriptor %d\n",
   8419 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8421 
   8422 		status = wm_rxdesc_get_status(rxq, i);
   8423 		errors = wm_rxdesc_get_errors(rxq, i);
   8424 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8425 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8426 #ifdef WM_DEBUG
   8427 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8428 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8429 #endif
   8430 
   8431 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding
			 * rxq_lock, consistent with the counter updates.
			 */
   8436 			rxq->rxq_ptr = i;
   8437 			break;
   8438 		}
   8439 
   8440 		count++;
   8441 		if (__predict_false(rxq->rxq_discard)) {
   8442 			DPRINTF(WM_DEBUG_RX,
   8443 			    ("%s: RX: discarding contents of descriptor %d\n",
   8444 			    device_xname(sc->sc_dev), i));
   8445 			wm_init_rxdesc(rxq, i);
   8446 			if (wm_rxdesc_is_eop(rxq, status)) {
   8447 				/* Reset our state. */
   8448 				DPRINTF(WM_DEBUG_RX,
   8449 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8450 				    device_xname(sc->sc_dev)));
   8451 				rxq->rxq_discard = 0;
   8452 			}
   8453 			continue;
   8454 		}
   8455 
   8456 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8457 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8458 
   8459 		m = rxs->rxs_mbuf;
   8460 
   8461 		/*
   8462 		 * Add a new receive buffer to the ring, unless of
   8463 		 * course the length is zero. Treat the latter as a
   8464 		 * failed mapping.
   8465 		 */
   8466 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8467 			/*
   8468 			 * Failed, throw away what we've done so
   8469 			 * far, and discard the rest of the packet.
   8470 			 */
   8471 			ifp->if_ierrors++;
   8472 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8473 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8474 			wm_init_rxdesc(rxq, i);
   8475 			if (!wm_rxdesc_is_eop(rxq, status))
   8476 				rxq->rxq_discard = 1;
   8477 			if (rxq->rxq_head != NULL)
   8478 				m_freem(rxq->rxq_head);
   8479 			WM_RXCHAIN_RESET(rxq);
   8480 			DPRINTF(WM_DEBUG_RX,
   8481 			    ("%s: RX: Rx buffer allocation failed, "
   8482 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8483 			    rxq->rxq_discard ? " (discard)" : ""));
   8484 			continue;
   8485 		}
   8486 
   8487 		m->m_len = len;
   8488 		rxq->rxq_len += len;
   8489 		DPRINTF(WM_DEBUG_RX,
   8490 		    ("%s: RX: buffer at %p len %d\n",
   8491 		    device_xname(sc->sc_dev), m->m_data, len));
   8492 
   8493 		/* If this is not the end of the packet, keep looking. */
   8494 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8495 			WM_RXCHAIN_LINK(rxq, m);
   8496 			DPRINTF(WM_DEBUG_RX,
   8497 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8498 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8499 			continue;
   8500 		}
   8501 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354
		 * and I21[01] (not all chips can be configured to
		 * strip it), so we need to trim it.  We may also need
		 * to adjust the length of the previous mbuf in the
		 * chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim
		 * there.
		 */
   8512 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8513 		    && (sc->sc_type != WM_T_I210)
   8514 		    && (sc->sc_type != WM_T_I211)) {
   8515 			if (m->m_len < ETHER_CRC_LEN) {
   8516 				rxq->rxq_tail->m_len
   8517 				    -= (ETHER_CRC_LEN - m->m_len);
   8518 				m->m_len = 0;
   8519 			} else
   8520 				m->m_len -= ETHER_CRC_LEN;
   8521 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8522 		} else
   8523 			len = rxq->rxq_len;
   8524 
   8525 		WM_RXCHAIN_LINK(rxq, m);
   8526 
   8527 		*rxq->rxq_tailp = NULL;
   8528 		m = rxq->rxq_head;
   8529 
   8530 		WM_RXCHAIN_RESET(rxq);
   8531 
   8532 		DPRINTF(WM_DEBUG_RX,
   8533 		    ("%s: RX: have entire packet, len -> %d\n",
   8534 		    device_xname(sc->sc_dev), len));
   8535 
   8536 		/* If an error occurred, update stats and drop the packet. */
   8537 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8538 			m_freem(m);
   8539 			continue;
   8540 		}
   8541 
   8542 		/* No errors.  Receive the packet. */
   8543 		m_set_rcvif(m, ifp);
   8544 		m->m_pkthdr.len = len;
   8545 		/*
   8546 		 * TODO
		 * We should save the RSS hash and RSS type in this mbuf.
   8548 		 */
   8549 		DPRINTF(WM_DEBUG_RX,
   8550 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8551 			device_xname(sc->sc_dev), rsstype, rsshash));
   8552 
   8553 		/*
   8554 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8555 		 * for us.  Associate the tag with the packet.
   8556 		 */
   8557 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8558 			continue;
   8559 
   8560 		/* Set up checksum info for this packet. */
   8561 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock,
		 * consistent with the counter updates.
		 */
   8566 		rxq->rxq_ptr = i;
   8567 		rxq->rxq_packets++;
   8568 		rxq->rxq_bytes += len;
   8569 		mutex_exit(rxq->rxq_lock);
   8570 
   8571 		/* Pass it on. */
   8572 		if_percpuq_enqueue(sc->sc_ipq, m);
   8573 
   8574 		mutex_enter(rxq->rxq_lock);
   8575 
   8576 		if (rxq->rxq_stopping)
   8577 			break;
   8578 	}
   8579 
   8580 	if (count != 0)
   8581 		rnd_add_uint32(&sc->rnd_source, count);
   8582 
   8583 	DPRINTF(WM_DEBUG_RX,
   8584 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8585 
   8586 	return more;
   8587 }
   8588 
   8589 /*
   8590  * wm_linkintr_gmii:
   8591  *
   8592  *	Helper; handle link interrupts for GMII.
   8593  */
   8594 static void
   8595 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8596 {
   8597 
   8598 	KASSERT(WM_CORE_LOCKED(sc));
   8599 
   8600 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8601 		__func__));
   8602 
   8603 	if (icr & ICR_LSC) {
   8604 		uint32_t reg;
   8605 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8606 
   8607 		if ((status & STATUS_LU) != 0) {
   8608 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8609 				device_xname(sc->sc_dev),
   8610 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8611 		} else {
   8612 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8613 				device_xname(sc->sc_dev)));
   8614 		}
   8615 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8616 			wm_gig_downshift_workaround_ich8lan(sc);
   8617 
   8618 		if ((sc->sc_type == WM_T_ICH8)
   8619 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8620 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8621 		}
   8622 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8623 			device_xname(sc->sc_dev)));
   8624 		mii_pollstat(&sc->sc_mii);
   8625 		if (sc->sc_type == WM_T_82543) {
   8626 			int miistatus, active;
   8627 
   8628 			/*
   8629 			 * With 82543, we need to force speed and
   8630 			 * duplex on the MAC equal to what the PHY
   8631 			 * speed and duplex configuration is.
   8632 			 */
   8633 			miistatus = sc->sc_mii.mii_media_status;
   8634 
   8635 			if (miistatus & IFM_ACTIVE) {
   8636 				active = sc->sc_mii.mii_media_active;
   8637 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8638 				switch (IFM_SUBTYPE(active)) {
   8639 				case IFM_10_T:
   8640 					sc->sc_ctrl |= CTRL_SPEED_10;
   8641 					break;
   8642 				case IFM_100_TX:
   8643 					sc->sc_ctrl |= CTRL_SPEED_100;
   8644 					break;
   8645 				case IFM_1000_T:
   8646 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8647 					break;
   8648 				default:
   8649 					/*
					 * Fiber?
					 * Should not enter here.
   8652 					 */
   8653 					printf("unknown media (%x)\n", active);
   8654 					break;
   8655 				}
   8656 				if (active & IFM_FDX)
   8657 					sc->sc_ctrl |= CTRL_FD;
   8658 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8659 			}
   8660 		} else if (sc->sc_type == WM_T_PCH) {
   8661 			wm_k1_gig_workaround_hv(sc,
   8662 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8663 		}
   8664 
   8665 		if ((sc->sc_phytype == WMPHY_82578)
   8666 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8667 			== IFM_1000_T)) {
   8668 
   8669 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8670 				delay(200*1000); /* XXX too big */
   8671 
   8672 				/* Link stall fix for link up */
   8673 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8674 				    HV_MUX_DATA_CTRL,
   8675 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8676 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8677 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8678 				    HV_MUX_DATA_CTRL,
   8679 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8680 			}
   8681 		}
   8682 		/*
   8683 		 * I217 Packet Loss issue:
   8684 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8685 		 * on power up.
   8686 		 * Set the Beacon Duration for I217 to 8 usec
   8687 		 */
   8688 		if ((sc->sc_type == WM_T_PCH_LPT)
   8689 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8690 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8691 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8692 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8693 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8694 		}
   8695 
   8696 		/* XXX Work-around I218 hang issue */
   8697 		/* e1000_k1_workaround_lpt_lp() */
   8698 
   8699 		if ((sc->sc_type == WM_T_PCH_LPT)
   8700 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8701 			/*
   8702 			 * Set platform power management values for Latency
   8703 			 * Tolerance Reporting (LTR)
   8704 			 */
   8705 			wm_platform_pm_pch_lpt(sc,
   8706 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8707 				    != 0));
   8708 		}
   8709 
   8710 		/* FEXTNVM6 K1-off workaround */
   8711 		if (sc->sc_type == WM_T_PCH_SPT) {
   8712 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8713 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8714 			    & FEXTNVM6_K1_OFF_ENABLE)
   8715 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8716 			else
   8717 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8718 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8719 		}
   8720 	} else if (icr & ICR_RXSEQ) {
   8721 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8722 			device_xname(sc->sc_dev)));
   8723 	}
   8724 }
   8725 
   8726 /*
   8727  * wm_linkintr_tbi:
   8728  *
   8729  *	Helper; handle link interrupts for TBI mode.
   8730  */
   8731 static void
   8732 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8733 {
   8734 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8735 	uint32_t status;
   8736 
   8737 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8738 		__func__));
   8739 
   8740 	status = CSR_READ(sc, WMREG_STATUS);
   8741 	if (icr & ICR_LSC) {
   8742 		if (status & STATUS_LU) {
   8743 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8744 			    device_xname(sc->sc_dev),
   8745 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8746 			/*
   8747 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8748 			 * so we should update sc->sc_ctrl
   8749 			 */
   8750 
   8751 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8752 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8753 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8754 			if (status & STATUS_FD)
   8755 				sc->sc_tctl |=
   8756 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8757 			else
   8758 				sc->sc_tctl |=
   8759 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8760 			if (sc->sc_ctrl & CTRL_TFCE)
   8761 				sc->sc_fcrtl |= FCRTL_XONE;
   8762 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8763 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8764 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8765 				      sc->sc_fcrtl);
   8766 			sc->sc_tbi_linkup = 1;
   8767 			if_link_state_change(ifp, LINK_STATE_UP);
   8768 		} else {
   8769 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8770 			    device_xname(sc->sc_dev)));
   8771 			sc->sc_tbi_linkup = 0;
   8772 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8773 		}
   8774 		/* Update LED */
   8775 		wm_tbi_serdes_set_linkled(sc);
   8776 	} else if (icr & ICR_RXSEQ) {
   8777 		DPRINTF(WM_DEBUG_LINK,
   8778 		    ("%s: LINK: Receive sequence error\n",
   8779 		    device_xname(sc->sc_dev)));
   8780 	}
   8781 }
   8782 
   8783 /*
   8784  * wm_linkintr_serdes:
   8785  *
 *	Helper; handle link interrupts for SERDES mode.
   8787  */
   8788 static void
   8789 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8790 {
   8791 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8792 	struct mii_data *mii = &sc->sc_mii;
   8793 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8794 	uint32_t pcs_adv, pcs_lpab, reg;
   8795 
   8796 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8797 		__func__));
   8798 
   8799 	if (icr & ICR_LSC) {
   8800 		/* Check PCS */
   8801 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8802 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8803 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8804 				device_xname(sc->sc_dev)));
   8805 			mii->mii_media_status |= IFM_ACTIVE;
   8806 			sc->sc_tbi_linkup = 1;
   8807 			if_link_state_change(ifp, LINK_STATE_UP);
   8808 		} else {
   8809 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8810 				device_xname(sc->sc_dev)));
   8811 			mii->mii_media_status |= IFM_NONE;
   8812 			sc->sc_tbi_linkup = 0;
   8813 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8814 			wm_tbi_serdes_set_linkled(sc);
   8815 			return;
   8816 		}
   8817 		mii->mii_media_active |= IFM_1000_SX;
   8818 		if ((reg & PCS_LSTS_FDX) != 0)
   8819 			mii->mii_media_active |= IFM_FDX;
   8820 		else
   8821 			mii->mii_media_active |= IFM_HDX;
   8822 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8823 			/* Check flow */
   8824 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8825 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8826 				DPRINTF(WM_DEBUG_LINK,
   8827 				    ("XXX LINKOK but not ACOMP\n"));
   8828 				return;
   8829 			}
   8830 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8831 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8832 			DPRINTF(WM_DEBUG_LINK,
   8833 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8834 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8835 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8836 				mii->mii_media_active |= IFM_FLOW
   8837 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8838 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8839 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8840 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8841 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8842 				mii->mii_media_active |= IFM_FLOW
   8843 				    | IFM_ETH_TXPAUSE;
   8844 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8845 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8846 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8847 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8848 				mii->mii_media_active |= IFM_FLOW
   8849 				    | IFM_ETH_RXPAUSE;
   8850 		}
   8851 		/* Update LED */
   8852 		wm_tbi_serdes_set_linkled(sc);
   8853 	} else {
   8854 		DPRINTF(WM_DEBUG_LINK,
   8855 		    ("%s: LINK: Receive sequence error\n",
   8856 		    device_xname(sc->sc_dev)));
   8857 	}
   8858 }
   8859 
   8860 /*
   8861  * wm_linkintr:
   8862  *
   8863  *	Helper; handle link interrupts.
   8864  */
   8865 static void
   8866 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8867 {
   8868 
   8869 	KASSERT(WM_CORE_LOCKED(sc));
   8870 
   8871 	if (sc->sc_flags & WM_F_HAS_MII)
   8872 		wm_linkintr_gmii(sc, icr);
   8873 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8874 	    && (sc->sc_type >= WM_T_82575))
   8875 		wm_linkintr_serdes(sc, icr);
   8876 	else
   8877 		wm_linkintr_tbi(sc, icr);
   8878 }
   8879 
   8880 /*
   8881  * wm_intr_legacy:
   8882  *
   8883  *	Interrupt service routine for INTx and MSI.
   8884  */
   8885 static int
   8886 wm_intr_legacy(void *arg)
   8887 {
   8888 	struct wm_softc *sc = arg;
   8889 	struct wm_queue *wmq = &sc->sc_queue[0];
   8890 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8891 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8892 	uint32_t icr, rndval = 0;
   8893 	int handled = 0;
   8894 
   8895 	while (1 /* CONSTCOND */) {
   8896 		icr = CSR_READ(sc, WMREG_ICR);
   8897 		if ((icr & sc->sc_icr) == 0)
   8898 			break;
   8899 		if (handled == 0) {
			DPRINTF(WM_DEBUG_TX, ("%s: INTx: got intr\n",
			    device_xname(sc->sc_dev)));
   8902 		}
   8903 		if (rndval == 0)
   8904 			rndval = icr;
   8905 
   8906 		mutex_enter(rxq->rxq_lock);
   8907 
   8908 		if (rxq->rxq_stopping) {
   8909 			mutex_exit(rxq->rxq_lock);
   8910 			break;
   8911 		}
   8912 
   8913 		handled = 1;
   8914 
   8915 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8916 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8917 			DPRINTF(WM_DEBUG_RX,
   8918 			    ("%s: RX: got Rx intr 0x%08x\n",
   8919 			    device_xname(sc->sc_dev),
   8920 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8921 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8922 		}
   8923 #endif
   8924 		/*
   8925 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   8927 		 * So, we can call wm_rxeof() in interrupt context.
   8928 		 */
   8929 		wm_rxeof(rxq, UINT_MAX);
   8930 
   8931 		mutex_exit(rxq->rxq_lock);
   8932 		mutex_enter(txq->txq_lock);
   8933 
   8934 		if (txq->txq_stopping) {
   8935 			mutex_exit(txq->txq_lock);
   8936 			break;
   8937 		}
   8938 
   8939 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8940 		if (icr & ICR_TXDW) {
   8941 			DPRINTF(WM_DEBUG_TX,
   8942 			    ("%s: TX: got TXDW interrupt\n",
   8943 			    device_xname(sc->sc_dev)));
   8944 			WM_Q_EVCNT_INCR(txq, txdw);
   8945 		}
   8946 #endif
   8947 		wm_txeof(txq, UINT_MAX);
   8948 
   8949 		mutex_exit(txq->txq_lock);
   8950 		WM_CORE_LOCK(sc);
   8951 
   8952 		if (sc->sc_core_stopping) {
   8953 			WM_CORE_UNLOCK(sc);
   8954 			break;
   8955 		}
   8956 
   8957 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8958 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8959 			wm_linkintr(sc, icr);
   8960 		}
   8961 
   8962 		WM_CORE_UNLOCK(sc);
   8963 
   8964 		if (icr & ICR_RXO) {
   8965 #if defined(WM_DEBUG)
   8966 			log(LOG_WARNING, "%s: Receive overrun\n",
   8967 			    device_xname(sc->sc_dev));
   8968 #endif /* defined(WM_DEBUG) */
   8969 		}
   8970 	}
   8971 
   8972 	rnd_add_uint32(&sc->rnd_source, rndval);
   8973 
   8974 	if (handled) {
   8975 		/* Try to get more packets going. */
   8976 		softint_schedule(wmq->wmq_si);
   8977 	}
   8978 
   8979 	return handled;
   8980 }
   8981 
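/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts.  The register and bit layout
 *	differ by chip: 82574 uses IMC with per-queue ICR bits, 82575
 *	uses EIMC with EITR queue bits, and newer chips use EIMC with
 *	one bit per MSI-X vector.
 */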
   8982 static inline void
   8983 wm_txrxintr_disable(struct wm_queue *wmq)
   8984 {
   8985 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8986 
   8987 	if (sc->sc_type == WM_T_82574)
   8988 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8989 	else if (sc->sc_type == WM_T_82575)
   8990 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8991 	else
   8992 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8993 }
   8994 
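/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling rate, then unmask this
 *	queue's Tx/Rx interrupts.
 */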
   8995 static inline void
   8996 wm_txrxintr_enable(struct wm_queue *wmq)
   8997 {
   8998 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8999 
   9000 	wm_itrs_calculate(sc, wmq);
   9001 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here.  There is no need to care which of RXQ(0) and RXQ(1)
	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
	 * disabled while its wm_handle_queue(wmq) is running.
	 */
   9008 	if (sc->sc_type == WM_T_82574)
   9009 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9010 	else if (sc->sc_type == WM_T_82575)
   9011 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9012 	else
   9013 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9014 }
   9015 
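/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for Tx/Rx queues for MSI-X.
 */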
   9016 static int
   9017 wm_txrxintr_msix(void *arg)
   9018 {
   9019 	struct wm_queue *wmq = arg;
   9020 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9021 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9022 	struct wm_softc *sc = txq->txq_sc;
   9023 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9024 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9025 	bool txmore;
   9026 	bool rxmore;
   9027 
   9028 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9029 
   9030 	DPRINTF(WM_DEBUG_TX,
   9031 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9032 
   9033 	wm_txrxintr_disable(wmq);
   9034 
   9035 	mutex_enter(txq->txq_lock);
   9036 
   9037 	if (txq->txq_stopping) {
   9038 		mutex_exit(txq->txq_lock);
   9039 		return 0;
   9040 	}
   9041 
   9042 	WM_Q_EVCNT_INCR(txq, txdw);
   9043 	txmore = wm_txeof(txq, txlimit);
   9044 	/* wm_deferred start() is done in wm_handle_queue(). */
   9045 	mutex_exit(txq->txq_lock);
   9046 
   9047 	DPRINTF(WM_DEBUG_RX,
   9048 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9049 	mutex_enter(rxq->rxq_lock);
   9050 
   9051 	if (rxq->rxq_stopping) {
   9052 		mutex_exit(rxq->rxq_lock);
   9053 		return 0;
   9054 	}
   9055 
   9056 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9057 	rxmore = wm_rxeof(rxq, rxlimit);
   9058 	mutex_exit(rxq->rxq_lock);
   9059 
   9060 	wm_itrs_writereg(sc, wmq);
   9061 
   9062 	if (txmore || rxmore)
   9063 		softint_schedule(wmq->wmq_si);
   9064 	else
   9065 		wm_txrxintr_enable(wmq);
   9066 
   9067 	return 1;
   9068 }
   9069 
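/*
 * wm_handle_queue:
 *
 *	Softint handler; continue Tx/Rx processing beyond the
 *	per-interrupt limits, then restart deferred transmission.
 *	The queue interrupt is re-enabled only when no more work
 *	is pending.
 */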
   9070 static void
   9071 wm_handle_queue(void *arg)
   9072 {
   9073 	struct wm_queue *wmq = arg;
   9074 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9075 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9076 	struct wm_softc *sc = txq->txq_sc;
   9077 	u_int txlimit = sc->sc_tx_process_limit;
   9078 	u_int rxlimit = sc->sc_rx_process_limit;
   9079 	bool txmore;
   9080 	bool rxmore;
   9081 
   9082 	mutex_enter(txq->txq_lock);
   9083 	if (txq->txq_stopping) {
   9084 		mutex_exit(txq->txq_lock);
   9085 		return;
   9086 	}
   9087 	txmore = wm_txeof(txq, txlimit);
   9088 	wm_deferred_start_locked(txq);
   9089 	mutex_exit(txq->txq_lock);
   9090 
   9091 	mutex_enter(rxq->rxq_lock);
   9092 	if (rxq->rxq_stopping) {
   9093 		mutex_exit(rxq->rxq_lock);
   9094 		return;
   9095 	}
   9096 	WM_Q_EVCNT_INCR(rxq, rxdefer);
   9097 	rxmore = wm_rxeof(rxq, rxlimit);
   9098 	mutex_exit(rxq->rxq_lock);
   9099 
   9100 	if (txmore || rxmore)
   9101 		softint_schedule(wmq->wmq_si);
   9102 	else
   9103 		wm_txrxintr_enable(wmq);
   9104 }
   9105 
   9106 /*
   9107  * wm_linkintr_msix:
   9108  *
   9109  *	Interrupt service routine for link status change for MSI-X.
   9110  */
   9111 static int
   9112 wm_linkintr_msix(void *arg)
   9113 {
   9114 	struct wm_softc *sc = arg;
   9115 	uint32_t reg;
	bool has_rxo = false;
   9117 
   9118 	DPRINTF(WM_DEBUG_LINK,
   9119 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9120 
   9121 	reg = CSR_READ(sc, WMREG_ICR);
   9122 	WM_CORE_LOCK(sc);
   9123 	if (sc->sc_core_stopping)
   9124 		goto out;
   9125 
	if ((reg & ICR_LSC) != 0) {
   9127 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9128 		wm_linkintr(sc, ICR_LSC);
   9129 	}
   9130 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * 82574 MSI-X mode delivers the receive overrun (RXO) interrupt
	 * on the ICR_OTHER MSI-X vector; furthermore it raises neither
	 * the ICR_RXQ(0) nor the ICR_RXQ(1) vector.  So we generate
	 * ICR_RXQ(0) and ICR_RXQ(1) interrupts by writing WMREG_ICS to
	 * process the received packets.
	 */
   9139 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9140 #if defined(WM_DEBUG)
   9141 		log(LOG_WARNING, "%s: Receive overrun\n",
   9142 		    device_xname(sc->sc_dev));
   9143 #endif /* defined(WM_DEBUG) */
   9144 
   9145 		has_rxo = true;
   9146 		/*
   9147 		 * The RXO interrupt is very high rate when receive traffic is
   9148 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9149 		 * interrupts. ICR_OTHER will be enabled at the end of
   9150 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9151 		 * ICR_RXQ(1) interrupts.
   9152 		 */
   9153 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9154 
   9155 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9156 	}
   9157 
   9160 out:
   9161 	WM_CORE_UNLOCK(sc);
   9162 
   9163 	if (sc->sc_type == WM_T_82574) {
   9164 		if (!has_rxo)
   9165 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9166 		else
   9167 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9168 	} else if (sc->sc_type == WM_T_82575)
   9169 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9170 	else
   9171 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9172 
   9173 	return 1;
   9174 }
   9175 
   9176 /*
   9177  * Media related.
   9178  * GMII, SGMII, TBI (and SERDES)
   9179  */
   9180 
   9181 /* Common */
   9182 
   9183 /*
   9184  * wm_tbi_serdes_set_linkled:
   9185  *
   9186  *	Update the link LED on TBI and SERDES devices.
   9187  */
   9188 static void
   9189 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9190 {
   9191 
   9192 	if (sc->sc_tbi_linkup)
   9193 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9194 	else
   9195 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9196 
   9197 	/* 82540 or newer devices are active low */
   9198 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9199 
   9200 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9201 }
   9202 
   9203 /* GMII related */
   9204 
   9205 /*
   9206  * wm_gmii_reset:
   9207  *
   9208  *	Reset the PHY.
   9209  */
   9210 static void
   9211 wm_gmii_reset(struct wm_softc *sc)
   9212 {
   9213 	uint32_t reg;
   9214 	int rv;
   9215 
   9216 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9217 		device_xname(sc->sc_dev), __func__));
   9218 
   9219 	rv = sc->phy.acquire(sc);
   9220 	if (rv != 0) {
   9221 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9222 		    __func__);
   9223 		return;
   9224 	}
   9225 
   9226 	switch (sc->sc_type) {
   9227 	case WM_T_82542_2_0:
   9228 	case WM_T_82542_2_1:
   9229 		/* null */
   9230 		break;
   9231 	case WM_T_82543:
   9232 		/*
   9233 		 * With 82543, we need to force speed and duplex on the MAC
   9234 		 * equal to what the PHY speed and duplex configuration is.
   9235 		 * In addition, we need to perform a hardware reset on the PHY
   9236 		 * to take it out of reset.
   9237 		 */
   9238 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9239 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9240 
   9241 		/* The PHY reset pin is active-low. */
   9242 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9243 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9244 		    CTRL_EXT_SWDPIN(4));
   9245 		reg |= CTRL_EXT_SWDPIO(4);
   9246 
   9247 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9248 		CSR_WRITE_FLUSH(sc);
   9249 		delay(10*1000);
   9250 
   9251 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9252 		CSR_WRITE_FLUSH(sc);
   9253 		delay(150);
   9254 #if 0
   9255 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9256 #endif
   9257 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9258 		break;
   9259 	case WM_T_82544:	/* reset 10000us */
   9260 	case WM_T_82540:
   9261 	case WM_T_82545:
   9262 	case WM_T_82545_3:
   9263 	case WM_T_82546:
   9264 	case WM_T_82546_3:
   9265 	case WM_T_82541:
   9266 	case WM_T_82541_2:
   9267 	case WM_T_82547:
   9268 	case WM_T_82547_2:
   9269 	case WM_T_82571:	/* reset 100us */
   9270 	case WM_T_82572:
   9271 	case WM_T_82573:
   9272 	case WM_T_82574:
   9273 	case WM_T_82575:
   9274 	case WM_T_82576:
   9275 	case WM_T_82580:
   9276 	case WM_T_I350:
   9277 	case WM_T_I354:
   9278 	case WM_T_I210:
   9279 	case WM_T_I211:
   9280 	case WM_T_82583:
   9281 	case WM_T_80003:
   9282 		/* generic reset */
   9283 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9284 		CSR_WRITE_FLUSH(sc);
   9285 		delay(20000);
   9286 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9287 		CSR_WRITE_FLUSH(sc);
   9288 		delay(20000);
   9289 
   9290 		if ((sc->sc_type == WM_T_82541)
   9291 		    || (sc->sc_type == WM_T_82541_2)
   9292 		    || (sc->sc_type == WM_T_82547)
   9293 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
			/* XXX add code to set LED after PHY reset */
   9296 		}
   9297 		break;
   9298 	case WM_T_ICH8:
   9299 	case WM_T_ICH9:
   9300 	case WM_T_ICH10:
   9301 	case WM_T_PCH:
   9302 	case WM_T_PCH2:
   9303 	case WM_T_PCH_LPT:
   9304 	case WM_T_PCH_SPT:
   9305 		/* generic reset */
   9306 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9307 		CSR_WRITE_FLUSH(sc);
   9308 		delay(100);
   9309 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9310 		CSR_WRITE_FLUSH(sc);
   9311 		delay(150);
   9312 		break;
   9313 	default:
   9314 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9315 		    __func__);
   9316 		break;
   9317 	}
   9318 
   9319 	sc->phy.release(sc);
   9320 
   9321 	/* get_cfg_done */
   9322 	wm_get_cfg_done(sc);
   9323 
   9324 	/* extra setup */
   9325 	switch (sc->sc_type) {
   9326 	case WM_T_82542_2_0:
   9327 	case WM_T_82542_2_1:
   9328 	case WM_T_82543:
   9329 	case WM_T_82544:
   9330 	case WM_T_82540:
   9331 	case WM_T_82545:
   9332 	case WM_T_82545_3:
   9333 	case WM_T_82546:
   9334 	case WM_T_82546_3:
   9335 	case WM_T_82541_2:
   9336 	case WM_T_82547_2:
   9337 	case WM_T_82571:
   9338 	case WM_T_82572:
   9339 	case WM_T_82573:
   9340 	case WM_T_82574:
   9341 	case WM_T_82583:
   9342 	case WM_T_82575:
   9343 	case WM_T_82576:
   9344 	case WM_T_82580:
   9345 	case WM_T_I350:
   9346 	case WM_T_I354:
   9347 	case WM_T_I210:
   9348 	case WM_T_I211:
   9349 	case WM_T_80003:
   9350 		/* null */
   9351 		break;
   9352 	case WM_T_82541:
   9353 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
   9355 		break;
   9356 	case WM_T_ICH8:
   9357 	case WM_T_ICH9:
   9358 	case WM_T_ICH10:
   9359 	case WM_T_PCH:
   9360 	case WM_T_PCH2:
   9361 	case WM_T_PCH_LPT:
   9362 	case WM_T_PCH_SPT:
   9363 		wm_phy_post_reset(sc);
   9364 		break;
   9365 	default:
   9366 		panic("%s: unknown type\n", __func__);
   9367 		break;
   9368 	}
   9369 }
   9370 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected.  To select the correct read/write functions, the PCI ID or
 * the MAC type is required, without accessing the PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or the MAC type.  The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  The result might still not be perfect because some entries
 * may be missing from the comparison, but it should be better than the
 * first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9389 static void
   9390 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9391     uint16_t phy_model)
   9392 {
   9393 	device_t dev = sc->sc_dev;
   9394 	struct mii_data *mii = &sc->sc_mii;
   9395 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9396 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9397 	mii_readreg_t new_readreg;
   9398 	mii_writereg_t new_writereg;
   9399 
   9400 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9401 		device_xname(sc->sc_dev), __func__));
   9402 
   9403 	if (mii->mii_readreg == NULL) {
   9404 		/*
   9405 		 *  This is the first call of this function. For ICH and PCH
   9406 		 * variants, it's difficult to determine the PHY access method
   9407 		 * by sc_type, so use the PCI product ID for some devices.
   9408 		 */
   9409 
   9410 		switch (sc->sc_pcidevid) {
   9411 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9412 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9413 			/* 82577 */
   9414 			new_phytype = WMPHY_82577;
   9415 			break;
   9416 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9417 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9418 			/* 82578 */
   9419 			new_phytype = WMPHY_82578;
   9420 			break;
   9421 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9422 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9423 			/* 82579 */
   9424 			new_phytype = WMPHY_82579;
   9425 			break;
   9426 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9427 		case PCI_PRODUCT_INTEL_82801I_BM:
   9428 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9429 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9430 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9431 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9432 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9433 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9434 			/* ICH8, 9, 10 with 82567 */
   9435 			new_phytype = WMPHY_BM;
   9436 			break;
   9437 		default:
   9438 			break;
   9439 		}
   9440 	} else {
   9441 		/* It's not the first call. Use PHY OUI and model */
   9442 		switch (phy_oui) {
   9443 		case MII_OUI_ATHEROS: /* XXX ??? */
   9444 			switch (phy_model) {
   9445 			case 0x0004: /* XXX */
   9446 				new_phytype = WMPHY_82578;
   9447 				break;
   9448 			default:
   9449 				break;
   9450 			}
   9451 			break;
   9452 		case MII_OUI_xxMARVELL:
   9453 			switch (phy_model) {
   9454 			case MII_MODEL_xxMARVELL_I210:
   9455 				new_phytype = WMPHY_I210;
   9456 				break;
   9457 			case MII_MODEL_xxMARVELL_E1011:
   9458 			case MII_MODEL_xxMARVELL_E1000_3:
   9459 			case MII_MODEL_xxMARVELL_E1000_5:
   9460 			case MII_MODEL_xxMARVELL_E1112:
   9461 				new_phytype = WMPHY_M88;
   9462 				break;
   9463 			case MII_MODEL_xxMARVELL_E1149:
   9464 				new_phytype = WMPHY_BM;
   9465 				break;
   9466 			case MII_MODEL_xxMARVELL_E1111:
   9467 			case MII_MODEL_xxMARVELL_I347:
   9468 			case MII_MODEL_xxMARVELL_E1512:
   9469 			case MII_MODEL_xxMARVELL_E1340M:
   9470 			case MII_MODEL_xxMARVELL_E1543:
   9471 				new_phytype = WMPHY_M88;
   9472 				break;
   9473 			case MII_MODEL_xxMARVELL_I82563:
   9474 				new_phytype = WMPHY_GG82563;
   9475 				break;
   9476 			default:
   9477 				break;
   9478 			}
   9479 			break;
   9480 		case MII_OUI_INTEL:
   9481 			switch (phy_model) {
   9482 			case MII_MODEL_INTEL_I82577:
   9483 				new_phytype = WMPHY_82577;
   9484 				break;
   9485 			case MII_MODEL_INTEL_I82579:
   9486 				new_phytype = WMPHY_82579;
   9487 				break;
   9488 			case MII_MODEL_INTEL_I217:
   9489 				new_phytype = WMPHY_I217;
   9490 				break;
   9491 			case MII_MODEL_INTEL_I82580:
   9492 			case MII_MODEL_INTEL_I350:
   9493 				new_phytype = WMPHY_82580;
   9494 				break;
   9495 			default:
   9496 				break;
   9497 			}
   9498 			break;
   9499 		case MII_OUI_yyINTEL:
   9500 			switch (phy_model) {
   9501 			case MII_MODEL_yyINTEL_I82562G:
   9502 			case MII_MODEL_yyINTEL_I82562EM:
   9503 			case MII_MODEL_yyINTEL_I82562ET:
   9504 				new_phytype = WMPHY_IFE;
   9505 				break;
   9506 			case MII_MODEL_yyINTEL_IGP01E1000:
   9507 				new_phytype = WMPHY_IGP;
   9508 				break;
   9509 			case MII_MODEL_yyINTEL_I82566:
   9510 				new_phytype = WMPHY_IGP_3;
   9511 				break;
   9512 			default:
   9513 				break;
   9514 			}
   9515 			break;
   9516 		default:
   9517 			break;
   9518 		}
   9519 		if (new_phytype == WMPHY_UNKNOWN)
   9520 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9521 			    __func__);
   9522 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
			    " was incorrect. PHY type from PHY ID = %u\n",
   9527 			    sc->sc_phytype, new_phytype);
   9528 		}
   9529 	}
   9530 
   9531 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9532 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9533 		/* SGMII */
   9534 		new_readreg = wm_sgmii_readreg;
   9535 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9537 		/* BM2 (phyaddr == 1) */
   9538 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9539 		    && (new_phytype != WMPHY_BM)
   9540 		    && (new_phytype != WMPHY_UNKNOWN))
   9541 			doubt_phytype = new_phytype;
   9542 		new_phytype = WMPHY_BM;
   9543 		new_readreg = wm_gmii_bm_readreg;
   9544 		new_writereg = wm_gmii_bm_writereg;
   9545 	} else if (sc->sc_type >= WM_T_PCH) {
   9546 		/* All PCH* use _hv_ */
   9547 		new_readreg = wm_gmii_hv_readreg;
   9548 		new_writereg = wm_gmii_hv_writereg;
   9549 	} else if (sc->sc_type >= WM_T_ICH8) {
   9550 		/* non-82567 ICH8, 9 and 10 */
   9551 		new_readreg = wm_gmii_i82544_readreg;
   9552 		new_writereg = wm_gmii_i82544_writereg;
   9553 	} else if (sc->sc_type >= WM_T_80003) {
   9554 		/* 80003 */
   9555 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9556 		    && (new_phytype != WMPHY_GG82563)
   9557 		    && (new_phytype != WMPHY_UNKNOWN))
   9558 			doubt_phytype = new_phytype;
   9559 		new_phytype = WMPHY_GG82563;
   9560 		new_readreg = wm_gmii_i80003_readreg;
   9561 		new_writereg = wm_gmii_i80003_writereg;
   9562 	} else if (sc->sc_type >= WM_T_I210) {
   9563 		/* I210 and I211 */
   9564 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9565 		    && (new_phytype != WMPHY_I210)
   9566 		    && (new_phytype != WMPHY_UNKNOWN))
   9567 			doubt_phytype = new_phytype;
   9568 		new_phytype = WMPHY_I210;
   9569 		new_readreg = wm_gmii_gs40g_readreg;
   9570 		new_writereg = wm_gmii_gs40g_writereg;
   9571 	} else if (sc->sc_type >= WM_T_82580) {
   9572 		/* 82580, I350 and I354 */
   9573 		new_readreg = wm_gmii_82580_readreg;
   9574 		new_writereg = wm_gmii_82580_writereg;
   9575 	} else if (sc->sc_type >= WM_T_82544) {
    9576 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9577 		new_readreg = wm_gmii_i82544_readreg;
   9578 		new_writereg = wm_gmii_i82544_writereg;
   9579 	} else {
   9580 		new_readreg = wm_gmii_i82543_readreg;
   9581 		new_writereg = wm_gmii_i82543_writereg;
   9582 	}
   9583 
   9584 	if (new_phytype == WMPHY_BM) {
   9585 		/* All BM use _bm_ */
   9586 		new_readreg = wm_gmii_bm_readreg;
   9587 		new_writereg = wm_gmii_bm_writereg;
   9588 	}
   9589 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9590 		/* All PCH* use _hv_ */
   9591 		new_readreg = wm_gmii_hv_readreg;
   9592 		new_writereg = wm_gmii_hv_writereg;
   9593 	}
   9594 
   9595 	/* Diag output */
   9596 	if (doubt_phytype != WMPHY_UNKNOWN)
   9597 		aprint_error_dev(dev, "Assumed new PHY type was "
   9598 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9599 		    new_phytype);
    9600 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9601 	    && (sc->sc_phytype != new_phytype))
    9602 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9603 		    "was incorrect. New PHY type = %u\n",
    9604 		    sc->sc_phytype, new_phytype);
   9605 
   9606 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9607 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9608 
   9609 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9610 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9611 		    "function was incorrect.\n");
   9612 
   9613 	/* Update now */
   9614 	sc->sc_phytype = new_phytype;
   9615 	mii->mii_readreg = new_readreg;
   9616 	mii->mii_writereg = new_writereg;
   9617 }
   9618 
   9619 /*
   9620  * wm_get_phy_id_82575:
   9621  *
    9622  * Return the PHY ID, or -1 if it failed.
   9623  */
   9624 static int
   9625 wm_get_phy_id_82575(struct wm_softc *sc)
   9626 {
   9627 	uint32_t reg;
   9628 	int phyid = -1;
   9629 
   9630 	/* XXX */
   9631 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9632 		return -1;
   9633 
   9634 	if (wm_sgmii_uses_mdio(sc)) {
   9635 		switch (sc->sc_type) {
   9636 		case WM_T_82575:
   9637 		case WM_T_82576:
   9638 			reg = CSR_READ(sc, WMREG_MDIC);
   9639 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9640 			break;
   9641 		case WM_T_82580:
   9642 		case WM_T_I350:
   9643 		case WM_T_I354:
   9644 		case WM_T_I210:
   9645 		case WM_T_I211:
   9646 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9647 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9648 			break;
   9649 		default:
   9650 			return -1;
   9651 		}
   9652 	}
   9653 
   9654 	return phyid;
   9655 }
   9656 
   9657 
   9658 /*
   9659  * wm_gmii_mediainit:
   9660  *
   9661  *	Initialize media for use on 1000BASE-T devices.
   9662  */
   9663 static void
   9664 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9665 {
   9666 	device_t dev = sc->sc_dev;
   9667 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9668 	struct mii_data *mii = &sc->sc_mii;
   9669 	uint32_t reg;
   9670 
   9671 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9672 		device_xname(sc->sc_dev), __func__));
   9673 
   9674 	/* We have GMII. */
   9675 	sc->sc_flags |= WM_F_HAS_MII;
   9676 
   9677 	if (sc->sc_type == WM_T_80003)
   9678 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9679 	else
   9680 		sc->sc_tipg = TIPG_1000T_DFLT;
   9681 
   9682 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9683 	if ((sc->sc_type == WM_T_82580)
   9684 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9685 	    || (sc->sc_type == WM_T_I211)) {
   9686 		reg = CSR_READ(sc, WMREG_PHPM);
   9687 		reg &= ~PHPM_GO_LINK_D;
   9688 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9689 	}
   9690 
   9691 	/*
   9692 	 * Let the chip set speed/duplex on its own based on
   9693 	 * signals from the PHY.
   9694 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9695 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9696 	 */
   9697 	sc->sc_ctrl |= CTRL_SLU;
   9698 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9699 
   9700 	/* Initialize our media structures and probe the GMII. */
   9701 	mii->mii_ifp = ifp;
   9702 
   9703 	mii->mii_statchg = wm_gmii_statchg;
   9704 
   9705 	/* get PHY control from SMBus to PCIe */
   9706 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9707 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9708 		wm_smbustopci(sc);
   9709 
   9710 	wm_gmii_reset(sc);
   9711 
   9712 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9713 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9714 	    wm_gmii_mediastatus);
   9715 
   9716 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9717 	    || (sc->sc_type == WM_T_82580)
   9718 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9719 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9720 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9721 			/* Attach only one port */
   9722 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9723 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9724 		} else {
   9725 			int i, id;
   9726 			uint32_t ctrl_ext;
   9727 
   9728 			id = wm_get_phy_id_82575(sc);
   9729 			if (id != -1) {
   9730 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9731 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9732 			}
   9733 			if ((id == -1)
   9734 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9735 				/* Power on the SGMII PHY if it is disabled */
    9736 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
    9737 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9738 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
    9739 				CSR_WRITE_FLUSH(sc);
    9740 				delay(300*1000); /* XXX too long */
    9741 
    9742 				/* Try PHY addresses 1 through 7 */
    9743 				for (i = 1; i < 8; i++)
    9744 					mii_attach(sc->sc_dev, &sc->sc_mii,
    9745 					    0xffffffff, i, MII_OFFSET_ANY,
    9746 					    MIIF_DOPAUSE);
    9747 
    9748 				/* Restore the previous SFP cage power state */
   9749 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9750 			}
   9751 		}
   9752 	} else {
   9753 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9754 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9755 	}
   9756 
   9757 	/*
   9758 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9759 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9760 	 */
   9761 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9762 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9763 		wm_set_mdio_slow_mode_hv(sc);
   9764 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9765 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9766 	}
   9767 
   9768 	/*
   9769 	 * (For ICH8 variants)
   9770 	 * If PHY detection failed, use BM's r/w function and retry.
   9771 	 */
   9772 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9773 		/* if failed, retry with *_bm_* */
   9774 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9775 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9776 		    sc->sc_phytype);
   9777 		sc->sc_phytype = WMPHY_BM;
   9778 		mii->mii_readreg = wm_gmii_bm_readreg;
   9779 		mii->mii_writereg = wm_gmii_bm_writereg;
   9780 
   9781 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9782 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9783 	}
   9784 
   9785 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9786 		/* No PHY was found */
   9787 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9788 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9789 		sc->sc_phytype = WMPHY_NONE;
   9790 	} else {
   9791 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9792 
   9793 		/*
    9794 		 * A PHY was found. Check the PHY type again with a second
    9795 		 * call of wm_gmii_setup_phytype().
   9796 		 */
   9797 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9798 		    child->mii_mpd_model);
   9799 
   9800 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9801 	}
   9802 }
   9803 
   9804 /*
   9805  * wm_gmii_mediachange:	[ifmedia interface function]
   9806  *
   9807  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9808  */
   9809 static int
   9810 wm_gmii_mediachange(struct ifnet *ifp)
   9811 {
   9812 	struct wm_softc *sc = ifp->if_softc;
   9813 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9814 	int rc;
   9815 
   9816 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9817 		device_xname(sc->sc_dev), __func__));
   9818 	if ((ifp->if_flags & IFF_UP) == 0)
   9819 		return 0;
   9820 
   9821 	/* Disable D0 LPLU. */
   9822 	wm_lplu_d0_disable(sc);
   9823 
   9824 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9825 	sc->sc_ctrl |= CTRL_SLU;
   9826 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9827 	    || (sc->sc_type > WM_T_82543)) {
   9828 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9829 	} else {
   9830 		sc->sc_ctrl &= ~CTRL_ASDE;
   9831 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9832 		if (ife->ifm_media & IFM_FDX)
   9833 			sc->sc_ctrl |= CTRL_FD;
   9834 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9835 		case IFM_10_T:
   9836 			sc->sc_ctrl |= CTRL_SPEED_10;
   9837 			break;
   9838 		case IFM_100_TX:
   9839 			sc->sc_ctrl |= CTRL_SPEED_100;
   9840 			break;
   9841 		case IFM_1000_T:
   9842 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9843 			break;
   9844 		default:
   9845 			panic("wm_gmii_mediachange: bad media 0x%x",
   9846 			    ife->ifm_media);
   9847 		}
   9848 	}
   9849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9850 	CSR_WRITE_FLUSH(sc);
   9851 	if (sc->sc_type <= WM_T_82543)
   9852 		wm_gmii_reset(sc);
   9853 
   9854 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9855 		return 0;
   9856 	return rc;
   9857 }
   9858 
   9859 /*
   9860  * wm_gmii_mediastatus:	[ifmedia interface function]
   9861  *
   9862  *	Get the current interface media status on a 1000BASE-T device.
   9863  */
   9864 static void
   9865 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9866 {
   9867 	struct wm_softc *sc = ifp->if_softc;
   9868 
   9869 	ether_mediastatus(ifp, ifmr);
   9870 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9871 	    | sc->sc_flowflags;
   9872 }
   9873 
   9874 #define	MDI_IO		CTRL_SWDPIN(2)
   9875 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9876 #define	MDI_CLK		CTRL_SWDPIN(3)
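
/*
 * On the 82543 the MII management frames are bit-banged through these
 * software-controlled pins: MDI_IO carries the data, MDI_CLK is the
 * software-driven clock and MDI_DIR sets the direction (host -> PHY).
 */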
   9877 
   9878 static void
   9879 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9880 {
   9881 	uint32_t i, v;
   9882 
   9883 	v = CSR_READ(sc, WMREG_CTRL);
   9884 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9885 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9886 
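	/*
	 * Shift the bits out MSB first; for each bit, set the data line,
	 * then pulse MDI_CLK high and low with 10us around each edge.
	 */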
   9887 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9888 		if (data & i)
   9889 			v |= MDI_IO;
   9890 		else
   9891 			v &= ~MDI_IO;
   9892 		CSR_WRITE(sc, WMREG_CTRL, v);
   9893 		CSR_WRITE_FLUSH(sc);
   9894 		delay(10);
   9895 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9896 		CSR_WRITE_FLUSH(sc);
   9897 		delay(10);
   9898 		CSR_WRITE(sc, WMREG_CTRL, v);
   9899 		CSR_WRITE_FLUSH(sc);
   9900 		delay(10);
   9901 	}
   9902 }
   9903 
   9904 static uint32_t
   9905 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9906 {
   9907 	uint32_t v, i, data = 0;
   9908 
   9909 	v = CSR_READ(sc, WMREG_CTRL);
   9910 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9911 	v |= CTRL_SWDPIO(3);
   9912 
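	/* Clock one turnaround cycle before sampling the PHY's data. */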
   9913 	CSR_WRITE(sc, WMREG_CTRL, v);
   9914 	CSR_WRITE_FLUSH(sc);
   9915 	delay(10);
   9916 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9917 	CSR_WRITE_FLUSH(sc);
   9918 	delay(10);
   9919 	CSR_WRITE(sc, WMREG_CTRL, v);
   9920 	CSR_WRITE_FLUSH(sc);
   9921 	delay(10);
   9922 
   9923 	for (i = 0; i < 16; i++) {
   9924 		data <<= 1;
   9925 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9926 		CSR_WRITE_FLUSH(sc);
   9927 		delay(10);
   9928 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9929 			data |= 1;
   9930 		CSR_WRITE(sc, WMREG_CTRL, v);
   9931 		CSR_WRITE_FLUSH(sc);
   9932 		delay(10);
   9933 	}
   9934 
   9935 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9936 	CSR_WRITE_FLUSH(sc);
   9937 	delay(10);
   9938 	CSR_WRITE(sc, WMREG_CTRL, v);
   9939 	CSR_WRITE_FLUSH(sc);
   9940 	delay(10);
   9941 
   9942 	return data;
   9943 }
   9944 
   9945 #undef MDI_IO
   9946 #undef MDI_DIR
   9947 #undef MDI_CLK
   9948 
   9949 /*
   9950  * wm_gmii_i82543_readreg:	[mii interface function]
   9951  *
   9952  *	Read a PHY register on the GMII (i82543 version).
   9953  */
   9954 static int
   9955 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9956 {
   9957 	struct wm_softc *sc = device_private(dev);
   9958 	int rv;
   9959 
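	/* 32 bits of preamble, then the 14-bit read command frame. */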
   9960 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9961 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9962 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9963 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9964 
   9965 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9966 	    device_xname(dev), phy, reg, rv));
   9967 
   9968 	return rv;
   9969 }
   9970 
   9971 /*
   9972  * wm_gmii_i82543_writereg:	[mii interface function]
   9973  *
   9974  *	Write a PHY register on the GMII (i82543 version).
   9975  */
   9976 static void
   9977 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9978 {
   9979 	struct wm_softc *sc = device_private(dev);
   9980 
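	/* 32 bits of preamble, then the full 32-bit write frame. */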
   9981 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9982 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9983 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9984 	    (MII_COMMAND_START << 30), 32);
   9985 }
   9986 
   9987 /*
   9988  * wm_gmii_mdic_readreg:	[mii interface function]
   9989  *
   9990  *	Read a PHY register on the GMII.
   9991  */
   9992 static int
   9993 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9994 {
   9995 	struct wm_softc *sc = device_private(dev);
   9996 	uint32_t mdic = 0;
   9997 	int i, rv;
   9998 
   9999 	if (reg > MII_ADDRMASK) {
   10000 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10001 		    __func__, sc->sc_phytype, reg);
   10002 		reg &= MII_ADDRMASK;
   10003 	}
   10004 
   10005 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10006 	    MDIC_REGADD(reg));
   10007 
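	/* Poll for the ready bit, waiting 50us between reads. */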
   10008 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10009 		mdic = CSR_READ(sc, WMREG_MDIC);
   10010 		if (mdic & MDIC_READY)
   10011 			break;
   10012 		delay(50);
   10013 	}
   10014 
   10015 	if ((mdic & MDIC_READY) == 0) {
   10016 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10017 		    device_xname(dev), phy, reg);
   10018 		rv = 0;
   10019 	} else if (mdic & MDIC_E) {
   10020 #if 0 /* This is normal if no PHY is present. */
   10021 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10022 		    device_xname(dev), phy, reg);
   10023 #endif
   10024 		rv = 0;
   10025 	} else {
   10026 		rv = MDIC_DATA(mdic);
   10027 		if (rv == 0xffff)
   10028 			rv = 0;
   10029 	}
   10030 
   10031 	return rv;
   10032 }
   10033 
   10034 /*
   10035  * wm_gmii_mdic_writereg:	[mii interface function]
   10036  *
   10037  *	Write a PHY register on the GMII.
   10038  */
   10039 static void
   10040 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10041 {
   10042 	struct wm_softc *sc = device_private(dev);
   10043 	uint32_t mdic = 0;
   10044 	int i;
   10045 
   10046 	if (reg > MII_ADDRMASK) {
   10047 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10048 		    __func__, sc->sc_phytype, reg);
   10049 		reg &= MII_ADDRMASK;
   10050 	}
   10051 
   10052 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10053 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10054 
   10055 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10056 		mdic = CSR_READ(sc, WMREG_MDIC);
   10057 		if (mdic & MDIC_READY)
   10058 			break;
   10059 		delay(50);
   10060 	}
   10061 
   10062 	if ((mdic & MDIC_READY) == 0)
   10063 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10064 		    device_xname(dev), phy, reg);
   10065 	else if (mdic & MDIC_E)
   10066 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10067 		    device_xname(dev), phy, reg);
   10068 }
   10069 
   10070 /*
   10071  * wm_gmii_i82544_readreg:	[mii interface function]
   10072  *
   10073  *	Read a PHY register on the GMII.
   10074  */
   10075 static int
   10076 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10077 {
   10078 	struct wm_softc *sc = device_private(dev);
   10079 	int rv;
   10080 
   10081 	if (sc->phy.acquire(sc)) {
   10082 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10083 		return 0;
   10084 	}
   10085 
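	/*
	 * IGP PHYs reach registers above the multi-page boundary through
	 * a page-select register, which takes the full register value
	 * (page bits included).
	 */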
   10086 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10087 		switch (sc->sc_phytype) {
   10088 		case WMPHY_IGP:
   10089 		case WMPHY_IGP_2:
   10090 		case WMPHY_IGP_3:
   10091 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10092 			break;
   10093 		default:
   10094 #ifdef WM_DEBUG
   10095 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10096 			    __func__, sc->sc_phytype, reg);
   10097 #endif
   10098 			break;
   10099 		}
   10100 	}
   10101 
   10102 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10103 	sc->phy.release(sc);
   10104 
   10105 	return rv;
   10106 }
   10107 
   10108 /*
   10109  * wm_gmii_i82544_writereg:	[mii interface function]
   10110  *
   10111  *	Write a PHY register on the GMII.
   10112  */
   10113 static void
   10114 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10115 {
   10116 	struct wm_softc *sc = device_private(dev);
   10117 
   10118 	if (sc->phy.acquire(sc)) {
   10119 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10120 		return;
   10121 	}
   10122 
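	/* Same IGP page-select handling as wm_gmii_i82544_readreg(). */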
   10123 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10124 		switch (sc->sc_phytype) {
   10125 		case WMPHY_IGP:
   10126 		case WMPHY_IGP_2:
   10127 		case WMPHY_IGP_3:
   10128 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10129 			break;
   10130 		default:
   10131 #ifdef WM_DEBUG
   10132 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10133 			    __func__, sc->sc_phytype, reg);
   10134 #endif
   10135 			break;
   10136 		}
   10137 	}
   10138 
   10139 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10140 	sc->phy.release(sc);
   10141 }
   10142 
   10143 /*
   10144  * wm_gmii_i80003_readreg:	[mii interface function]
   10145  *
    10146  *	Read a PHY register on the Kumeran interface (80003).
    10147  * This could be handled by the PHY layer if we didn't have to lock the
    10148  * resource ...
   10149  */
   10150 static int
   10151 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10152 {
   10153 	struct wm_softc *sc = device_private(dev);
   10154 	int page_select, temp;
   10155 	int rv;
   10156 
    10157 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10158 		return 0;
   10159 
   10160 	if (sc->phy.acquire(sc)) {
   10161 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10162 		return 0;
   10163 	}
   10164 
   10165 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10166 		page_select = GG82563_PHY_PAGE_SELECT;
   10167 	else {
   10168 		/*
   10169 		 * Use Alternative Page Select register to access registers
   10170 		 * 30 and 31.
   10171 		 */
   10172 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10173 	}
   10174 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10175 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10176 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10177 		/*
    10178 		 * Wait an additional 200us to work around a bug with the
    10179 		 * ready bit in the MDIC register.
   10180 		 */
   10181 		delay(200);
   10182 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10183 			device_printf(dev, "%s failed\n", __func__);
   10184 			rv = 0; /* XXX */
   10185 			goto out;
   10186 		}
   10187 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10188 		delay(200);
   10189 	} else
   10190 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10191 
   10192 out:
   10193 	sc->phy.release(sc);
   10194 	return rv;
   10195 }
   10196 
   10197 /*
   10198  * wm_gmii_i80003_writereg:	[mii interface function]
   10199  *
    10200  *	Write a PHY register on the Kumeran interface (80003).
    10201  * This could be handled by the PHY layer if we didn't have to lock the
    10202  * resource ...
   10203  */
   10204 static void
   10205 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10206 {
   10207 	struct wm_softc *sc = device_private(dev);
   10208 	int page_select, temp;
   10209 
    10210 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10211 		return;
   10212 
   10213 	if (sc->phy.acquire(sc)) {
   10214 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10215 		return;
   10216 	}
   10217 
   10218 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10219 		page_select = GG82563_PHY_PAGE_SELECT;
   10220 	else {
   10221 		/*
   10222 		 * Use Alternative Page Select register to access registers
   10223 		 * 30 and 31.
   10224 		 */
   10225 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10226 	}
   10227 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10228 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10229 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10230 		/*
    10231 		 * Wait an additional 200us to work around a bug with the
    10232 		 * ready bit in the MDIC register.
   10233 		 */
   10234 		delay(200);
   10235 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10236 			device_printf(dev, "%s failed\n", __func__);
   10237 			goto out;
   10238 		}
   10239 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10240 		delay(200);
   10241 	} else
   10242 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10243 
   10244 out:
   10245 	sc->phy.release(sc);
   10246 }
   10247 
   10248 /*
   10249  * wm_gmii_bm_readreg:	[mii interface function]
   10250  *
    10251  *	Read a PHY register on the BM PHY (82574, 82583, ICH variants).
    10252  * This could be handled by the PHY layer if we didn't have to lock the
    10253  * resource ...
   10254  */
   10255 static int
   10256 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10257 {
   10258 	struct wm_softc *sc = device_private(dev);
   10259 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10260 	uint16_t val;
   10261 	int rv;
   10262 
   10263 	if (sc->phy.acquire(sc)) {
   10264 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10265 		return 0;
   10266 	}
   10267 
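	/*
	 * Registers on page 768 and above, plus a few special registers,
	 * are only reachable through PHY address 1.
	 */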
   10268 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10269 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10270 		    || (reg == 31)) ? 1 : phy;
   10271 	/* Page 800 works differently than the rest so it has its own func */
   10272 	if (page == BM_WUC_PAGE) {
   10273 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10274 		rv = val;
   10275 		goto release;
   10276 	}
   10277 
   10278 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10279 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10280 		    && (sc->sc_type != WM_T_82583))
   10281 			wm_gmii_mdic_writereg(dev, phy,
   10282 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10283 		else
   10284 			wm_gmii_mdic_writereg(dev, phy,
   10285 			    BME1000_PHY_PAGE_SELECT, page);
   10286 	}
   10287 
   10288 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10289 
   10290 release:
   10291 	sc->phy.release(sc);
   10292 	return rv;
   10293 }
   10294 
   10295 /*
   10296  * wm_gmii_bm_writereg:	[mii interface function]
   10297  *
    10298  *	Write a PHY register on the BM PHY (82574, 82583, ICH variants).
    10299  * This could be handled by the PHY layer if we didn't have to lock the
    10300  * resource ...
   10301  */
   10302 static void
   10303 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10304 {
   10305 	struct wm_softc *sc = device_private(dev);
   10306 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10307 
   10308 	if (sc->phy.acquire(sc)) {
   10309 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10310 		return;
   10311 	}
   10312 
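	/* Same PHY address remap as wm_gmii_bm_readreg(). */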
   10313 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10314 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10315 		    || (reg == 31)) ? 1 : phy;
   10316 	/* Page 800 works differently than the rest so it has its own func */
   10317 	if (page == BM_WUC_PAGE) {
   10318 		uint16_t tmp;
   10319 
   10320 		tmp = val;
   10321 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10322 		goto release;
   10323 	}
   10324 
   10325 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10326 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10327 		    && (sc->sc_type != WM_T_82583))
   10328 			wm_gmii_mdic_writereg(dev, phy,
   10329 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10330 		else
   10331 			wm_gmii_mdic_writereg(dev, phy,
   10332 			    BME1000_PHY_PAGE_SELECT, page);
   10333 	}
   10334 
   10335 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10336 
   10337 release:
   10338 	sc->phy.release(sc);
   10339 }
   10340 
   10341 static void
   10342 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10343 {
   10344 	struct wm_softc *sc = device_private(dev);
   10345 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10346 	uint16_t wuce, reg;
   10347 
   10348 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10349 		device_xname(dev), __func__));
   10350 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10351 	if (sc->sc_type == WM_T_PCH) {
    10352 		/* XXX The e1000 driver does nothing here... why? */
   10353 	}
   10354 
   10355 	/*
   10356 	 * 1) Enable PHY wakeup register first.
   10357 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10358 	 */
   10359 
   10360 	/* Set page 769 */
   10361 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10362 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10363 
   10364 	/* Read WUCE and save it */
   10365 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10366 
   10367 	reg = wuce | BM_WUC_ENABLE_BIT;
   10368 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10369 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10370 
   10371 	/* Select page 800 */
   10372 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10373 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10374 
   10375 	/*
   10376 	 * 2) Access PHY wakeup register.
   10377 	 * See e1000_access_phy_wakeup_reg_bm.
   10378 	 */
   10379 
   10380 	/* Write page 800 */
   10381 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10382 
   10383 	if (rd)
   10384 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10385 	else
   10386 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10387 
   10388 	/*
   10389 	 * 3) Disable PHY wakeup register.
   10390 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10391 	 */
   10392 	/* Set page 769 */
   10393 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10394 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10395 
   10396 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10397 }
   10398 
   10399 /*
   10400  * wm_gmii_hv_readreg:	[mii interface function]
   10401  *
    10402  *	Read a PHY register on the HV PHY (PCH*).
    10403  * This could be handled by the PHY layer if we didn't have to lock the
    10404  * resource ...
   10405  */
   10406 static int
   10407 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10408 {
   10409 	struct wm_softc *sc = device_private(dev);
   10410 	int rv;
   10411 
   10412 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10413 		device_xname(dev), __func__));
   10414 	if (sc->phy.acquire(sc)) {
   10415 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10416 		return 0;
   10417 	}
   10418 
   10419 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10420 	sc->phy.release(sc);
   10421 	return rv;
   10422 }
   10423 
   10424 static int
   10425 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10426 {
   10427 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10428 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10429 	uint16_t val;
   10430 	int rv;
   10431 
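	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START are only
	 * reachable through PHY address 1.
	 */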
   10432 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10433 
   10434 	/* Page 800 works differently than the rest so it has its own func */
   10435 	if (page == BM_WUC_PAGE) {
   10436 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10437 		return val;
   10438 	}
   10439 
   10440 	/*
    10441 	 * Pages lower than 768 work differently than the rest, so they
    10442 	 * need their own function
   10443 	 */
   10444 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10445 		printf("gmii_hv_readreg!!!\n");
   10446 		return 0;
   10447 	}
   10448 
   10449 	/*
   10450 	 * XXX I21[789] documents say that the SMBus Address register is at
   10451 	 * PHY address 01, Page 0 (not 768), Register 26.
   10452 	 */
   10453 	if (page == HV_INTC_FC_PAGE_START)
   10454 		page = 0;
   10455 
   10456 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10457 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10458 		    page << BME1000_PAGE_SHIFT);
   10459 	}
   10460 
   10461 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10462 	return rv;
   10463 }
   10464 
   10465 /*
   10466  * wm_gmii_hv_writereg:	[mii interface function]
   10467  *
    10468  *	Write a PHY register on the HV PHY (PCH*).
    10469  * This could be handled by the PHY layer if we didn't have to lock the
    10470  * resource ...
   10471  */
   10472 static void
   10473 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10474 {
   10475 	struct wm_softc *sc = device_private(dev);
   10476 
   10477 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10478 		device_xname(dev), __func__));
   10479 
   10480 	if (sc->phy.acquire(sc)) {
   10481 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10482 		return;
   10483 	}
   10484 
   10485 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10486 	sc->phy.release(sc);
   10487 }
   10488 
   10489 static void
   10490 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10491 {
   10492 	struct wm_softc *sc = device_private(dev);
   10493 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10494 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10495 
   10496 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10497 
   10498 	/* Page 800 works differently than the rest so it has its own func */
   10499 	if (page == BM_WUC_PAGE) {
   10500 		uint16_t tmp;
   10501 
   10502 		tmp = val;
   10503 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10504 		return;
   10505 	}
   10506 
   10507 	/*
    10508 	 * Pages lower than 768 work differently than the rest, so they
    10509 	 * need their own function
   10510 	 */
   10511 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10512 		printf("gmii_hv_writereg!!!\n");
   10513 		return;
   10514 	}
   10515 
   10516 	{
   10517 		/*
   10518 		 * XXX I21[789] documents say that the SMBus Address register
   10519 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10520 		 */
   10521 		if (page == HV_INTC_FC_PAGE_START)
   10522 			page = 0;
   10523 
   10524 		/*
   10525 		 * XXX Workaround MDIO accesses being disabled after entering
   10526 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10527 		 * register is set)
   10528 		 */
   10529 		if (sc->sc_phytype == WMPHY_82578) {
   10530 			struct mii_softc *child;
   10531 
   10532 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10533 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10534 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10535 			    && ((val & (1 << 11)) != 0)) {
   10536 				printf("XXX need workaround\n");
   10537 			}
   10538 		}
   10539 
   10540 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10541 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10542 			    page << BME1000_PAGE_SHIFT);
   10543 		}
   10544 	}
   10545 
   10546 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10547 }
   10548 
   10549 /*
   10550  * wm_gmii_82580_readreg:	[mii interface function]
   10551  *
   10552  *	Read a PHY register on the 82580 and I350.
   10553  * This could be handled by the PHY layer if we didn't have to lock the
    10554  * resource ...
   10555  */
   10556 static int
   10557 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10558 {
   10559 	struct wm_softc *sc = device_private(dev);
   10560 	int rv;
   10561 
   10562 	if (sc->phy.acquire(sc) != 0) {
   10563 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10564 		return 0;
   10565 	}
   10566 
   10567 #ifdef DIAGNOSTIC
   10568 	if (reg > MII_ADDRMASK) {
   10569 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10570 		    __func__, sc->sc_phytype, reg);
   10571 		reg &= MII_ADDRMASK;
   10572 	}
   10573 #endif
   10574 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10575 
   10576 	sc->phy.release(sc);
   10577 	return rv;
   10578 }
   10579 
   10580 /*
   10581  * wm_gmii_82580_writereg:	[mii interface function]
   10582  *
   10583  *	Write a PHY register on the 82580 and I350.
   10584  * This could be handled by the PHY layer if we didn't have to lock the
    10585  * resource ...
   10586  */
   10587 static void
   10588 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10589 {
   10590 	struct wm_softc *sc = device_private(dev);
   10591 
   10592 	if (sc->phy.acquire(sc) != 0) {
   10593 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10594 		return;
   10595 	}
   10596 
   10597 #ifdef DIAGNOSTIC
   10598 	if (reg > MII_ADDRMASK) {
   10599 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10600 		    __func__, sc->sc_phytype, reg);
   10601 		reg &= MII_ADDRMASK;
   10602 	}
   10603 #endif
   10604 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10605 
   10606 	sc->phy.release(sc);
   10607 }
   10608 
   10609 /*
   10610  * wm_gmii_gs40g_readreg:	[mii interface function]
   10611  *
    10612  *	Read a PHY register on the I210 and I211.
    10613  * This could be handled by the PHY layer if we didn't have to lock the
    10614  * resource ...
   10615  */
   10616 static int
   10617 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10618 {
   10619 	struct wm_softc *sc = device_private(dev);
   10620 	int page, offset;
   10621 	int rv;
   10622 
   10623 	/* Acquire semaphore */
   10624 	if (sc->phy.acquire(sc)) {
   10625 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10626 		return 0;
   10627 	}
   10628 
   10629 	/* Page select */
   10630 	page = reg >> GS40G_PAGE_SHIFT;
   10631 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10632 
   10633 	/* Read reg */
   10634 	offset = reg & GS40G_OFFSET_MASK;
   10635 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10636 
   10637 	sc->phy.release(sc);
   10638 	return rv;
   10639 }
   10640 
   10641 /*
   10642  * wm_gmii_gs40g_writereg:	[mii interface function]
   10643  *
   10644  *	Write a PHY register on the I210 and I211.
   10645  * This could be handled by the PHY layer if we didn't have to lock the
    10646  * resource ...
   10647  */
   10648 static void
   10649 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10650 {
   10651 	struct wm_softc *sc = device_private(dev);
   10652 	int page, offset;
   10653 
   10654 	/* Acquire semaphore */
   10655 	if (sc->phy.acquire(sc)) {
   10656 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10657 		return;
   10658 	}
   10659 
   10660 	/* Page select */
   10661 	page = reg >> GS40G_PAGE_SHIFT;
   10662 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10663 
   10664 	/* Write reg */
   10665 	offset = reg & GS40G_OFFSET_MASK;
   10666 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10667 
   10668 	/* Release semaphore */
   10669 	sc->phy.release(sc);
   10670 }
   10671 
   10672 /*
   10673  * wm_gmii_statchg:	[mii interface function]
   10674  *
   10675  *	Callback from MII layer when media changes.
   10676  */
   10677 static void
   10678 wm_gmii_statchg(struct ifnet *ifp)
   10679 {
   10680 	struct wm_softc *sc = ifp->if_softc;
   10681 	struct mii_data *mii = &sc->sc_mii;
   10682 
   10683 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10684 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10685 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10686 
   10687 	/*
   10688 	 * Get flow control negotiation result.
   10689 	 */
   10690 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10691 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10692 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10693 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10694 	}
   10695 
   10696 	if (sc->sc_flowflags & IFM_FLOW) {
   10697 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10698 			sc->sc_ctrl |= CTRL_TFCE;
   10699 			sc->sc_fcrtl |= FCRTL_XONE;
   10700 		}
   10701 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10702 			sc->sc_ctrl |= CTRL_RFCE;
   10703 	}
   10704 
   10705 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10706 		DPRINTF(WM_DEBUG_LINK,
   10707 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10708 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10709 	} else {
   10710 		DPRINTF(WM_DEBUG_LINK,
   10711 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10712 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10713 	}
   10714 
   10715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10716 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10717 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10718 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10719 	if (sc->sc_type == WM_T_80003) {
   10720 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10721 		case IFM_1000_T:
   10722 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10723 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10724 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10725 			break;
   10726 		default:
   10727 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10728 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10729 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10730 			break;
   10731 		}
   10732 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10733 	}
   10734 }
   10735 
    10736 /* Kumeran related (80003, ICH* and PCH*) */
   10737 
   10738 /*
   10739  * wm_kmrn_readreg:
   10740  *
    10741  *	Read a Kumeran register
   10742  */
   10743 static int
   10744 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10745 {
   10746 	int rv;
   10747 
   10748 	if (sc->sc_type == WM_T_80003)
   10749 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10750 	else
   10751 		rv = sc->phy.acquire(sc);
   10752 	if (rv != 0) {
   10753 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10754 		    __func__);
   10755 		return rv;
   10756 	}
   10757 
   10758 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10759 
   10760 	if (sc->sc_type == WM_T_80003)
   10761 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10762 	else
   10763 		sc->phy.release(sc);
   10764 
   10765 	return rv;
   10766 }
   10767 
   10768 static int
   10769 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10770 {
   10771 
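	/*
	 * Start the read by writing the register offset with the read
	 * enable bit set, give the hardware 2us to respond, then read
	 * the data back from the same register.
	 */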
   10772 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10773 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10774 	    KUMCTRLSTA_REN);
   10775 	CSR_WRITE_FLUSH(sc);
   10776 	delay(2);
   10777 
   10778 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10779 
   10780 	return 0;
   10781 }
   10782 
   10783 /*
   10784  * wm_kmrn_writereg:
   10785  *
    10786  *	Write a Kumeran register
   10787  */
   10788 static int
   10789 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10790 {
   10791 	int rv;
   10792 
   10793 	if (sc->sc_type == WM_T_80003)
   10794 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10795 	else
   10796 		rv = sc->phy.acquire(sc);
   10797 	if (rv != 0) {
   10798 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10799 		    __func__);
   10800 		return rv;
   10801 	}
   10802 
   10803 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10804 
   10805 	if (sc->sc_type == WM_T_80003)
   10806 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10807 	else
   10808 		sc->phy.release(sc);
   10809 
   10810 	return rv;
   10811 }
   10812 
   10813 static int
   10814 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10815 {
   10816 
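	/* A write is a single access: the offset field plus the data. */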
   10817 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10818 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10819 
   10820 	return 0;
   10821 }
   10822 
   10823 /* SGMII related */
   10824 
   10825 /*
   10826  * wm_sgmii_uses_mdio
   10827  *
   10828  * Check whether the transaction is to the internal PHY or the external
   10829  * MDIO interface. Return true if it's MDIO.
   10830  */
   10831 static bool
   10832 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10833 {
   10834 	uint32_t reg;
   10835 	bool ismdio = false;
   10836 
   10837 	switch (sc->sc_type) {
   10838 	case WM_T_82575:
   10839 	case WM_T_82576:
   10840 		reg = CSR_READ(sc, WMREG_MDIC);
   10841 		ismdio = ((reg & MDIC_DEST) != 0);
   10842 		break;
   10843 	case WM_T_82580:
   10844 	case WM_T_I350:
   10845 	case WM_T_I354:
   10846 	case WM_T_I210:
   10847 	case WM_T_I211:
   10848 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10849 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10850 		break;
   10851 	default:
   10852 		break;
   10853 	}
   10854 
   10855 	return ismdio;
   10856 }
   10857 
   10858 /*
   10859  * wm_sgmii_readreg:	[mii interface function]
   10860  *
   10861  *	Read a PHY register on the SGMII
   10862  * This could be handled by the PHY layer if we didn't have to lock the
    10863  * resource ...
   10864  */
   10865 static int
   10866 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10867 {
   10868 	struct wm_softc *sc = device_private(dev);
   10869 	uint32_t i2ccmd;
   10870 	int i, rv;
   10871 
   10872 	if (sc->phy.acquire(sc)) {
   10873 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10874 		return 0;
   10875 	}
   10876 
   10877 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10878 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10879 	    | I2CCMD_OPCODE_READ;
   10880 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10881 
   10882 	/* Poll the ready bit */
   10883 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10884 		delay(50);
   10885 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10886 		if (i2ccmd & I2CCMD_READY)
   10887 			break;
   10888 	}
   10889 	if ((i2ccmd & I2CCMD_READY) == 0)
   10890 		device_printf(dev, "I2CCMD Read did not complete\n");
   10891 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10892 		device_printf(dev, "I2CCMD Error bit set\n");
   10893 
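	/* The data comes back byte-swapped over I2C; swap to host order. */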
   10894 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10895 
   10896 	sc->phy.release(sc);
   10897 	return rv;
   10898 }
   10899 
   10900 /*
   10901  * wm_sgmii_writereg:	[mii interface function]
   10902  *
   10903  *	Write a PHY register on the SGMII.
   10904  * This could be handled by the PHY layer if we didn't have to lock the
    10905  * resource ...
   10906  */
   10907 static void
   10908 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10909 {
   10910 	struct wm_softc *sc = device_private(dev);
   10911 	uint32_t i2ccmd;
   10912 	int i;
   10913 	int val_swapped;
   10914 
   10915 	if (sc->phy.acquire(sc) != 0) {
   10916 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10917 		return;
   10918 	}
   10919 	/* Swap the data bytes for the I2C interface */
   10920 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10921 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10922 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10923 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10924 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10925 
   10926 	/* Poll the ready bit */
   10927 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10928 		delay(50);
   10929 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10930 		if (i2ccmd & I2CCMD_READY)
   10931 			break;
   10932 	}
   10933 	if ((i2ccmd & I2CCMD_READY) == 0)
   10934 		device_printf(dev, "I2CCMD Write did not complete\n");
   10935 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10936 		device_printf(dev, "I2CCMD Error bit set\n");
   10937 
   10938 	sc->phy.release(sc);
   10939 }
   10940 
   10941 /* TBI related */
   10942 
   10943 /*
   10944  * wm_tbi_mediainit:
   10945  *
   10946  *	Initialize media for use on 1000BASE-X devices.
   10947  */
   10948 static void
   10949 wm_tbi_mediainit(struct wm_softc *sc)
   10950 {
   10951 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10952 	const char *sep = "";
   10953 
   10954 	if (sc->sc_type < WM_T_82543)
   10955 		sc->sc_tipg = TIPG_WM_DFLT;
   10956 	else
   10957 		sc->sc_tipg = TIPG_LG_DFLT;
   10958 
   10959 	sc->sc_tbi_serdes_anegticks = 5;
   10960 
   10961 	/* Initialize our media structures */
   10962 	sc->sc_mii.mii_ifp = ifp;
   10963 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10964 
   10965 	if ((sc->sc_type >= WM_T_82575)
   10966 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10967 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10968 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10969 	else
   10970 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10971 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10972 
   10973 	/*
   10974 	 * SWD Pins:
   10975 	 *
   10976 	 *	0 = Link LED (output)
   10977 	 *	1 = Loss Of Signal (input)
   10978 	 */
   10979 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10980 
   10981 	/* XXX Perhaps this is only for TBI */
   10982 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10983 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10984 
   10985 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10986 		sc->sc_ctrl &= ~CTRL_LRST;
   10987 
   10988 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10989 
   10990 #define	ADD(ss, mm, dd)							\
   10991 do {									\
   10992 	aprint_normal("%s%s", sep, ss);					\
   10993 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10994 	sep = ", ";							\
   10995 } while (/*CONSTCOND*/0)
   10996 
   10997 	aprint_normal_dev(sc->sc_dev, "");
   10998 
   10999 	if (sc->sc_type == WM_T_I354) {
   11000 		uint32_t status;
   11001 
   11002 		status = CSR_READ(sc, WMREG_STATUS);
   11003 		if (((status & STATUS_2P5_SKU) != 0)
   11004 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11005 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11006 		} else
   11007 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11008 	} else if (sc->sc_type == WM_T_82545) {
   11009 		/* Only 82545 is LX (XXX except SFP) */
   11010 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11011 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11012 	} else {
   11013 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11014 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11015 	}
   11016 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11017 	aprint_normal("\n");
   11018 
   11019 #undef ADD
   11020 
   11021 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11022 }
   11023 
   11024 /*
   11025  * wm_tbi_mediachange:	[ifmedia interface function]
   11026  *
   11027  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11028  */
   11029 static int
   11030 wm_tbi_mediachange(struct ifnet *ifp)
   11031 {
   11032 	struct wm_softc *sc = ifp->if_softc;
   11033 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11034 	uint32_t status;
   11035 	int i;
   11036 
   11037 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11038 		/* XXX need some work for >= 82571 and < 82575 */
   11039 		if (sc->sc_type < WM_T_82575)
   11040 			return 0;
   11041 	}
   11042 
   11043 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11044 	    || (sc->sc_type >= WM_T_82575))
   11045 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11046 
   11047 	sc->sc_ctrl &= ~CTRL_LRST;
   11048 	sc->sc_txcw = TXCW_ANE;
   11049 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11050 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11051 	else if (ife->ifm_media & IFM_FDX)
   11052 		sc->sc_txcw |= TXCW_FD;
   11053 	else
   11054 		sc->sc_txcw |= TXCW_HD;
   11055 
   11056 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11057 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11058 
   11059 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11060 		    device_xname(sc->sc_dev), sc->sc_txcw));
   11061 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11062 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11063 	CSR_WRITE_FLUSH(sc);
   11064 	delay(1000);
   11065 
   11066 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   11067 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   11068 
   11069 	/*
   11070 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   11071 	 * optics detect a signal, 0 if they don't.
   11072 	 */
   11073 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   11074 		/* Have signal; wait for the link to come up. */
   11075 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11076 			delay(10000);
   11077 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11078 				break;
   11079 		}
   11080 
   11081 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11082 			    device_xname(sc->sc_dev),i));
   11083 
   11084 		status = CSR_READ(sc, WMREG_STATUS);
   11085 		DPRINTF(WM_DEBUG_LINK,
   11086 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11087 			device_xname(sc->sc_dev),status, STATUS_LU));
   11088 		if (status & STATUS_LU) {
   11089 			/* Link is up. */
   11090 			DPRINTF(WM_DEBUG_LINK,
   11091 			    ("%s: LINK: set media -> link up %s\n",
   11092 			    device_xname(sc->sc_dev),
   11093 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   11094 
   11095 			/*
   11096 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11097 			 * so we should update sc->sc_ctrl
   11098 			 */
   11099 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11100 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11101 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11102 			if (status & STATUS_FD)
   11103 				sc->sc_tctl |=
   11104 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11105 			else
   11106 				sc->sc_tctl |=
   11107 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11108 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11109 				sc->sc_fcrtl |= FCRTL_XONE;
   11110 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11111 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11112 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11113 				      sc->sc_fcrtl);
   11114 			sc->sc_tbi_linkup = 1;
   11115 		} else {
   11116 			if (i == WM_LINKUP_TIMEOUT)
   11117 				wm_check_for_link(sc);
   11118 			/* Link is down. */
   11119 			DPRINTF(WM_DEBUG_LINK,
   11120 			    ("%s: LINK: set media -> link down\n",
   11121 			    device_xname(sc->sc_dev)));
   11122 			sc->sc_tbi_linkup = 0;
   11123 		}
   11124 	} else {
   11125 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11126 		    device_xname(sc->sc_dev)));
   11127 		sc->sc_tbi_linkup = 0;
   11128 	}
   11129 
   11130 	wm_tbi_serdes_set_linkled(sc);
   11131 
   11132 	return 0;
   11133 }
   11134 
   11135 /*
   11136  * wm_tbi_mediastatus:	[ifmedia interface function]
   11137  *
   11138  *	Get the current interface media status on a 1000BASE-X device.
   11139  */
   11140 static void
   11141 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11142 {
   11143 	struct wm_softc *sc = ifp->if_softc;
   11144 	uint32_t ctrl, status;
   11145 
   11146 	ifmr->ifm_status = IFM_AVALID;
   11147 	ifmr->ifm_active = IFM_ETHER;
   11148 
   11149 	status = CSR_READ(sc, WMREG_STATUS);
   11150 	if ((status & STATUS_LU) == 0) {
   11151 		ifmr->ifm_active |= IFM_NONE;
   11152 		return;
   11153 	}
   11154 
   11155 	ifmr->ifm_status |= IFM_ACTIVE;
   11156 	/* Only 82545 is LX */
   11157 	if (sc->sc_type == WM_T_82545)
   11158 		ifmr->ifm_active |= IFM_1000_LX;
   11159 	else
   11160 		ifmr->ifm_active |= IFM_1000_SX;
   11161 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11162 		ifmr->ifm_active |= IFM_FDX;
   11163 	else
   11164 		ifmr->ifm_active |= IFM_HDX;
   11165 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11166 	if (ctrl & CTRL_RFCE)
   11167 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11168 	if (ctrl & CTRL_TFCE)
   11169 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11170 }
   11171 
   11172 /* XXX TBI only */
   11173 static int
   11174 wm_check_for_link(struct wm_softc *sc)
   11175 {
   11176 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11177 	uint32_t rxcw;
   11178 	uint32_t ctrl;
   11179 	uint32_t status;
   11180 	uint32_t sig;
   11181 
   11182 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11183 		/* XXX need some work for >= 82571 */
   11184 		if (sc->sc_type >= WM_T_82571) {
   11185 			sc->sc_tbi_linkup = 1;
   11186 			return 0;
   11187 		}
   11188 	}
   11189 
   11190 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11191 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11192 	status = CSR_READ(sc, WMREG_STATUS);
   11193 
   11194 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11195 
   11196 	DPRINTF(WM_DEBUG_LINK,
   11197 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11198 		device_xname(sc->sc_dev), __func__,
   11199 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11200 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11201 
   11202 	/*
   11203 	 * SWDPIN   LU RXCW
   11204 	 *      0    0    0
   11205 	 *      0    0    1	(should not happen)
   11206 	 *      0    1    0	(should not happen)
   11207 	 *      0    1    1	(should not happen)
   11208 	 *      1    0    0	Disable autonego and force linkup
   11209 	 *      1    0    1	got /C/ but not linkup yet
   11210 	 *      1    1    0	(linkup)
   11211 	 *      1    1    1	If IFM_AUTO, back to autonego
   11212 	 *
   11213 	 */
   11214 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11215 	    && ((status & STATUS_LU) == 0)
   11216 	    && ((rxcw & RXCW_C) == 0)) {
   11217 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11218 			__func__));
   11219 		sc->sc_tbi_linkup = 0;
   11220 		/* Disable auto-negotiation in the TXCW register */
   11221 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11222 
   11223 		/*
   11224 		 * Force link-up and also force full-duplex.
   11225 		 *
    11226 			 * NOTE: CTRL's TFCE and RFCE were updated automatically,
   11227 		 * so we should update sc->sc_ctrl
   11228 		 */
   11229 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11230 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11231 	} else if (((status & STATUS_LU) != 0)
   11232 	    && ((rxcw & RXCW_C) != 0)
   11233 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11234 		sc->sc_tbi_linkup = 1;
   11235 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11236 			__func__));
   11237 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11238 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11239 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11240 	    && ((rxcw & RXCW_C) != 0)) {
   11241 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11242 	} else {
   11243 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11244 			status));
   11245 	}
   11246 
   11247 	return 0;
   11248 }
   11249 
   11250 /*
   11251  * wm_tbi_tick:
   11252  *
   11253  *	Check the link on TBI devices.
   11254  *	This function acts as mii_tick().
   11255  */
   11256 static void
   11257 wm_tbi_tick(struct wm_softc *sc)
   11258 {
   11259 	struct mii_data *mii = &sc->sc_mii;
   11260 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11261 	uint32_t status;
   11262 
   11263 	KASSERT(WM_CORE_LOCKED(sc));
   11264 
   11265 	status = CSR_READ(sc, WMREG_STATUS);
   11266 
   11267 	/* XXX is this needed? */
   11268 	(void)CSR_READ(sc, WMREG_RXCW);
   11269 	(void)CSR_READ(sc, WMREG_CTRL);
   11270 
   11271 	/* set link status */
   11272 	if ((status & STATUS_LU) == 0) {
   11273 		DPRINTF(WM_DEBUG_LINK,
   11274 		    ("%s: LINK: checklink -> down\n",
   11275 			device_xname(sc->sc_dev)));
   11276 		sc->sc_tbi_linkup = 0;
   11277 	} else if (sc->sc_tbi_linkup == 0) {
   11278 		DPRINTF(WM_DEBUG_LINK,
   11279 		    ("%s: LINK: checklink -> up %s\n",
   11280 			device_xname(sc->sc_dev),
   11281 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11282 		sc->sc_tbi_linkup = 1;
   11283 		sc->sc_tbi_serdes_ticks = 0;
   11284 	}
   11285 
   11286 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11287 		goto setled;
   11288 
   11289 	if ((status & STATUS_LU) == 0) {
   11290 		sc->sc_tbi_linkup = 0;
   11291 		/* If the timer expired, retry autonegotiation */
   11292 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11293 		    && (++sc->sc_tbi_serdes_ticks
   11294 			>= sc->sc_tbi_serdes_anegticks)) {
   11295 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11296 			sc->sc_tbi_serdes_ticks = 0;
   11297 			/*
   11298 			 * Reset the link, and let autonegotiation do
   11299 			 * its thing
   11300 			 */
   11301 			sc->sc_ctrl |= CTRL_LRST;
   11302 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11303 			CSR_WRITE_FLUSH(sc);
   11304 			delay(1000);
   11305 			sc->sc_ctrl &= ~CTRL_LRST;
   11306 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11307 			CSR_WRITE_FLUSH(sc);
   11308 			delay(1000);
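			/*
			 * Toggling TXCW_ANE off and back on restarts
			 * the autonegotiation process.
			 */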
   11309 			CSR_WRITE(sc, WMREG_TXCW,
   11310 			    sc->sc_txcw & ~TXCW_ANE);
   11311 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11312 		}
   11313 	}
   11314 
   11315 setled:
   11316 	wm_tbi_serdes_set_linkled(sc);
   11317 }
   11318 
   11319 /* SERDES related */
   11320 static void
   11321 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11322 {
   11323 	uint32_t reg;
   11324 
   11325 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11326 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11327 		return;
   11328 
   11329 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11330 	reg |= PCS_CFG_PCS_EN;
   11331 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11332 
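	/*
	 * Drive SDP3 low.  On typical 82575/82576 SerDes boards this pin
	 * powers up the external link (e.g. enables the SFP transmitter),
	 * though the exact wiring is board-specific.
	 */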
   11333 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11334 	reg &= ~CTRL_EXT_SWDPIN(3);
   11335 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11336 	CSR_WRITE_FLUSH(sc);
   11337 }
   11338 
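/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on SerDes devices.
 */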
   11339 static int
   11340 wm_serdes_mediachange(struct ifnet *ifp)
   11341 {
   11342 	struct wm_softc *sc = ifp->if_softc;
   11343 	bool pcs_autoneg = true; /* XXX */
   11344 	uint32_t ctrl_ext, pcs_lctl, reg;
   11345 
   11346 	/* XXX Currently, this function is not called on 8257[12] */
   11347 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11348 	    || (sc->sc_type >= WM_T_82575))
   11349 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11350 
   11351 	wm_serdes_power_up_link_82575(sc);
   11352 
   11353 	sc->sc_ctrl |= CTRL_SLU;
   11354 
   11355 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11356 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11357 
   11358 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11359 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11360 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11361 	case CTRL_EXT_LINK_MODE_SGMII:
   11362 		pcs_autoneg = true;
   11363 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11364 		break;
   11365 	case CTRL_EXT_LINK_MODE_1000KX:
   11366 		pcs_autoneg = false;
   11367 		/* FALLTHROUGH */
   11368 	default:
   11369 		if ((sc->sc_type == WM_T_82575)
   11370 		    || (sc->sc_type == WM_T_82576)) {
   11371 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11372 				pcs_autoneg = false;
   11373 		}
   11374 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11375 		    | CTRL_FRCFDX;
   11376 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11377 	}
   11378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11379 
   11380 	if (pcs_autoneg) {
   11381 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11382 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11383 
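		/* Advertise symmetric and asymmetric PAUSE */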
   11384 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11385 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11386 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11387 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11388 	} else
   11389 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11390 
	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);

   11394 	return 0;
   11395 }
   11396 
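/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on SerDes devices.
 */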
   11397 static void
   11398 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11399 {
   11400 	struct wm_softc *sc = ifp->if_softc;
   11401 	struct mii_data *mii = &sc->sc_mii;
   11402 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11403 	uint32_t pcs_adv, pcs_lpab, reg;
   11404 
   11405 	ifmr->ifm_status = IFM_AVALID;
   11406 	ifmr->ifm_active = IFM_ETHER;
   11407 
   11408 	/* Check PCS */
   11409 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11410 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11411 		ifmr->ifm_active |= IFM_NONE;
   11412 		sc->sc_tbi_linkup = 0;
   11413 		goto setled;
   11414 	}
   11415 
   11416 	sc->sc_tbi_linkup = 1;
   11417 	ifmr->ifm_status |= IFM_ACTIVE;
   11418 	if (sc->sc_type == WM_T_I354) {
   11419 		uint32_t status;
   11420 
   11421 		status = CSR_READ(sc, WMREG_STATUS);
   11422 		if (((status & STATUS_2P5_SKU) != 0)
   11423 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11424 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11425 		} else
   11426 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11427 	} else {
   11428 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11429 		case PCS_LSTS_SPEED_10:
   11430 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11431 			break;
   11432 		case PCS_LSTS_SPEED_100:
   11433 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11434 			break;
   11435 		case PCS_LSTS_SPEED_1000:
   11436 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11437 			break;
   11438 		default:
   11439 			device_printf(sc->sc_dev, "Unknown speed\n");
   11440 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11441 			break;
   11442 		}
   11443 	}
   11444 	if ((reg & PCS_LSTS_FDX) != 0)
   11445 		ifmr->ifm_active |= IFM_FDX;
   11446 	else
   11447 		ifmr->ifm_active |= IFM_HDX;
   11448 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11449 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11450 		/* Check flow */
   11451 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11452 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11453 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11454 			goto setled;
   11455 		}
   11456 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11457 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11458 		DPRINTF(WM_DEBUG_LINK,
   11459 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
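		/*
		 * Resolve flow control from our advertisement (ANADV)
		 * and the link partner's ability (LPAB), following the
		 * usual IEEE 802.3 PAUSE resolution: symmetric PAUSE on
		 * both sides enables both directions; the asymmetric
		 * combinations enable TX-only or RX-only pause.
		 */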
   11460 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11461 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11462 			mii->mii_media_active |= IFM_FLOW
   11463 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11464 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11465 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11466 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11467 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11468 			mii->mii_media_active |= IFM_FLOW
   11469 			    | IFM_ETH_TXPAUSE;
   11470 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11471 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11472 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11473 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11474 			mii->mii_media_active |= IFM_FLOW
   11475 			    | IFM_ETH_RXPAUSE;
   11476 		}
   11477 	}
   11478 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11479 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11480 setled:
   11481 	wm_tbi_serdes_set_linkled(sc);
   11482 }
   11483 
   11484 /*
   11485  * wm_serdes_tick:
   11486  *
   11487  *	Check the link on serdes devices.
   11488  */
   11489 static void
   11490 wm_serdes_tick(struct wm_softc *sc)
   11491 {
   11492 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11493 	struct mii_data *mii = &sc->sc_mii;
   11494 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11495 	uint32_t reg;
   11496 
   11497 	KASSERT(WM_CORE_LOCKED(sc));
   11498 
   11499 	mii->mii_media_status = IFM_AVALID;
   11500 	mii->mii_media_active = IFM_ETHER;
   11501 
   11502 	/* Check PCS */
   11503 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11504 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11505 		mii->mii_media_status |= IFM_ACTIVE;
   11506 		sc->sc_tbi_linkup = 1;
   11507 		sc->sc_tbi_serdes_ticks = 0;
   11508 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11509 		if ((reg & PCS_LSTS_FDX) != 0)
   11510 			mii->mii_media_active |= IFM_FDX;
   11511 		else
   11512 			mii->mii_media_active |= IFM_HDX;
   11513 	} else {
   11514 		mii->mii_media_status |= IFM_NONE;
   11515 		sc->sc_tbi_linkup = 0;
   11516 		/* If the timer expired, retry autonegotiation */
   11517 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11518 		    && (++sc->sc_tbi_serdes_ticks
   11519 			>= sc->sc_tbi_serdes_anegticks)) {
   11520 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11521 			sc->sc_tbi_serdes_ticks = 0;
   11522 			/* XXX */
   11523 			wm_serdes_mediachange(ifp);
   11524 		}
   11525 	}
   11526 
   11527 	wm_tbi_serdes_set_linkled(sc);
   11528 }
   11529 
   11530 /* SFP related */
   11531 
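/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register, polling until the hardware reports the transaction
 *	ready or a timeout expires.
 */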
   11532 static int
   11533 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11534 {
   11535 	uint32_t i2ccmd;
   11536 	int i;
   11537 
   11538 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11539 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11540 
   11541 	/* Poll the ready bit */
   11542 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11543 		delay(50);
   11544 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11545 		if (i2ccmd & I2CCMD_READY)
   11546 			break;
   11547 	}
   11548 	if ((i2ccmd & I2CCMD_READY) == 0)
   11549 		return -1;
   11550 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11551 		return -1;
   11552 
   11553 	*data = i2ccmd & 0x00ff;
   11554 
   11555 	return 0;
   11556 }
   11557 
   11558 static uint32_t
   11559 wm_sfp_get_media_type(struct wm_softc *sc)
   11560 {
   11561 	uint32_t ctrl_ext;
   11562 	uint8_t val = 0;
   11563 	int timeout = 3;
   11564 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11565 	int rv = -1;
   11566 
   11567 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11568 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11569 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11570 	CSR_WRITE_FLUSH(sc);
   11571 
   11572 	/* Read SFP module data */
   11573 	while (timeout) {
   11574 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11575 		if (rv == 0)
   11576 			break;
   11577 		delay(100*1000); /* XXX too big */
   11578 		timeout--;
   11579 	}
   11580 	if (rv != 0)
   11581 		goto out;
   11582 	switch (val) {
   11583 	case SFF_SFP_ID_SFF:
   11584 		aprint_normal_dev(sc->sc_dev,
   11585 		    "Module/Connector soldered to board\n");
   11586 		break;
   11587 	case SFF_SFP_ID_SFP:
   11588 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11589 		break;
   11590 	case SFF_SFP_ID_UNKNOWN:
   11591 		goto out;
   11592 	default:
   11593 		break;
   11594 	}
   11595 
   11596 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11597 	if (rv != 0) {
   11598 		goto out;
   11599 	}
   11600 
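	/*
	 * Pick the media type from the SFF Ethernet compliance codes:
	 * 1000BASE-SX/LX modules are driven as SerDes, while 1000BASE-T
	 * and 100BASE-FX modules go through SGMII.
	 */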
   11601 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11602 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_SERDES;
	}
   11610 
   11611 out:
   11612 	/* Restore I2C interface setting */
   11613 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11614 
   11615 	return mediatype;
   11616 }
   11617 
   11618 /*
   11619  * NVM related.
   11620  * Microwire, SPI (w/wo EERD) and Flash.
   11621  */
   11622 
/* Both SPI and Microwire */
   11624 
   11625 /*
   11626  * wm_eeprom_sendbits:
   11627  *
   11628  *	Send a series of bits to the EEPROM.
   11629  */
   11630 static void
   11631 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11632 {
   11633 	uint32_t reg;
   11634 	int x;
   11635 
   11636 	reg = CSR_READ(sc, WMREG_EECD);
   11637 
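	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low, holding every phase for ~2us.
	 */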
   11638 	for (x = nbits; x > 0; x--) {
   11639 		if (bits & (1U << (x - 1)))
   11640 			reg |= EECD_DI;
   11641 		else
   11642 			reg &= ~EECD_DI;
   11643 		CSR_WRITE(sc, WMREG_EECD, reg);
   11644 		CSR_WRITE_FLUSH(sc);
   11645 		delay(2);
   11646 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11647 		CSR_WRITE_FLUSH(sc);
   11648 		delay(2);
   11649 		CSR_WRITE(sc, WMREG_EECD, reg);
   11650 		CSR_WRITE_FLUSH(sc);
   11651 		delay(2);
   11652 	}
   11653 }
   11654 
   11655 /*
   11656  * wm_eeprom_recvbits:
   11657  *
   11658  *	Receive a series of bits from the EEPROM.
   11659  */
   11660 static void
   11661 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11662 {
   11663 	uint32_t reg, val;
   11664 	int x;
   11665 
   11666 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11667 
   11668 	val = 0;
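	/* Sample DO while SK is high, assembling the bits MSB first. */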
   11669 	for (x = nbits; x > 0; x--) {
   11670 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11671 		CSR_WRITE_FLUSH(sc);
   11672 		delay(2);
   11673 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11674 			val |= (1U << (x - 1));
   11675 		CSR_WRITE(sc, WMREG_EECD, reg);
   11676 		CSR_WRITE_FLUSH(sc);
   11677 		delay(2);
   11678 	}
   11679 	*valp = val;
   11680 }
   11681 
   11682 /* Microwire */
   11683 
   11684 /*
   11685  * wm_nvm_read_uwire:
   11686  *
   11687  *	Read a word from the EEPROM using the MicroWire protocol.
   11688  */
   11689 static int
   11690 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11691 {
   11692 	uint32_t reg, val;
   11693 	int i;
   11694 
   11695 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11696 		device_xname(sc->sc_dev), __func__));
   11697 
   11698 	if (sc->nvm.acquire(sc) != 0)
   11699 		return -1;
   11700 
   11701 	for (i = 0; i < wordcnt; i++) {
   11702 		/* Clear SK and DI. */
   11703 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11704 		CSR_WRITE(sc, WMREG_EECD, reg);
   11705 
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and earlier
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
   11713 		if (sc->sc_type == WM_T_82540) {
   11714 			reg |= EECD_SK;
   11715 			CSR_WRITE(sc, WMREG_EECD, reg);
   11716 			reg &= ~EECD_SK;
   11717 			CSR_WRITE(sc, WMREG_EECD, reg);
   11718 			CSR_WRITE_FLUSH(sc);
   11719 			delay(2);
   11720 		}
   11721 		/* XXX: end of workaround */
   11722 
   11723 		/* Set CHIP SELECT. */
   11724 		reg |= EECD_CS;
   11725 		CSR_WRITE(sc, WMREG_EECD, reg);
   11726 		CSR_WRITE_FLUSH(sc);
   11727 		delay(2);
   11728 
   11729 		/* Shift in the READ command. */
   11730 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11731 
   11732 		/* Shift in address. */
   11733 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11734 
   11735 		/* Shift out the data. */
   11736 		wm_eeprom_recvbits(sc, &val, 16);
   11737 		data[i] = val & 0xffff;
   11738 
   11739 		/* Clear CHIP SELECT. */
   11740 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11741 		CSR_WRITE(sc, WMREG_EECD, reg);
   11742 		CSR_WRITE_FLUSH(sc);
   11743 		delay(2);
   11744 	}
   11745 
   11746 	sc->nvm.release(sc);
   11747 	return 0;
   11748 }
   11749 
   11750 /* SPI */
   11751 
   11752 /*
   11753  * Set SPI and FLASH related information from the EECD register.
   11754  * For 82541 and 82547, the word size is taken from EEPROM.
   11755  */
   11756 static int
   11757 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11758 {
   11759 	int size;
   11760 	uint32_t reg;
   11761 	uint16_t data;
   11762 
   11763 	reg = CSR_READ(sc, WMREG_EECD);
   11764 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11765 
   11766 	/* Read the size of NVM from EECD by default */
   11767 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11768 	switch (sc->sc_type) {
   11769 	case WM_T_82541:
   11770 	case WM_T_82541_2:
   11771 	case WM_T_82547:
   11772 	case WM_T_82547_2:
   11773 		/* Set dummy value to access EEPROM */
   11774 		sc->sc_nvm_wordsize = 64;
   11775 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11776 			aprint_error_dev(sc->sc_dev,
   11777 			    "%s: failed to read EEPROM size\n", __func__);
   11778 		}
   11779 		reg = data;
   11780 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11781 		if (size == 0)
   11782 			size = 6; /* 64 word size */
   11783 		else
   11784 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11785 		break;
   11786 	case WM_T_80003:
   11787 	case WM_T_82571:
   11788 	case WM_T_82572:
   11789 	case WM_T_82573: /* SPI case */
   11790 	case WM_T_82574: /* SPI case */
   11791 	case WM_T_82583: /* SPI case */
   11792 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11793 		if (size > 14)
   11794 			size = 14;
   11795 		break;
   11796 	case WM_T_82575:
   11797 	case WM_T_82576:
   11798 	case WM_T_82580:
   11799 	case WM_T_I350:
   11800 	case WM_T_I354:
   11801 	case WM_T_I210:
   11802 	case WM_T_I211:
   11803 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11804 		if (size > 15)
   11805 			size = 15;
   11806 		break;
   11807 	default:
   11808 		aprint_error_dev(sc->sc_dev,
   11809 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   11812 	}
   11813 
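	/* The size field is an exponent; the NVM holds 2^size words. */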
   11814 	sc->sc_nvm_wordsize = 1 << size;
   11815 
   11816 	return 0;
   11817 }
   11818 
   11819 /*
   11820  * wm_nvm_ready_spi:
   11821  *
   11822  *	Wait for a SPI EEPROM to be ready for commands.
   11823  */
   11824 static int
   11825 wm_nvm_ready_spi(struct wm_softc *sc)
   11826 {
   11827 	uint32_t val;
   11828 	int usec;
   11829 
   11830 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11831 		device_xname(sc->sc_dev), __func__));
   11832 
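	/*
	 * Repeatedly issue the RDSR (read status register) opcode and
	 * wait for the busy bit to clear, for up to SPI_MAX_RETRIES
	 * microseconds.
	 */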
   11833 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11834 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11835 		wm_eeprom_recvbits(sc, &val, 8);
   11836 		if ((val & SPI_SR_RDY) == 0)
   11837 			break;
   11838 	}
   11839 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   11841 		return -1;
   11842 	}
   11843 	return 0;
   11844 }
   11845 
   11846 /*
   11847  * wm_nvm_read_spi:
   11848  *
 *	Read a word from the EEPROM using the SPI protocol.
   11850  */
   11851 static int
   11852 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11853 {
   11854 	uint32_t reg, val;
   11855 	int i;
   11856 	uint8_t opc;
   11857 	int rv = 0;
   11858 
   11859 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11860 		device_xname(sc->sc_dev), __func__));
   11861 
   11862 	if (sc->nvm.acquire(sc) != 0)
   11863 		return -1;
   11864 
   11865 	/* Clear SK and CS. */
   11866 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11867 	CSR_WRITE(sc, WMREG_EECD, reg);
   11868 	CSR_WRITE_FLUSH(sc);
   11869 	delay(2);
   11870 
   11871 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11872 		goto out;
   11873 
   11874 	/* Toggle CS to flush commands. */
   11875 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11876 	CSR_WRITE_FLUSH(sc);
   11877 	delay(2);
   11878 	CSR_WRITE(sc, WMREG_EECD, reg);
   11879 	CSR_WRITE_FLUSH(sc);
   11880 	delay(2);
   11881 
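	/*
	 * Parts with 8-bit addressing carry the ninth address bit (A8)
	 * in the opcode, so set it when reading word 128 (byte offset
	 * 256) or beyond.
	 */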
   11882 	opc = SPI_OPC_READ;
   11883 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11884 		opc |= SPI_OPC_A8;
   11885 
   11886 	wm_eeprom_sendbits(sc, opc, 8);
   11887 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11888 
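	/*
	 * wm_eeprom_recvbits() assembles the bits MSB first, but NVM
	 * words are stored low byte first, so swap the two bytes.
	 */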
   11889 	for (i = 0; i < wordcnt; i++) {
   11890 		wm_eeprom_recvbits(sc, &val, 16);
   11891 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11892 	}
   11893 
   11894 	/* Raise CS and clear SK. */
   11895 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11896 	CSR_WRITE(sc, WMREG_EECD, reg);
   11897 	CSR_WRITE_FLUSH(sc);
   11898 	delay(2);
   11899 
   11900 out:
   11901 	sc->nvm.release(sc);
   11902 	return rv;
   11903 }
   11904 
/* Reading via the EERD register */
   11906 
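/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register (selected by 'rw') until the
 *	DONE bit is set, for up to 100000 * 5us = 500ms.
 */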
   11907 static int
   11908 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11909 {
   11910 	uint32_t attempts = 100000;
   11911 	uint32_t i, reg = 0;
   11912 	int32_t done = -1;
   11913 
   11914 	for (i = 0; i < attempts; i++) {
   11915 		reg = CSR_READ(sc, rw);
   11916 
   11917 		if (reg & EERD_DONE) {
   11918 			done = 0;
   11919 			break;
   11920 		}
   11921 		delay(5);
   11922 	}
   11923 
   11924 	return done;
   11925 }
   11926 
   11927 static int
   11928 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11929     uint16_t *data)
   11930 {
   11931 	int i, eerd = 0;
   11932 	int rv = 0;
   11933 
   11934 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11935 		device_xname(sc->sc_dev), __func__));
   11936 
   11937 	if (sc->nvm.acquire(sc) != 0)
   11938 		return -1;
   11939 
   11940 	for (i = 0; i < wordcnt; i++) {
   11941 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11942 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11943 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11944 		if (rv != 0) {
   11945 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   11947 			break;
   11948 		}
   11949 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11950 	}
   11951 
   11952 	sc->nvm.release(sc);
   11953 	return rv;
   11954 }
   11955 
   11956 /* Flash */
   11957 
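/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM banks holds the valid image,
 *	either from dedicated status bits (SPT, ICH8/ICH9 EECD) or by
 *	checking each bank's signature byte.
 */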
   11958 static int
   11959 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11960 {
   11961 	uint32_t eecd;
   11962 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11963 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11964 	uint8_t sig_byte = 0;
   11965 
   11966 	switch (sc->sc_type) {
   11967 	case WM_T_PCH_SPT:
   11968 		/*
   11969 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11970 		 * sector valid bits from the NVM.
   11971 		 */
   11972 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11973 		if ((*bank == 0) || (*bank == 1)) {
   11974 			aprint_error_dev(sc->sc_dev,
   11975 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11976 				*bank);
   11977 			return -1;
   11978 		} else {
   11979 			*bank = *bank - 2;
   11980 			return 0;
   11981 		}
   11982 	case WM_T_ICH8:
   11983 	case WM_T_ICH9:
   11984 		eecd = CSR_READ(sc, WMREG_EECD);
   11985 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11986 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11987 			return 0;
   11988 		}
   11989 		/* FALLTHROUGH */
   11990 	default:
   11991 		/* Default to 0 */
   11992 		*bank = 0;
   11993 
   11994 		/* Check bank 0 */
   11995 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11996 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11997 			*bank = 0;
   11998 			return 0;
   11999 		}
   12000 
   12001 		/* Check bank 1 */
   12002 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12003 		    &sig_byte);
   12004 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12005 			*bank = 1;
   12006 			return 0;
   12007 		}
   12008 	}
   12009 
   12010 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12011 		device_xname(sc->sc_dev)));
   12012 	return -1;
   12013 }
   12014 
   12015 /******************************************************************************
   12016  * This function does initial flash setup so that a new read/write/erase cycle
   12017  * can be started.
   12018  *
   12019  * sc - The pointer to the hw structure
   12020  ****************************************************************************/
   12021 static int32_t
   12022 wm_ich8_cycle_init(struct wm_softc *sc)
   12023 {
   12024 	uint16_t hsfsts;
   12025 	int32_t error = 1;
   12026 	int32_t i     = 0;
   12027 
   12028 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12029 
	/* Check the Flash Descriptor Valid bit in the HW status */
   12031 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12032 		return error;
   12033 	}
   12034 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   12037 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12038 
   12039 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12040 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then be used as an indication of
	 * whether a cycle is in progress or has been completed.  We
	 * should also have some software semaphore mechanism guarding
	 * FDONE or the cycle-in-progress bit so that accesses by two
	 * threads are serialized and two threads don't start a cycle at
	 * the same time.
	 */
   12051 
   12052 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12053 		/*
   12054 		 * There is no cycle running at present, so we can start a
   12055 		 * cycle
   12056 		 */
   12057 
   12058 		/* Begin by setting Flash Cycle Done. */
   12059 		hsfsts |= HSFSTS_DONE;
   12060 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12061 		error = 0;
   12062 	} else {
		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
   12067 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12068 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12069 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12070 				error = 0;
   12071 				break;
   12072 			}
   12073 			delay(1);
   12074 		}
   12075 		if (error == 0) {
			/*
			 * The previous cycle ended before the timeout,
			 * so now set the Flash Cycle Done bit.
			 */
   12080 			hsfsts |= HSFSTS_DONE;
   12081 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12082 		}
   12083 	}
   12084 	return error;
   12085 }
   12086 
   12087 /******************************************************************************
   12088  * This function starts a flash cycle and waits for its completion
   12089  *
   12090  * sc - The pointer to the hw structure
   12091  ****************************************************************************/
   12092 static int32_t
   12093 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12094 {
   12095 	uint16_t hsflctl;
   12096 	uint16_t hsfsts;
   12097 	int32_t error = 1;
   12098 	uint32_t i = 0;
   12099 
   12100 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12101 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12102 	hsflctl |= HSFCTL_GO;
   12103 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12104 
   12105 	/* Wait till FDONE bit is set to 1 */
   12106 	do {
   12107 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12108 		if (hsfsts & HSFSTS_DONE)
   12109 			break;
   12110 		delay(1);
   12111 		i++;
   12112 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12114 		error = 0;
   12115 
   12116 	return error;
   12117 }
   12118 
   12119 /******************************************************************************
   12120  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12121  *
   12122  * sc - The pointer to the hw structure
   12123  * index - The index of the byte or word to read.
   12124  * size - Size of data to read, 1=byte 2=word, 4=dword
   12125  * data - Pointer to the word to store the value read.
   12126  *****************************************************************************/
   12127 static int32_t
   12128 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12129     uint32_t size, uint32_t *data)
   12130 {
   12131 	uint16_t hsfsts;
   12132 	uint16_t hsflctl;
   12133 	uint32_t flash_linear_address;
   12134 	uint32_t flash_data = 0;
   12135 	int32_t error = 1;
   12136 	int32_t count = 0;
   12137 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12140 		return error;
   12141 
   12142 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12143 	    sc->sc_ich8_flash_base;
   12144 
   12145 	do {
   12146 		delay(1);
   12147 		/* Steps */
   12148 		error = wm_ich8_cycle_init(sc);
   12149 		if (error)
   12150 			break;
   12151 
   12152 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is the byte count minus one: 0/1/3 = 1/2/4 bytes */
   12154 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12155 		    & HSFCTL_BCOUNT_MASK;
   12156 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12157 		if (sc->sc_type == WM_T_PCH_SPT) {
   12158 			/*
   12159 			 * In SPT, This register is in Lan memory space, not
   12160 			 * flash. Therefore, only 32 bit access is supported.
   12161 			 */
   12162 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12163 			    (uint32_t)hsflctl);
   12164 		} else
   12165 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12166 
   12167 		/*
   12168 		 * Write the last 24 bits of index into Flash Linear address
   12169 		 * field in Flash Address
   12170 		 */
   12171 		/* TODO: TBD maybe check the index against the size of flash */
   12172 
   12173 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12174 
   12175 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12176 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times (up to ICH_FLASH_CYCLE_REPEAT_COUNT);
		 * otherwise read the result out of Flash Data0, least
		 * significant byte first.
		 */
   12183 		if (error == 0) {
   12184 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12185 			if (size == 1)
   12186 				*data = (uint8_t)(flash_data & 0x000000FF);
   12187 			else if (size == 2)
   12188 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12189 			else if (size == 4)
   12190 				*data = (uint32_t)flash_data;
   12191 			break;
   12192 		} else {
   12193 			/*
   12194 			 * If we've gotten here, then things are probably
   12195 			 * completely hosed, but if the error condition is
   12196 			 * detected, it won't hurt to give it another try...
   12197 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12198 			 */
   12199 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12200 			if (hsfsts & HSFSTS_ERR) {
   12201 				/* Repeat for some time before giving up. */
   12202 				continue;
   12203 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12204 				break;
   12205 		}
   12206 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12207 
   12208 	return error;
   12209 }
   12210 
   12211 /******************************************************************************
   12212  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12213  *
 * sc - pointer to the wm_softc structure
   12215  * index - The index of the byte to read.
   12216  * data - Pointer to a byte to store the value read.
   12217  *****************************************************************************/
   12218 static int32_t
   12219 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12220 {
   12221 	int32_t status;
   12222 	uint32_t word = 0;
   12223 
   12224 	status = wm_read_ich8_data(sc, index, 1, &word);
   12225 	if (status == 0)
   12226 		*data = (uint8_t)word;
   12227 	else
   12228 		*data = 0;
   12229 
   12230 	return status;
   12231 }
   12232 
   12233 /******************************************************************************
   12234  * Reads a word from the NVM using the ICH8 flash access registers.
   12235  *
 * sc - pointer to the wm_softc structure
   12237  * index - The starting byte index of the word to read.
   12238  * data - Pointer to a word to store the value read.
   12239  *****************************************************************************/
   12240 static int32_t
   12241 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12242 {
   12243 	int32_t status;
   12244 	uint32_t word = 0;
   12245 
   12246 	status = wm_read_ich8_data(sc, index, 2, &word);
   12247 	if (status == 0)
   12248 		*data = (uint16_t)word;
   12249 	else
   12250 		*data = 0;
   12251 
   12252 	return status;
   12253 }
   12254 
   12255 /******************************************************************************
   12256  * Reads a dword from the NVM using the ICH8 flash access registers.
   12257  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   12261  *****************************************************************************/
   12262 static int32_t
   12263 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12264 {
   12265 	int32_t status;
   12266 
   12267 	status = wm_read_ich8_data(sc, index, 4, data);
   12268 	return status;
   12269 }
   12270 
   12271 /******************************************************************************
   12272  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12273  * register.
   12274  *
   12275  * sc - Struct containing variables accessed by shared code
   12276  * offset - offset of word in the EEPROM to read
   12277  * data - word read from the EEPROM
   12278  * words - number of words to read
   12279  *****************************************************************************/
   12280 static int
   12281 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12282 {
   12283 	int32_t  rv = 0;
   12284 	uint32_t flash_bank = 0;
   12285 	uint32_t act_offset = 0;
   12286 	uint32_t bank_offset = 0;
   12287 	uint16_t word = 0;
   12288 	uint16_t i = 0;
   12289 
   12290 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12291 		device_xname(sc->sc_dev), __func__));
   12292 
   12293 	if (sc->nvm.acquire(sc) != 0)
   12294 		return -1;
   12295 
   12296 	/*
   12297 	 * We need to know which is the valid flash bank.  In the event
   12298 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12299 	 * managing flash_bank.  So it cannot be trusted and needs
   12300 	 * to be updated with each read.
   12301 	 */
   12302 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12303 	if (rv) {
   12304 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12305 			device_xname(sc->sc_dev)));
   12306 		flash_bank = 0;
   12307 	}
   12308 
   12309 	/*
   12310 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12311 	 * size
   12312 	 */
   12313 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12314 
   12315 	for (i = 0; i < words; i++) {
   12316 		/* The NVM part needs a byte offset, hence * 2 */
   12317 		act_offset = bank_offset + ((offset + i) * 2);
   12318 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12319 		if (rv) {
   12320 			aprint_error_dev(sc->sc_dev,
   12321 			    "%s: failed to read NVM\n", __func__);
   12322 			break;
   12323 		}
   12324 		data[i] = word;
   12325 	}
   12326 
   12327 	sc->nvm.release(sc);
   12328 	return rv;
   12329 }
   12330 
   12331 /******************************************************************************
   12332  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12333  * register.
   12334  *
   12335  * sc - Struct containing variables accessed by shared code
   12336  * offset - offset of word in the EEPROM to read
   12337  * data - word read from the EEPROM
   12338  * words - number of words to read
   12339  *****************************************************************************/
   12340 static int
   12341 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12342 {
   12343 	int32_t  rv = 0;
   12344 	uint32_t flash_bank = 0;
   12345 	uint32_t act_offset = 0;
   12346 	uint32_t bank_offset = 0;
   12347 	uint32_t dword = 0;
   12348 	uint16_t i = 0;
   12349 
   12350 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12351 		device_xname(sc->sc_dev), __func__));
   12352 
   12353 	if (sc->nvm.acquire(sc) != 0)
   12354 		return -1;
   12355 
   12356 	/*
   12357 	 * We need to know which is the valid flash bank.  In the event
   12358 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12359 	 * managing flash_bank.  So it cannot be trusted and needs
   12360 	 * to be updated with each read.
   12361 	 */
   12362 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12363 	if (rv) {
   12364 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12365 			device_xname(sc->sc_dev)));
   12366 		flash_bank = 0;
   12367 	}
   12368 
   12369 	/*
   12370 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12371 	 * size
   12372 	 */
   12373 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12374 
   12375 	for (i = 0; i < words; i++) {
   12376 		/* The NVM part needs a byte offset, hence * 2 */
   12377 		act_offset = bank_offset + ((offset + i) * 2);
   12378 		/* but we must read dword aligned, so mask ... */
   12379 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12380 		if (rv) {
   12381 			aprint_error_dev(sc->sc_dev,
   12382 			    "%s: failed to read NVM\n", __func__);
   12383 			break;
   12384 		}
   12385 		/* ... and pick out low or high word */
   12386 		if ((act_offset & 0x2) == 0)
   12387 			data[i] = (uint16_t)(dword & 0xFFFF);
   12388 		else
   12389 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12390 	}
   12391 
   12392 	sc->nvm.release(sc);
   12393 	return rv;
   12394 }
   12395 
   12396 /* iNVM */
   12397 
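/*
 * The I210/I211 "iNVM" is a small on-die NVM area read through the
 * INVM_DATA registers.  It holds typed records; a word-autoload record
 * carries a word address together with the data for that word.
 */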
   12398 static int
   12399 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12400 {
	int32_t rv = -1;
   12402 	uint32_t invm_dword;
   12403 	uint16_t i;
   12404 	uint8_t record_type, word_address;
   12405 
   12406 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12407 		device_xname(sc->sc_dev), __func__));
   12408 
   12409 	for (i = 0; i < INVM_SIZE; i++) {
   12410 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12411 		/* Get record type */
   12412 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12413 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12414 			break;
   12415 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12416 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12417 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12418 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12419 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12420 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12421 			if (word_address == address) {
   12422 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12423 				rv = 0;
   12424 				break;
   12425 			}
   12426 		}
   12427 	}
   12428 
   12429 	return rv;
   12430 }
   12431 
   12432 static int
   12433 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12434 {
   12435 	int rv = 0;
   12436 	int i;
   12437 
   12438 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12439 		device_xname(sc->sc_dev), __func__));
   12440 
   12441 	if (sc->nvm.acquire(sc) != 0)
   12442 		return -1;
   12443 
   12444 	for (i = 0; i < words; i++) {
   12445 		switch (offset + i) {
   12446 		case NVM_OFF_MACADDR:
   12447 		case NVM_OFF_MACADDR1:
   12448 		case NVM_OFF_MACADDR2:
   12449 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12450 			if (rv != 0) {
   12451 				data[i] = 0xffff;
   12452 				rv = -1;
   12453 			}
   12454 			break;
   12455 		case NVM_OFF_CFG2:
   12456 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12457 			if (rv != 0) {
   12458 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12459 				rv = 0;
   12460 			}
   12461 			break;
   12462 		case NVM_OFF_CFG4:
   12463 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12464 			if (rv != 0) {
   12465 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12466 				rv = 0;
   12467 			}
   12468 			break;
   12469 		case NVM_OFF_LED_1_CFG:
   12470 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12471 			if (rv != 0) {
   12472 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12473 				rv = 0;
   12474 			}
   12475 			break;
   12476 		case NVM_OFF_LED_0_2_CFG:
   12477 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12478 			if (rv != 0) {
   12479 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12480 				rv = 0;
   12481 			}
   12482 			break;
   12483 		case NVM_OFF_ID_LED_SETTINGS:
   12484 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12485 			if (rv != 0) {
   12486 				*data = ID_LED_RESERVED_FFFF;
   12487 				rv = 0;
   12488 			}
   12489 			break;
   12490 		default:
   12491 			DPRINTF(WM_DEBUG_NVM,
   12492 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12493 			*data = NVM_RESERVED_WORD;
   12494 			break;
   12495 		}
   12496 	}
   12497 
   12498 	sc->nvm.release(sc);
   12499 	return rv;
   12500 }
   12501 
/* Locking, NVM type detection, checksum validation, version check and read */
   12503 
   12504 static int
   12505 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12506 {
   12507 	uint32_t eecd = 0;
   12508 
   12509 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12510 	    || sc->sc_type == WM_T_82583) {
   12511 		eecd = CSR_READ(sc, WMREG_EECD);
   12512 
   12513 		/* Isolate bits 15 & 16 */
   12514 		eecd = ((eecd >> 15) & 0x03);
   12515 
   12516 		/* If both bits are set, device is Flash type */
   12517 		if (eecd == 0x03)
   12518 			return 0;
   12519 	}
   12520 	return 1;
   12521 }
   12522 
   12523 static int
   12524 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12525 {
   12526 	uint32_t eec;
   12527 
   12528 	eec = CSR_READ(sc, WMREG_EEC);
   12529 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12530 		return 1;
   12531 
   12532 	return 0;
   12533 }
   12534 
   12535 /*
   12536  * wm_nvm_validate_checksum
   12537  *
 * The checksum is defined such that the sum of the first 64 (16-bit)
 * words equals NVM_CHECKSUM.
   12539  */
   12540 static int
   12541 wm_nvm_validate_checksum(struct wm_softc *sc)
   12542 {
   12543 	uint16_t checksum;
   12544 	uint16_t eeprom_data;
   12545 #ifdef WM_DEBUG
   12546 	uint16_t csum_wordaddr, valid_checksum;
   12547 #endif
   12548 	int i;
   12549 
   12550 	checksum = 0;
   12551 
   12552 	/* Don't check for I211 */
   12553 	if (sc->sc_type == WM_T_I211)
   12554 		return 0;
   12555 
   12556 #ifdef WM_DEBUG
   12557 	if (sc->sc_type == WM_T_PCH_LPT) {
   12558 		csum_wordaddr = NVM_OFF_COMPAT;
   12559 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12560 	} else {
   12561 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12562 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12563 	}
   12564 
   12565 	/* Dump EEPROM image for debug */
   12566 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12567 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12568 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12569 		/* XXX PCH_SPT? */
   12570 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12571 		if ((eeprom_data & valid_checksum) == 0) {
   12572 			DPRINTF(WM_DEBUG_NVM,
   12573 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12574 				device_xname(sc->sc_dev), eeprom_data,
   12575 				    valid_checksum));
   12576 		}
   12577 	}
   12578 
   12579 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12580 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12581 		for (i = 0; i < NVM_SIZE; i++) {
   12582 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12583 				printf("XXXX ");
   12584 			else
   12585 				printf("%04hx ", eeprom_data);
   12586 			if (i % 8 == 7)
   12587 				printf("\n");
   12588 		}
   12589 	}
   12590 
   12591 #endif /* WM_DEBUG */
   12592 
   12593 	for (i = 0; i < NVM_SIZE; i++) {
   12594 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12595 			return 1;
   12596 		checksum += eeprom_data;
   12597 	}
   12598 
   12599 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12600 #ifdef WM_DEBUG
   12601 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12602 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12603 #endif
   12604 	}
   12605 
   12606 	return 0;
   12607 }
   12608 
   12609 static void
   12610 wm_nvm_version_invm(struct wm_softc *sc)
   12611 {
   12612 	uint32_t dword;
   12613 
   12614 	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and just use word 61 as the
	 * document describes.  Perhaps it's not perfect, though...
   12618 	 *
   12619 	 * Example:
   12620 	 *
   12621 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12622 	 */
   12623 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12624 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12625 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12626 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12627 }
   12628 
   12629 static void
   12630 wm_nvm_version(struct wm_softc *sc)
   12631 {
   12632 	uint16_t major, minor, build, patch;
   12633 	uint16_t uid0, uid1;
   12634 	uint16_t nvm_data;
   12635 	uint16_t off;
   12636 	bool check_version = false;
   12637 	bool check_optionrom = false;
   12638 	bool have_build = false;
   12639 	bool have_uid = true;
   12640 
   12641 	/*
   12642 	 * Version format:
   12643 	 *
   12644 	 * XYYZ
   12645 	 * X0YZ
   12646 	 * X0YY
   12647 	 *
   12648 	 * Example:
   12649 	 *
   12650 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12651 	 *	82571	0x50a6	5.10.6?
   12652 	 *	82572	0x506a	5.6.10?
   12653 	 *	82572EI	0x5069	5.6.9?
   12654 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12655 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12657 	 */
   12658 
   12659 	/*
   12660 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
   12663 	 */
   12664 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12665 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12666 		have_uid = false;
   12667 
   12668 	switch (sc->sc_type) {
   12669 	case WM_T_82571:
   12670 	case WM_T_82572:
   12671 	case WM_T_82574:
   12672 	case WM_T_82583:
   12673 		check_version = true;
   12674 		check_optionrom = true;
   12675 		have_build = true;
   12676 		break;
   12677 	case WM_T_82575:
   12678 	case WM_T_82576:
   12679 	case WM_T_82580:
   12680 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12681 			check_version = true;
   12682 		break;
   12683 	case WM_T_I211:
   12684 		wm_nvm_version_invm(sc);
   12685 		have_uid = false;
   12686 		goto printver;
   12687 	case WM_T_I210:
   12688 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12689 			wm_nvm_version_invm(sc);
   12690 			have_uid = false;
   12691 			goto printver;
   12692 		}
   12693 		/* FALLTHROUGH */
   12694 	case WM_T_I350:
   12695 	case WM_T_I354:
   12696 		check_version = true;
   12697 		check_optionrom = true;
   12698 		break;
   12699 	default:
   12700 		return;
   12701 	}
   12702 	if (check_version
   12703 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12704 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12705 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12706 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12707 			build = nvm_data & NVM_BUILD_MASK;
   12708 			have_build = true;
   12709 		} else
   12710 			minor = nvm_data & 0x00ff;
   12711 
		/* The minor number is BCD; convert it to decimal */
   12713 		minor = (minor / 16) * 10 + (minor % 16);
   12714 		sc->sc_nvm_ver_major = major;
   12715 		sc->sc_nvm_ver_minor = minor;
   12716 
   12717 printver:
   12718 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12719 		    sc->sc_nvm_ver_minor);
   12720 		if (have_build) {
   12721 			sc->sc_nvm_ver_build = build;
   12722 			aprint_verbose(".%d", build);
   12723 		}
   12724 	}
   12725 
	/* Assume the option ROM area is above NVM_SIZE */
   12727 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12728 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12729 		/* Option ROM Version */
   12730 		if ((off != 0x0000) && (off != 0xffff)) {
   12731 			int rv;
   12732 
   12733 			off += NVM_COMBO_VER_OFF;
   12734 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12735 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12736 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12737 			    && (uid1 != 0) && (uid1 != 0xffff)) {
				/* The combo version spans two 16-bit words */
   12739 				major = uid0 >> 8;
   12740 				build = (uid0 << 8) | (uid1 >> 8);
   12741 				patch = uid1 & 0x00ff;
   12742 				aprint_verbose(", option ROM Version %d.%d.%d",
   12743 				    major, build, patch);
   12744 			}
   12745 		}
   12746 	}
   12747 
   12748 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12749 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12750 }
   12751 
   12752 /*
   12753  * wm_nvm_read:
   12754  *
   12755  *	Read data from the serial EEPROM.
   12756  */
   12757 static int
   12758 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12759 {
   12760 	int rv;
   12761 
   12762 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12763 		device_xname(sc->sc_dev), __func__));
   12764 
   12765 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12766 		return -1;
   12767 
   12768 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12769 
   12770 	return rv;
   12771 }
   12772 
   12773 /*
   12774  * Hardware semaphores.
 * Very complex...
   12776  */
   12777 
   12778 static int
   12779 wm_get_null(struct wm_softc *sc)
   12780 {
   12781 
   12782 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12783 		device_xname(sc->sc_dev), __func__));
   12784 	return 0;
   12785 }
   12786 
   12787 static void
   12788 wm_put_null(struct wm_softc *sc)
   12789 {
   12790 
   12791 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12792 		device_xname(sc->sc_dev), __func__));
   12793 	return;
   12794 }
   12795 
   12796 static int
   12797 wm_get_eecd(struct wm_softc *sc)
   12798 {
   12799 	uint32_t reg;
   12800 	int x;
   12801 
   12802 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12803 		device_xname(sc->sc_dev), __func__));
   12804 
   12805 	reg = CSR_READ(sc, WMREG_EECD);
   12806 
   12807 	/* Request EEPROM access. */
   12808 	reg |= EECD_EE_REQ;
   12809 	CSR_WRITE(sc, WMREG_EECD, reg);
   12810 
	/* ...and wait for it to be granted. */
   12812 	for (x = 0; x < 1000; x++) {
   12813 		reg = CSR_READ(sc, WMREG_EECD);
   12814 		if (reg & EECD_EE_GNT)
   12815 			break;
   12816 		delay(5);
   12817 	}
   12818 	if ((reg & EECD_EE_GNT) == 0) {
   12819 		aprint_error_dev(sc->sc_dev,
   12820 		    "could not acquire EEPROM GNT\n");
   12821 		reg &= ~EECD_EE_REQ;
   12822 		CSR_WRITE(sc, WMREG_EECD, reg);
   12823 		return -1;
   12824 	}
   12825 
   12826 	return 0;
   12827 }
   12828 
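/*
 * Raise/lower the EECD SK (serial clock) pin.  SPI parts are clocked
 * with ~1us phases; Microwire parts need ~50us per phase.
 */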
   12829 static void
   12830 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12831 {
   12832 
   12833 	*eecd |= EECD_SK;
   12834 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12835 	CSR_WRITE_FLUSH(sc);
   12836 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12837 		delay(1);
   12838 	else
   12839 		delay(50);
   12840 }
   12841 
   12842 static void
   12843 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12844 {
   12845 
   12846 	*eecd &= ~EECD_SK;
   12847 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12848 	CSR_WRITE_FLUSH(sc);
   12849 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12850 		delay(1);
   12851 	else
   12852 		delay(50);
   12853 }
   12854 
   12855 static void
   12856 wm_put_eecd(struct wm_softc *sc)
   12857 {
   12858 	uint32_t reg;
   12859 
   12860 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12861 		device_xname(sc->sc_dev), __func__));
   12862 
   12863 	/* Stop nvm */
   12864 	reg = CSR_READ(sc, WMREG_EECD);
   12865 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12866 		/* Pull CS high */
   12867 		reg |= EECD_CS;
   12868 		wm_nvm_eec_clock_lower(sc, &reg);
   12869 	} else {
   12870 		/* CS on Microwire is active-high */
   12871 		reg &= ~(EECD_CS | EECD_DI);
   12872 		CSR_WRITE(sc, WMREG_EECD, reg);
   12873 		wm_nvm_eec_clock_raise(sc, &reg);
   12874 		wm_nvm_eec_clock_lower(sc, &reg);
   12875 	}
   12876 
   12877 	reg = CSR_READ(sc, WMREG_EECD);
   12878 	reg &= ~EECD_EE_REQ;
   12879 	CSR_WRITE(sc, WMREG_EECD, reg);
   12880 
   12881 	return;
   12882 }
   12883 
   12884 /*
   12885  * Get hardware semaphore.
   12886  * Same as e1000_get_hw_semaphore_generic()
   12887  */
   12888 static int
   12889 wm_get_swsm_semaphore(struct wm_softc *sc)
   12890 {
   12891 	int32_t timeout;
   12892 	uint32_t swsm;
   12893 
   12894 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12895 		device_xname(sc->sc_dev), __func__));
   12896 	KASSERT(sc->sc_nvm_wordsize > 0);
   12897 
   12898 retry:
   12899 	/* Get the SW semaphore. */
   12900 	timeout = sc->sc_nvm_wordsize + 1;
   12901 	while (timeout) {
   12902 		swsm = CSR_READ(sc, WMREG_SWSM);
   12903 
   12904 		if ((swsm & SWSM_SMBI) == 0)
   12905 			break;
   12906 
   12907 		delay(50);
   12908 		timeout--;
   12909 	}
   12910 
   12911 	if (timeout == 0) {
   12912 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12913 			/*
   12914 			 * In rare circumstances, the SW semaphore may already
   12915 			 * be held unintentionally. Clear the semaphore once
   12916 			 * before giving up.
   12917 			 */
   12918 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12919 			wm_put_swsm_semaphore(sc);
   12920 			goto retry;
   12921 		}
   12922 		aprint_error_dev(sc->sc_dev,
   12923 		    "could not acquire SWSM SMBI\n");
   12924 		return 1;
   12925 	}
   12926 
   12927 	/* Get the FW semaphore. */
   12928 	timeout = sc->sc_nvm_wordsize + 1;
   12929 	while (timeout) {
   12930 		swsm = CSR_READ(sc, WMREG_SWSM);
   12931 		swsm |= SWSM_SWESMBI;
   12932 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12933 		/* If we managed to set the bit we got the semaphore. */
   12934 		swsm = CSR_READ(sc, WMREG_SWSM);
   12935 		if (swsm & SWSM_SWESMBI)
   12936 			break;
   12937 
   12938 		delay(50);
   12939 		timeout--;
   12940 	}
   12941 
   12942 	if (timeout == 0) {
   12943 		aprint_error_dev(sc->sc_dev,
   12944 		    "could not acquire SWSM SWESMBI\n");
   12945 		/* Release semaphores */
   12946 		wm_put_swsm_semaphore(sc);
   12947 		return 1;
   12948 	}
   12949 	return 0;
   12950 }
   12951 
   12952 /*
   12953  * Put hardware semaphore.
   12954  * Same as e1000_put_hw_semaphore_generic()
   12955  */
   12956 static void
   12957 wm_put_swsm_semaphore(struct wm_softc *sc)
   12958 {
   12959 	uint32_t swsm;
   12960 
   12961 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12962 		device_xname(sc->sc_dev), __func__));
   12963 
   12964 	swsm = CSR_READ(sc, WMREG_SWSM);
   12965 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12966 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12967 }
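/*
 * A minimal usage sketch for the get/put pair above; real callers go
 * through the chip-specific acquire/release hooks chosen at attach
 * time:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return 1;
 *	... access the shared NVM/PHY resource ...
 *	wm_put_swsm_semaphore(sc);
 */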
   12968 
   12969 /*
   12970  * Get SW/FW semaphore.
   12971  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12972  */
   12973 static int
   12974 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12975 {
   12976 	uint32_t swfw_sync;
   12977 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12978 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12979 	int timeout, limit;
   12980 
   12981 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12982 		device_xname(sc->sc_dev), __func__));
   12983 
   12984 	if (sc->sc_type == WM_T_80003)
   12985 		limit = 50;
   12986 	else
   12987 		limit = 200;
   12988 
   12989 	for (timeout = 0; timeout < limit; timeout++) {
   12990 		if (wm_get_swsm_semaphore(sc)) {
   12991 			aprint_error_dev(sc->sc_dev,
   12992 			    "%s: failed to get semaphore\n",
   12993 			    __func__);
   12994 			return 1;
   12995 		}
   12996 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12997 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12998 			swfw_sync |= swmask;
   12999 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13000 			wm_put_swsm_semaphore(sc);
   13001 			return 0;
   13002 		}
   13003 		wm_put_swsm_semaphore(sc);
   13004 		delay(5000);
   13005 	}
   13006 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13007 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13008 	return 1;
   13009 }
   13010 
   13011 static void
   13012 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13013 {
   13014 	uint32_t swfw_sync;
   13015 
   13016 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13017 		device_xname(sc->sc_dev), __func__));
   13018 
   13019 	while (wm_get_swsm_semaphore(sc) != 0)
   13020 		continue;
   13021 
   13022 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13023 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13024 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13025 
   13026 	wm_put_swsm_semaphore(sc);
   13027 }
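/*
 * SW_FW_SYNC holds one software bit and one firmware bit per shared
 * resource: (mask << SWFW_SOFT_SHIFT) selects the software half and
 * (mask << SWFW_FIRM_SHIFT) the firmware half.  A resource is free
 * only when both halves are clear, and the SWSM semaphore above is
 * held around every read-modify-write of the register.
 */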
   13028 
   13029 static int
   13030 wm_get_nvm_80003(struct wm_softc *sc)
   13031 {
   13032 	int rv;
   13033 
   13034 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13035 		device_xname(sc->sc_dev), __func__));
   13036 
   13037 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13038 		aprint_error_dev(sc->sc_dev,
   13039 		    "%s: failed to get semaphore(SWFW)\n",
   13040 		    __func__);
   13041 		return rv;
   13042 	}
   13043 
   13044 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13045 	    && (rv = wm_get_eecd(sc)) != 0) {
   13046 		aprint_error_dev(sc->sc_dev,
   13047 		    "%s: failed to get semaphore(EECD)\n",
   13048 		    __func__);
   13049 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13050 		return rv;
   13051 	}
   13052 
   13053 	return 0;
   13054 }
   13055 
   13056 static void
   13057 wm_put_nvm_80003(struct wm_softc *sc)
   13058 {
   13059 
   13060 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13061 		device_xname(sc->sc_dev), __func__));
   13062 
   13063 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13064 		wm_put_eecd(sc);
   13065 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13066 }
   13067 
   13068 static int
   13069 wm_get_nvm_82571(struct wm_softc *sc)
   13070 {
   13071 	int rv;
   13072 
   13073 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13074 		device_xname(sc->sc_dev), __func__));
   13075 
   13076 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13077 		return rv;
   13078 
   13079 	switch (sc->sc_type) {
   13080 	case WM_T_82573:
   13081 		break;
   13082 	default:
   13083 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13084 			rv = wm_get_eecd(sc);
   13085 		break;
   13086 	}
   13087 
   13088 	if (rv != 0) {
   13089 		aprint_error_dev(sc->sc_dev,
   13090 		    "%s: failed to get semaphore\n",
   13091 		    __func__);
   13092 		wm_put_swsm_semaphore(sc);
   13093 	}
   13094 
   13095 	return rv;
   13096 }
   13097 
   13098 static void
   13099 wm_put_nvm_82571(struct wm_softc *sc)
   13100 {
   13101 
   13102 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13103 		device_xname(sc->sc_dev), __func__));
   13104 
   13105 	switch (sc->sc_type) {
   13106 	case WM_T_82573:
   13107 		break;
   13108 	default:
   13109 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13110 			wm_put_eecd(sc);
   13111 		break;
   13112 	}
   13113 
   13114 	wm_put_swsm_semaphore(sc);
   13115 }
   13116 
   13117 static int
   13118 wm_get_phy_82575(struct wm_softc *sc)
   13119 {
   13120 
   13121 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13122 		device_xname(sc->sc_dev), __func__));
   13123 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13124 }
   13125 
   13126 static void
   13127 wm_put_phy_82575(struct wm_softc *sc)
   13128 {
   13129 
   13130 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13131 		device_xname(sc->sc_dev), __func__));
   13132 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13133 }
   13134 
   13135 static int
   13136 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13137 {
   13138 	uint32_t ext_ctrl;
   13139 	int timeout;
   13140 
   13141 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13142 		device_xname(sc->sc_dev), __func__));
   13143 
   13144 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13145 	for (timeout = 0; timeout < 200; timeout++) {
   13146 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13147 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13148 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13149 
   13150 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13151 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13152 			return 0;
   13153 		delay(5000);
   13154 	}
   13155 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13156 	    device_xname(sc->sc_dev), ext_ctrl);
   13157 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13158 	return 1;
   13159 }
   13160 
   13161 static void
   13162 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13163 {
   13164 	uint32_t ext_ctrl;
   13165 
   13166 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13167 		device_xname(sc->sc_dev), __func__));
   13168 
   13169 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13170 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13171 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13172 
   13173 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13174 }
   13175 
   13176 static int
   13177 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13178 {
   13179 	uint32_t ext_ctrl;
   13180 	int timeout;
   13181 
   13182 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13183 		device_xname(sc->sc_dev), __func__));
   13184 	mutex_enter(sc->sc_ich_phymtx);
   13185 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13186 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13187 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13188 			break;
   13189 		delay(1000);
   13190 	}
   13191 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13192 		printf("%s: SW has already locked the resource\n",
   13193 		    device_xname(sc->sc_dev));
   13194 		goto out;
   13195 	}
   13196 
   13197 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13198 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13199 	for (timeout = 0; timeout < 1000; timeout++) {
   13200 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13201 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13202 			break;
   13203 		delay(1000);
   13204 	}
   13205 	if (timeout >= 1000) {
   13206 		printf("%s: failed to acquire semaphore\n",
   13207 		    device_xname(sc->sc_dev));
   13208 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13209 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13210 		goto out;
   13211 	}
   13212 	return 0;
   13213 
   13214 out:
   13215 	mutex_exit(sc->sc_ich_phymtx);
   13216 	return 1;
   13217 }
   13218 
   13219 static void
   13220 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13221 {
   13222 	uint32_t ext_ctrl;
   13223 
   13224 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13225 		device_xname(sc->sc_dev), __func__));
   13226 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13227 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13228 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13229 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13230 	} else {
   13231 		printf("%s: Semaphore unexpectedly released\n",
   13232 		    device_xname(sc->sc_dev));
   13233 	}
   13234 
   13235 	mutex_exit(sc->sc_ich_phymtx);
   13236 }
   13237 
   13238 static int
   13239 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13240 {
   13241 
   13242 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13243 		device_xname(sc->sc_dev), __func__));
   13244 	mutex_enter(sc->sc_ich_nvmmtx);
   13245 
   13246 	return 0;
   13247 }
   13248 
   13249 static void
   13250 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13251 {
   13252 
   13253 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13254 		device_xname(sc->sc_dev), __func__));
   13255 	mutex_exit(sc->sc_ich_nvmmtx);
   13256 }
   13257 
   13258 static int
   13259 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13260 {
   13261 	int i = 0;
   13262 	uint32_t reg;
   13263 
   13264 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13265 		device_xname(sc->sc_dev), __func__));
   13266 
   13267 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13268 	do {
   13269 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13270 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13271 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13272 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13273 			break;
   13274 		delay(2*1000);
   13275 		i++;
   13276 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13277 
   13278 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13279 		wm_put_hw_semaphore_82573(sc);
   13280 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13281 		    device_xname(sc->sc_dev));
   13282 		return -1;
   13283 	}
   13284 
   13285 	return 0;
   13286 }
   13287 
   13288 static void
   13289 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13290 {
   13291 	uint32_t reg;
   13292 
   13293 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13294 		device_xname(sc->sc_dev), __func__));
   13295 
   13296 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13297 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13298 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13299 }
   13300 
   13301 /*
   13302  * Management mode and power management related subroutines.
   13303  * BMC, AMT, suspend/resume and EEE.
   13304  */
   13305 
   13306 #ifdef WM_WOL
   13307 static int
   13308 wm_check_mng_mode(struct wm_softc *sc)
   13309 {
   13310 	int rv;
   13311 
   13312 	switch (sc->sc_type) {
   13313 	case WM_T_ICH8:
   13314 	case WM_T_ICH9:
   13315 	case WM_T_ICH10:
   13316 	case WM_T_PCH:
   13317 	case WM_T_PCH2:
   13318 	case WM_T_PCH_LPT:
   13319 	case WM_T_PCH_SPT:
   13320 		rv = wm_check_mng_mode_ich8lan(sc);
   13321 		break;
   13322 	case WM_T_82574:
   13323 	case WM_T_82583:
   13324 		rv = wm_check_mng_mode_82574(sc);
   13325 		break;
   13326 	case WM_T_82571:
   13327 	case WM_T_82572:
   13328 	case WM_T_82573:
   13329 	case WM_T_80003:
   13330 		rv = wm_check_mng_mode_generic(sc);
   13331 		break;
   13332 	default:
   13333 		/* nothing to do */
   13334 		rv = 0;
   13335 		break;
   13336 	}
   13337 
   13338 	return rv;
   13339 }
   13340 
   13341 static int
   13342 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13343 {
   13344 	uint32_t fwsm;
   13345 
   13346 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13347 
   13348 	if (((fwsm & FWSM_FW_VALID) != 0)
   13349 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13350 		return 1;
   13351 
   13352 	return 0;
   13353 }
   13354 
   13355 static int
   13356 wm_check_mng_mode_82574(struct wm_softc *sc)
   13357 {
   13358 	uint16_t data;
   13359 
   13360 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13361 
   13362 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13363 		return 1;
   13364 
   13365 	return 0;
   13366 }
   13367 
   13368 static int
   13369 wm_check_mng_mode_generic(struct wm_softc *sc)
   13370 {
   13371 	uint32_t fwsm;
   13372 
   13373 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13374 
   13375 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13376 		return 1;
   13377 
   13378 	return 0;
   13379 }
   13380 #endif /* WM_WOL */
   13381 
   13382 static int
   13383 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13384 {
   13385 	uint32_t manc, fwsm, factps;
   13386 
   13387 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13388 		return 0;
   13389 
   13390 	manc = CSR_READ(sc, WMREG_MANC);
   13391 
   13392 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13393 		device_xname(sc->sc_dev), manc));
   13394 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13395 		return 0;
   13396 
   13397 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13398 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13399 		factps = CSR_READ(sc, WMREG_FACTPS);
   13400 		if (((factps & FACTPS_MNGCG) == 0)
   13401 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13402 			return 1;
   13403 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13404 		uint16_t data;
   13405 
   13406 		factps = CSR_READ(sc, WMREG_FACTPS);
   13407 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13408 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13409 			device_xname(sc->sc_dev), factps, data));
   13410 		if (((factps & FACTPS_MNGCG) == 0)
   13411 		    && ((data & NVM_CFG2_MNGM_MASK)
   13412 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13413 			return 1;
   13414 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13415 	    && ((manc & MANC_ASF_EN) == 0))
   13416 		return 1;
   13417 
   13418 	return 0;
   13419 }
   13420 
   13421 static bool
   13422 wm_phy_resetisblocked(struct wm_softc *sc)
   13423 {
   13424 	bool blocked = false;
   13425 	uint32_t reg;
   13426 	int i = 0;
   13427 
   13428 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13429 		device_xname(sc->sc_dev), __func__));
   13430 
   13431 	switch (sc->sc_type) {
   13432 	case WM_T_ICH8:
   13433 	case WM_T_ICH9:
   13434 	case WM_T_ICH10:
   13435 	case WM_T_PCH:
   13436 	case WM_T_PCH2:
   13437 	case WM_T_PCH_LPT:
   13438 	case WM_T_PCH_SPT:
   13439 		do {
   13440 			reg = CSR_READ(sc, WMREG_FWSM);
   13441 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13442 				blocked = true;
   13443 				delay(10*1000);
   13444 				continue;
   13445 			}
   13446 			blocked = false;
   13447 		} while (blocked && (i++ < 30));
   13448 		return blocked;
   13450 	case WM_T_82571:
   13451 	case WM_T_82572:
   13452 	case WM_T_82573:
   13453 	case WM_T_82574:
   13454 	case WM_T_82583:
   13455 	case WM_T_80003:
   13456 		reg = CSR_READ(sc, WMREG_MANC);
   13457 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13458 			return true;
   13459 		else
   13460 			return false;
   13462 	default:
   13463 		/* no problem */
   13464 		break;
   13465 	}
   13466 
   13467 	return false;
   13468 }
   13469 
   13470 static void
   13471 wm_get_hw_control(struct wm_softc *sc)
   13472 {
   13473 	uint32_t reg;
   13474 
   13475 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13476 		device_xname(sc->sc_dev), __func__));
   13477 
   13478 	if (sc->sc_type == WM_T_82573) {
   13479 		reg = CSR_READ(sc, WMREG_SWSM);
   13480 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13481 	} else if (sc->sc_type >= WM_T_82571) {
   13482 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13483 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13484 	}
   13485 }
   13486 
   13487 static void
   13488 wm_release_hw_control(struct wm_softc *sc)
   13489 {
   13490 	uint32_t reg;
   13491 
   13492 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13493 		device_xname(sc->sc_dev), __func__));
   13494 
   13495 	if (sc->sc_type == WM_T_82573) {
   13496 		reg = CSR_READ(sc, WMREG_SWSM);
   13497 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13498 	} else if (sc->sc_type >= WM_T_82571) {
   13499 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13500 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13501 	}
   13502 }
   13503 
   13504 static void
   13505 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13506 {
   13507 	uint32_t reg;
   13508 
   13509 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13510 		device_xname(sc->sc_dev), __func__));
   13511 
   13512 	if (sc->sc_type < WM_T_PCH2)
   13513 		return;
   13514 
   13515 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13516 
   13517 	if (gate)
   13518 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13519 	else
   13520 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13521 
   13522 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13523 }
   13524 
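/*
 * Switch the PHY's management interface from SMBus back to the normal
 * MDIO path; this roughly corresponds to e1000's
 * init_phy_workarounds_pchlan().  Probe whether the PHY is accessible,
 * force or unforce SMBus mode as needed, and finish with a PHY reset
 * unless firmware is blocking resets.
 */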
   13525 static void
   13526 wm_smbustopci(struct wm_softc *sc)
   13527 {
   13528 	uint32_t fwsm, reg;
   13529 	int rv = 0;
   13530 
   13531 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13532 		device_xname(sc->sc_dev), __func__));
   13533 
   13534 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13535 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13536 
   13537 	/* Disable ULP */
   13538 	wm_ulp_disable(sc);
   13539 
   13540 	/* Acquire PHY semaphore */
   13541 	sc->phy.acquire(sc);
   13542 
   13543 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13544 	switch (sc->sc_type) {
   13545 	case WM_T_PCH_LPT:
   13546 	case WM_T_PCH_SPT:
   13547 		if (wm_phy_is_accessible_pchlan(sc))
   13548 			break;
   13549 
   13550 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13551 		reg |= CTRL_EXT_FORCE_SMBUS;
   13552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13553 #if 0
   13554 		/* XXX Isn't this required??? */
   13555 		CSR_WRITE_FLUSH(sc);
   13556 #endif
   13557 		delay(50 * 1000);
   13558 		/* FALLTHROUGH */
   13559 	case WM_T_PCH2:
   13560 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13561 			break;
   13562 		/* FALLTHROUGH */
   13563 	case WM_T_PCH:
   13564 		if (sc->sc_type == WM_T_PCH)
   13565 			if ((fwsm & FWSM_FW_VALID) != 0)
   13566 				break;
   13567 
   13568 		if (wm_phy_resetisblocked(sc) == true) {
   13569 			printf("XXX reset is blocked(3)\n");
   13570 			break;
   13571 		}
   13572 
   13573 		wm_toggle_lanphypc_pch_lpt(sc);
   13574 
   13575 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13576 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13577 				break;
   13578 
   13579 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13580 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13581 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13582 
   13583 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13584 				break;
   13585 			rv = -1;
   13586 		}
   13587 		break;
   13588 	default:
   13589 		break;
   13590 	}
   13591 
   13592 	/* Release semaphore */
   13593 	sc->phy.release(sc);
   13594 
   13595 	if (rv == 0) {
   13596 		if (wm_phy_resetisblocked(sc)) {
   13597 			printf("XXX reset is blocked(4)\n");
   13598 			goto out;
   13599 		}
   13600 		wm_reset_phy(sc);
   13601 		if (wm_phy_resetisblocked(sc))
   13602 			printf("XXX reset is blocked(5)\n");
   13603 	}
   13604 
   13605 out:
   13606 	/*
   13607 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13608 	 */
   13609 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13610 		delay(10*1000);
   13611 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13612 	}
   13613 }
   13614 
   13615 static void
   13616 wm_init_manageability(struct wm_softc *sc)
   13617 {
   13618 
   13619 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13620 		device_xname(sc->sc_dev), __func__));
   13621 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13622 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13623 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13624 
   13625 		/* Disable hardware interception of ARP */
   13626 		manc &= ~MANC_ARP_EN;
   13627 
   13628 		/* Enable receiving management packets to the host */
   13629 		if (sc->sc_type >= WM_T_82571) {
   13630 			manc |= MANC_EN_MNG2HOST;
   13631 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13632 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13633 		}
   13634 
   13635 		CSR_WRITE(sc, WMREG_MANC, manc);
   13636 	}
   13637 }
   13638 
   13639 static void
   13640 wm_release_manageability(struct wm_softc *sc)
   13641 {
   13642 
   13643 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13644 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13645 
   13646 		manc |= MANC_ARP_EN;
   13647 		if (sc->sc_type >= WM_T_82571)
   13648 			manc &= ~MANC_EN_MNG2HOST;
   13649 
   13650 		CSR_WRITE(sc, WMREG_MANC, manc);
   13651 	}
   13652 }
   13653 
   13654 static void
   13655 wm_get_wakeup(struct wm_softc *sc)
   13656 {
   13657 
   13658 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13659 	switch (sc->sc_type) {
   13660 	case WM_T_82573:
   13661 	case WM_T_82583:
   13662 		sc->sc_flags |= WM_F_HAS_AMT;
   13663 		/* FALLTHROUGH */
   13664 	case WM_T_80003:
   13665 	case WM_T_82575:
   13666 	case WM_T_82576:
   13667 	case WM_T_82580:
   13668 	case WM_T_I350:
   13669 	case WM_T_I354:
   13670 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13671 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13672 		/* FALLTHROUGH */
   13673 	case WM_T_82541:
   13674 	case WM_T_82541_2:
   13675 	case WM_T_82547:
   13676 	case WM_T_82547_2:
   13677 	case WM_T_82571:
   13678 	case WM_T_82572:
   13679 	case WM_T_82574:
   13680 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13681 		break;
   13682 	case WM_T_ICH8:
   13683 	case WM_T_ICH9:
   13684 	case WM_T_ICH10:
   13685 	case WM_T_PCH:
   13686 	case WM_T_PCH2:
   13687 	case WM_T_PCH_LPT:
   13688 	case WM_T_PCH_SPT:
   13689 		sc->sc_flags |= WM_F_HAS_AMT;
   13690 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13691 		break;
   13692 	default:
   13693 		break;
   13694 	}
   13695 
   13696 	/* 1: HAS_MANAGE */
   13697 	if (wm_enable_mng_pass_thru(sc) != 0)
   13698 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13699 
   13700 	/*
   13701 	 * Note that the WOL flag is set later, after the EEPROM-related
   13702 	 * reset code has run.
   13703 	 */
   13704 }
   13705 
   13706 /*
   13707  * Unconfigure Ultra Low Power mode.
   13708  * Only for I217 and newer (see below).
   13709  */
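/*
 * Two exit paths are implemented below.  If ME firmware is running
 * (FWSM_FW_VALID), the host merely asks ME over H2ME to take the PHY
 * out of ULP and polls FWSM_ULP_CFG_DONE.  Otherwise the host drives
 * the exit itself: toggle LANPHYPC, unforce SMBus mode in both the PHY
 * and the MAC, commit a cleared ULP configuration, and reset the PHY.
 */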
   13710 static void
   13711 wm_ulp_disable(struct wm_softc *sc)
   13712 {
   13713 	uint32_t reg;
   13714 	int i = 0;
   13715 
   13716 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13717 		device_xname(sc->sc_dev), __func__));
   13718 	/* Exclude old devices */
   13719 	if ((sc->sc_type < WM_T_PCH_LPT)
   13720 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13721 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13722 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13723 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13724 		return;
   13725 
   13726 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13727 		/* Request ME un-configure ULP mode in the PHY */
   13728 		reg = CSR_READ(sc, WMREG_H2ME);
   13729 		reg &= ~H2ME_ULP;
   13730 		reg |= H2ME_ENFORCE_SETTINGS;
   13731 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13732 
   13733 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13734 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13735 			if (i++ == 30) {
   13736 				printf("%s timed out\n", __func__);
   13737 				return;
   13738 			}
   13739 			delay(10 * 1000);
   13740 		}
   13741 		reg = CSR_READ(sc, WMREG_H2ME);
   13742 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13743 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13744 
   13745 		return;
   13746 	}
   13747 
   13748 	/* Acquire semaphore */
   13749 	sc->phy.acquire(sc);
   13750 
   13751 	/* Toggle LANPHYPC */
   13752 	wm_toggle_lanphypc_pch_lpt(sc);
   13753 
   13754 	/* Unforce SMBus mode in PHY */
   13755 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13756 	if (reg == 0x0000 || reg == 0xffff) {
   13757 		uint32_t reg2;
   13758 
   13759 		printf("%s: Force SMBus first.\n", __func__);
   13760 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13761 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13763 		delay(50 * 1000);
   13764 
   13765 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13766 	}
   13767 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13768 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13769 
   13770 	/* Unforce SMBus mode in MAC */
   13771 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13772 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13773 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13774 
   13775 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13776 	reg |= HV_PM_CTRL_K1_ENA;
   13777 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13778 
   13779 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13780 	reg &= ~(I218_ULP_CONFIG1_IND
   13781 	    | I218_ULP_CONFIG1_STICKY_ULP
   13782 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13783 	    | I218_ULP_CONFIG1_WOL_HOST
   13784 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13785 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13786 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13787 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13788 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13789 	reg |= I218_ULP_CONFIG1_START;
   13790 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13791 
   13792 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13793 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13794 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13795 
   13796 	/* Release semaphore */
   13797 	sc->phy.release(sc);
   13798 	wm_gmii_reset(sc);
   13799 	delay(50 * 1000);
   13800 }
   13801 
   13802 /* WOL in the newer chipset interfaces (pchlan) */
   13803 static void
   13804 wm_enable_phy_wakeup(struct wm_softc *sc)
   13805 {
   13806 #if 0
   13807 	uint16_t preg;
   13808 
   13809 	/* Copy MAC RARs to PHY RARs */
   13810 
   13811 	/* Copy MAC MTA to PHY MTA */
   13812 
   13813 	/* Configure PHY Rx Control register */
   13814 
   13815 	/* Enable PHY wakeup in MAC register */
   13816 
   13817 	/* Configure and enable PHY wakeup in PHY registers */
   13818 
   13819 	/* Activate PHY wakeup */
   13820 
   13821 	/* XXX */
   13822 #endif
   13823 }
   13824 
   13825 /* Power down workaround on D3 */
   13826 static void
   13827 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13828 {
   13829 	uint32_t reg;
   13830 	int i;
   13831 
   13832 	for (i = 0; i < 2; i++) {
   13833 		/* Disable link */
   13834 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13835 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13836 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13837 
   13838 		/*
   13839 		 * Call gig speed drop workaround on Gig disable before
   13840 		 * accessing any PHY registers
   13841 		 */
   13842 		if (sc->sc_type == WM_T_ICH8)
   13843 			wm_gig_downshift_workaround_ich8lan(sc);
   13844 
   13845 		/* Write VR power-down enable */
   13846 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13847 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13848 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13849 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13850 
   13851 		/* Read it back and test */
   13852 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13853 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13854 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13855 			break;
   13856 
   13857 		/* Issue PHY reset and repeat at most one more time */
   13858 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13859 	}
   13860 }
   13861 
   13862 static void
   13863 wm_enable_wakeup(struct wm_softc *sc)
   13864 {
   13865 	uint32_t reg, pmreg;
   13866 	pcireg_t pmode;
   13867 
   13868 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13869 		device_xname(sc->sc_dev), __func__));
   13870 
   13871 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13872 		&pmreg, NULL) == 0)
   13873 		return;
   13874 
   13875 	/* Advertise the wakeup capability */
   13876 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13877 	    | CTRL_SWDPIN(3));
   13878 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13879 
   13880 	/* ICH workaround */
   13881 	switch (sc->sc_type) {
   13882 	case WM_T_ICH8:
   13883 	case WM_T_ICH9:
   13884 	case WM_T_ICH10:
   13885 	case WM_T_PCH:
   13886 	case WM_T_PCH2:
   13887 	case WM_T_PCH_LPT:
   13888 	case WM_T_PCH_SPT:
   13889 		/* Disable gig during WOL */
   13890 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13891 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13892 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13893 		if (sc->sc_type == WM_T_PCH)
   13894 			wm_gmii_reset(sc);
   13895 
   13896 		/* Power down workaround */
   13897 		if (sc->sc_phytype == WMPHY_82577) {
   13898 			struct mii_softc *child;
   13899 
   13900 			/* Assume that the PHY is copper */
   13901 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13902 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13903 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13904 				    (768 << 5) | 25, 0x0444); /* magic num */
   13905 		}
   13906 		break;
   13907 	default:
   13908 		break;
   13909 	}
   13910 
   13911 	/* Keep the laser running on fiber adapters */
   13912 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13913 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13914 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13915 		reg |= CTRL_EXT_SWDPIN(3);
   13916 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13917 	}
   13918 
   13919 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13920 #if 0	/* for the multicast packet */
   13921 	reg |= WUFC_MC;
   13922 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13923 #endif
   13924 
   13925 	if (sc->sc_type >= WM_T_PCH)
   13926 		wm_enable_phy_wakeup(sc);
   13927 	else {
   13928 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13929 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13930 	}
   13931 
   13932 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13933 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13934 		|| (sc->sc_type == WM_T_PCH2))
   13935 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13936 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13937 
   13938 	/* Request PME */
   13939 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13940 #if 0
   13941 	/* Disable WOL */
   13942 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13943 #else
   13944 	/* For WOL */
   13945 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13946 #endif
   13947 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13948 }
   13949 
   13950 /* Disable ASPM L0s and/or L1 for workaround */
   13951 static void
   13952 wm_disable_aspm(struct wm_softc *sc)
   13953 {
   13954 	pcireg_t reg, mask = 0;
   13955 	const char *str = "";
   13956 
   13957 	/*
   13958 	 * Only for PCIe devices which have the PCIe capability in their
   13959 	 * PCI config space.
   13960 	 */
   13961 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   13962 		return;
   13963 
   13964 	switch (sc->sc_type) {
   13965 	case WM_T_82571:
   13966 	case WM_T_82572:
   13967 		/*
   13968 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   13969 		 * State Power management L1 State (ASPM L1).
   13970 		 */
   13971 		mask = PCIE_LCSR_ASPM_L1;
   13972 		str = "L1 is";
   13973 		break;
   13974 	case WM_T_82573:
   13975 	case WM_T_82574:
   13976 	case WM_T_82583:
   13977 		/*
   13978 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   13979 		 *
   13980 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
   13981 		 * some chipsets.  The 82574 and 82583 documents say that
   13982 		 * disabling L0s on those specific chipsets is sufficient,
   13983 		 * but we follow what the Intel em driver does.
   13984 		 *
   13985 		 * References:
   13986 		 * Errata 8 of the Specification Update of i82573.
   13987 		 * Errata 20 of the Specification Update of i82574.
   13988 		 * Errata 9 of the Specification Update of i82583.
   13989 		 */
   13990 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   13991 		str = "L0s and L1 are";
   13992 		break;
   13993 	default:
   13994 		return;
   13995 	}
   13996 
   13997 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13998 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   13999 	reg &= ~mask;
   14000 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14001 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14002 
   14003 	/* Print only in wm_attach() */
   14004 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14005 		aprint_verbose_dev(sc->sc_dev,
   14006 		    "ASPM %s disabled to work around the errata.\n",
   14007 		    str);
   14008 }
   14009 
   14010 /* LPLU */
   14011 
   14012 static void
   14013 wm_lplu_d0_disable(struct wm_softc *sc)
   14014 {
   14015 	struct mii_data *mii = &sc->sc_mii;
   14016 	uint32_t reg;
   14017 
   14018 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14019 		device_xname(sc->sc_dev), __func__));
   14020 
   14021 	if (sc->sc_phytype == WMPHY_IFE)
   14022 		return;
   14023 
   14024 	switch (sc->sc_type) {
   14025 	case WM_T_82571:
   14026 	case WM_T_82572:
   14027 	case WM_T_82573:
   14028 	case WM_T_82575:
   14029 	case WM_T_82576:
   14030 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14031 		reg &= ~PMR_D0_LPLU;
   14032 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14033 		break;
   14034 	case WM_T_82580:
   14035 	case WM_T_I350:
   14036 	case WM_T_I210:
   14037 	case WM_T_I211:
   14038 		reg = CSR_READ(sc, WMREG_PHPM);
   14039 		reg &= ~PHPM_D0A_LPLU;
   14040 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14041 		break;
   14042 	case WM_T_82574:
   14043 	case WM_T_82583:
   14044 	case WM_T_ICH8:
   14045 	case WM_T_ICH9:
   14046 	case WM_T_ICH10:
   14047 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14048 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14049 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14050 		CSR_WRITE_FLUSH(sc);
   14051 		break;
   14052 	case WM_T_PCH:
   14053 	case WM_T_PCH2:
   14054 	case WM_T_PCH_LPT:
   14055 	case WM_T_PCH_SPT:
   14056 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14057 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14058 		if (wm_phy_resetisblocked(sc) == false)
   14059 			reg |= HV_OEM_BITS_ANEGNOW;
   14060 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14061 		break;
   14062 	default:
   14063 		break;
   14064 	}
   14065 }
   14066 
   14067 /* EEE */
   14068 
   14069 static void
   14070 wm_set_eee_i350(struct wm_softc *sc)
   14071 {
   14072 	uint32_t ipcnfg, eeer;
   14073 
   14074 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14075 	eeer = CSR_READ(sc, WMREG_EEER);
   14076 
   14077 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14078 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14079 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14080 		    | EEER_LPI_FC);
   14081 	} else {
   14082 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14083 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14084 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14085 		    | EEER_LPI_FC);
   14086 	}
   14087 
   14088 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14089 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14090 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14091 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14092 }
   14093 
   14094 /*
   14095  * Workarounds (mainly PHY related).
   14096  * Basically, PHY's workarounds are in the PHY drivers.
   14097  */
   14098 
   14099 /* Work-around for 82566 Kumeran PCS lock loss */
   14100 static void
   14101 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14102 {
   14103 	struct mii_data *mii = &sc->sc_mii;
   14104 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14105 	int i;
   14106 	int reg;
   14107 
   14108 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14109 		device_xname(sc->sc_dev), __func__));
   14110 
   14111 	/* If the link is not up, do nothing */
   14112 	if ((status & STATUS_LU) == 0)
   14113 		return;
   14114 
   14115 	/* Nothing to do if the link speed is other than 1Gbps */
   14116 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14117 		return;
   14118 
   14119 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14120 	for (i = 0; i < 10; i++) {
   14121 		/* read twice */
   14122 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14123 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14124 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14125 			goto out;	/* GOOD! */
   14126 
   14127 		/* Reset the PHY */
   14128 		wm_reset_phy(sc);
   14129 		delay(5*1000);
   14130 	}
   14131 
   14132 	/* Disable GigE link negotiation */
   14133 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14134 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14135 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14136 
   14137 	/*
   14138 	 * Call gig speed drop workaround on Gig disable before accessing
   14139 	 * any PHY registers.
   14140 	 */
   14141 	wm_gig_downshift_workaround_ich8lan(sc);
   14142 
   14143 out:
   14144 	return;
   14145 }
   14146 
   14147 /* WOL from S5 stops working */
   14148 static void
   14149 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14150 {
   14151 	uint16_t kmreg;
   14152 
   14153 	/* Only for igp3 */
   14154 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14155 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14156 			return;
   14157 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14158 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14159 			return;
   14160 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14161 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14162 	}
   14163 }
   14164 
   14165 /*
   14166  * Workaround for pch's PHYs
   14167  * XXX should be moved to new PHY driver?
   14168  */
   14169 static void
   14170 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14171 {
   14172 
   14173 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14174 		device_xname(sc->sc_dev), __func__));
   14175 	KASSERT(sc->sc_type == WM_T_PCH);
   14176 
   14177 	if (sc->sc_phytype == WMPHY_82577)
   14178 		wm_set_mdio_slow_mode_hv(sc);
   14179 
   14180 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14181 
   14182 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14183 
   14184 	/* 82578 */
   14185 	if (sc->sc_phytype == WMPHY_82578) {
   14186 		struct mii_softc *child;
   14187 
   14188 		/*
   14189 		 * Return registers to default by doing a soft reset then
   14190 		 * writing 0x3140 to the control register
   14191 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14192 		 */
   14193 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14194 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14195 			PHY_RESET(child);
   14196 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14197 			    0x3140);
   14198 		}
   14199 	}
   14200 
   14201 	/* Select page 0 */
   14202 	sc->phy.acquire(sc);
   14203 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14204 	sc->phy.release(sc);
   14205 
   14206 	/*
   14207 	 * Configure the K1 Si workaround during phy reset assuming there is
   14208 	 * link so that it disables K1 if link is in 1Gbps.
   14209 	 */
   14210 	wm_k1_gig_workaround_hv(sc, 1);
   14211 }
   14212 
   14213 static void
   14214 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14215 {
   14216 
   14217 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14218 		device_xname(sc->sc_dev), __func__));
   14219 	KASSERT(sc->sc_type == WM_T_PCH2);
   14220 
   14221 	wm_set_mdio_slow_mode_hv(sc);
   14222 }
   14223 
   14224 static int
   14225 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14226 {
   14227 	int k1_enable = sc->sc_nvm_k1_enabled;
   14228 
   14229 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14230 		device_xname(sc->sc_dev), __func__));
   14231 
   14232 	if (sc->phy.acquire(sc) != 0)
   14233 		return -1;
   14234 
   14235 	if (link) {
   14236 		k1_enable = 0;
   14237 
   14238 		/* Link stall fix for link up */
   14239 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14240 	} else {
   14241 		/* Link stall fix for link down */
   14242 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14243 	}
   14244 
   14245 	wm_configure_k1_ich8lan(sc, k1_enable);
   14246 	sc->phy.release(sc);
   14247 
   14248 	return 0;
   14249 }
   14250 
   14251 static void
   14252 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14253 {
   14254 	uint32_t reg;
   14255 
   14256 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14257 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14258 	    reg | HV_KMRN_MDIO_SLOW);
   14259 }
   14260 
   14261 static void
   14262 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14263 {
   14264 	uint32_t ctrl, ctrl_ext, tmp;
   14265 	uint16_t kmreg;
   14266 	int rv;
   14267 
   14268 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14269 	if (rv != 0)
   14270 		return;
   14271 
   14272 	if (k1_enable)
   14273 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14274 	else
   14275 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14276 
   14277 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14278 	if (rv != 0)
   14279 		return;
   14280 
   14281 	delay(20);
   14282 
   14283 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14284 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14285 
   14286 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14287 	tmp |= CTRL_FRCSPD;
   14288 
   14289 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14290 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14291 	CSR_WRITE_FLUSH(sc);
   14292 	delay(20);
   14293 
   14294 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14295 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14296 	CSR_WRITE_FLUSH(sc);
   14297 	delay(20);
   14300 }
   14301 
   14302 /* special case - for 82575 - need to do manual init ... */
   14303 static void
   14304 wm_reset_init_script_82575(struct wm_softc *sc)
   14305 {
   14306 	/*
   14307 	 * Remark: this is untested code - we have no board without EEPROM.
   14308 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14309 	 */
   14310 
   14311 	/* SerDes configuration via SERDESCTRL */
   14312 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14313 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14314 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14315 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14316 
   14317 	/* CCM configuration via CCMCTL register */
   14318 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14319 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14320 
   14321 	/* PCIe lanes configuration */
   14322 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14323 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14324 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14325 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14326 
   14327 	/* PCIe PLL Configuration */
   14328 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14329 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14330 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14331 }
   14332 
   14333 static void
   14334 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14335 {
   14336 	uint32_t reg;
   14337 	uint16_t nvmword;
   14338 	int rv;
   14339 
   14340 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14341 		return;
   14342 
   14343 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14344 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14345 	if (rv != 0) {
   14346 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14347 		    __func__);
   14348 		return;
   14349 	}
   14350 
   14351 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14352 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14353 		reg |= MDICNFG_DEST;
   14354 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14355 		reg |= MDICNFG_COM_MDIO;
   14356 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14357 }
   14358 
   14359 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14360 
   14361 static bool
   14362 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14363 {
   14364 	int i;
   14365 	uint32_t reg;
   14366 	uint16_t id1, id2;
   14367 
   14368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14369 		device_xname(sc->sc_dev), __func__));
   14370 	id1 = id2 = 0xffff;
   14371 	for (i = 0; i < 2; i++) {
   14372 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14373 		if (MII_INVALIDID(id1))
   14374 			continue;
   14375 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14376 		if (MII_INVALIDID(id2))
   14377 			continue;
   14378 		break;
   14379 	}
   14380 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   14381 		goto out;
   14383 
   14384 	if (sc->sc_type < WM_T_PCH_LPT) {
   14385 		sc->phy.release(sc);
   14386 		wm_set_mdio_slow_mode_hv(sc);
   14387 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14388 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14389 		sc->phy.acquire(sc);
   14390 	}
   14391 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14392 		printf("XXX return with false\n");
   14393 		return false;
   14394 	}
   14395 out:
   14396 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14397 		/* Only unforce SMBus if ME is not active */
   14398 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14399 			/* Unforce SMBus mode in PHY */
   14400 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14401 			    CV_SMB_CTRL);
   14402 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14403 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14404 			    CV_SMB_CTRL, reg);
   14405 
   14406 			/* Unforce SMBus mode in MAC */
   14407 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14408 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14409 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14410 		}
   14411 	}
   14412 	return true;
   14413 }
   14414 
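/*
 * Power-cycle the PHY via the LANPHYPC pin.  The override is asserted
 * with the value driven low for ~1ms so the pulse registers; parts
 * older than PCH_LPT then simply wait 50ms, while newer ones poll
 * CTRL_EXT_LPCD for completion before a shorter settle delay.
 */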
   14415 static void
   14416 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14417 {
   14418 	uint32_t reg;
   14419 	int i;
   14420 
   14421 	/* Set PHY Config Counter to 50msec */
   14422 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14423 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14424 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14425 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14426 
   14427 	/* Toggle LANPHYPC */
   14428 	reg = CSR_READ(sc, WMREG_CTRL);
   14429 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14430 	reg &= ~CTRL_LANPHYPC_VALUE;
   14431 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14432 	CSR_WRITE_FLUSH(sc);
   14433 	delay(1000);
   14434 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14435 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14436 	CSR_WRITE_FLUSH(sc);
   14437 
   14438 	if (sc->sc_type < WM_T_PCH_LPT)
   14439 		delay(50 * 1000);
   14440 	else {
   14441 		i = 20;
   14442 
   14443 		do {
   14444 			delay(5 * 1000);
   14445 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14446 		    && i--);
   14447 
   14448 		delay(30 * 1000);
   14449 	}
   14450 }
   14451 
   14452 static int
   14453 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14454 {
   14455 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14456 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14457 	uint32_t rxa;
   14458 	uint16_t scale = 0, lat_enc = 0;
   14459 	int32_t obff_hwm = 0;
   14460 	int64_t lat_ns, value;
   14461 
   14462 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14463 		device_xname(sc->sc_dev), __func__));
   14464 
   14465 	if (link) {
   14466 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14467 		uint32_t status;
   14468 		uint16_t speed;
   14469 		pcireg_t preg;
   14470 
   14471 		status = CSR_READ(sc, WMREG_STATUS);
   14472 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14473 		case STATUS_SPEED_10:
   14474 			speed = 10;
   14475 			break;
   14476 		case STATUS_SPEED_100:
   14477 			speed = 100;
   14478 			break;
   14479 		case STATUS_SPEED_1000:
   14480 			speed = 1000;
   14481 			break;
   14482 		default:
   14483 			device_printf(sc->sc_dev, "Unknown speed "
   14484 			    "(status = %08x)\n", status);
   14485 			return -1;
   14486 		}
   14487 
   14488 		/* Rx Packet Buffer Allocation size (KB) */
   14489 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14490 
   14491 		/*
   14492 		 * Determine the maximum latency tolerated by the device.
   14493 		 *
   14494 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14495 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14496 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14497 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14498 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14499 		 */
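		/*
		 * Worked example with illustrative numbers: lat_ns = 50000
		 * does not fit in the 10-bit value field, so the loop below
		 * divides by 2^5 until it does: 50000 -> howmany(50000, 32)
		 * = 1563 (scale 1) -> howmany(1563, 32) = 49 (scale 2).
		 * The result encodes 49 * 2^10 ns, the smallest
		 * representable latency that is >= 50000 ns.
		 */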
   14500 		lat_ns = ((int64_t)rxa * 1024 -
   14501 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14502 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14503 		if (lat_ns < 0)
   14504 			lat_ns = 0;
   14505 		else
   14506 			lat_ns /= speed;
   14507 		value = lat_ns;
   14508 
   14509 		while (value > LTRV_VALUE) {
   14510 			scale++;
   14511 			value = howmany(value, __BIT(5));
   14512 		}
   14513 		if (scale > LTRV_SCALE_MAX) {
   14514 			printf("%s: Invalid LTR latency scale %d\n",
   14515 			    device_xname(sc->sc_dev), scale);
   14516 			return -1;
   14517 		}
   14518 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14519 
   14520 		/* Determine the maximum latency tolerated by the platform */
   14521 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14522 		    WM_PCI_LTR_CAP_LPT);
   14523 		max_snoop = preg & 0xffff;
   14524 		max_nosnoop = preg >> 16;
   14525 
   14526 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14527 
   14528 		if (lat_enc > max_ltr_enc) {
   14529 			lat_enc = max_ltr_enc;
   14530 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14531 			    * PCI_LTR_SCALETONS(
   14532 				    __SHIFTOUT(lat_enc,
   14533 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14534 		}
   14535 
   14536 		if (lat_ns) {
   14537 			lat_ns *= speed * 1000;
   14538 			lat_ns /= 8;
   14539 			lat_ns /= 1000000000;
   14540 			obff_hwm = (int32_t)(rxa - lat_ns);
   14541 		}
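		/*
		 * The arithmetic above converts lat_ns from time into the
		 * amount of data (in KB, matching rxa's units) arriving at
		 * line rate during that latency; the OBFF high water mark
		 * is whatever is left of the Rx packet buffer after
		 * absorbing that burst.
		 */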
   14542 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14543 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   14544 			    " (rxa = %d, lat_ns = %d)\n",
   14545 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14546 			return -1;
   14547 		}
   14548 	}
   14549 	/* Snoop and No-Snoop latencies the same */
   14550 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14551 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14552 
   14553 	/* Set OBFF high water mark */
   14554 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14555 	reg |= obff_hwm;
   14556 	CSR_WRITE(sc, WMREG_SVT, reg);
   14557 
   14558 	/* Enable OBFF */
   14559 	reg = CSR_READ(sc, WMREG_SVCR);
   14560 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14561 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14562 
   14563 	return 0;
   14564 }
   14565 
   14566 /*
   14567  * I210 Errata 25 and I211 Errata 10
   14568  * Slow System Clock.
   14569  */
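/*
 * The loop below re-reads the PHY's PLL frequency register and, while
 * it still reads as unconfigured, resets the internal PHY, rewrites the
 * iNVM autoload word with the PLL workaround bit set, and bounces the
 * device through D3hot/D0 so that the word is re-latched.
 */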
   14570 static void
   14571 wm_pll_workaround_i210(struct wm_softc *sc)
   14572 {
   14573 	uint32_t mdicnfg, wuc;
   14574 	uint32_t reg;
   14575 	pcireg_t pcireg;
   14576 	uint32_t pmreg;
   14577 	uint16_t nvmword, tmp_nvmword;
   14578 	int phyval;
   14579 	bool wa_done = false;
   14580 	int i;
   14581 
   14582 	/* Save WUC and MDICNFG registers */
   14583 	wuc = CSR_READ(sc, WMREG_WUC);
   14584 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14585 
   14586 	reg = mdicnfg & ~MDICNFG_DEST;
   14587 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14588 
   14589 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14590 		nvmword = INVM_DEFAULT_AL;
   14591 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14592 
   14593 	/* Get Power Management cap offset */
   14594 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14595 		&pmreg, NULL) == 0)
   14596 		return;
   14597 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14598 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14599 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14600 
   14601 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14602 			break; /* OK */
   14603 		}
   14604 
   14605 		wa_done = true;
   14606 		/* Directly reset the internal PHY */
   14607 		reg = CSR_READ(sc, WMREG_CTRL);
   14608 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14609 
   14610 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14611 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14612 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14613 
   14614 		CSR_WRITE(sc, WMREG_WUC, 0);
   14615 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14616 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14617 
   14618 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14619 		    pmreg + PCI_PMCSR);
   14620 		pcireg |= PCI_PMCSR_STATE_D3;
   14621 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14622 		    pmreg + PCI_PMCSR, pcireg);
   14623 		delay(1000);
   14624 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14625 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14626 		    pmreg + PCI_PMCSR, pcireg);
   14627 
   14628 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14629 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14630 
   14631 		/* Restore WUC register */
   14632 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14633 	}
   14634 
   14635 	/* Restore MDICNFG setting */
   14636 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14637 	if (wa_done)
   14638 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14639 }
   14640 
   14641 static void
   14642 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14643 {
   14644 	uint32_t reg;
   14645 
   14646 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14647 		device_xname(sc->sc_dev), __func__));
   14648 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14649 
   14650 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14651 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14652 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14653 
   14654 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14655 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14656 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14657 }
   14658