/*	$NetBSD: if_wm.c,v 1.600 2018/11/20 04:04:42 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.600 2018/11/20 04:04:42 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
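
/*
 * Usage sketch for DPRINTF() (illustrative, not part of the driver): the
 * first argument selects a debug class, the second is a fully parenthesized
 * printf() argument list, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 */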

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
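
/*
 * Example of the masking arithmetic above: with WM_NTXDESC(txq) == 4096,
 * WM_NTXDESC_MASK(txq) == 0xfff, so WM_NEXTTX(txq, 4095) wraps back to 0.
 * This is why txq_ndesc and txq_num must be powers of two.
 */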

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
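
/*
 * Illustrative expansion of the event counter macros above:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal in sizeof() is not token-pasted, since ## has no
 * effect inside quotes; it merely sizes the buffer).  WM_Q_EVCNT_ATTACH()
 * then snprintf()s a name such as "txq00txdw" into that buffer and
 * attaches the counter under it.
 */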

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
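
/*
 * Illustrative use of the chain macros above: after WM_RXCHAIN_RESET(rxq),
 * each WM_RXCHAIN_LINK(rxq, m) appends m at *rxq_tailp and advances
 * rxq_tailp to &m->m_next, so a multi-buffer packet is assembled in O(1)
 * per fragment without re-walking the mbuf chain.
 */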

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
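
/*
 * CSR_WRITE_FLUSH() reads STATUS purely for its side effect: the read
 * forces previously posted PCI writes out to the device.  A typical
 * (illustrative) pattern:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(...);
 */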

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
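
/*
 * Example of the 64-bit address split above: a descriptor ring at bus
 * address 0x123456000 yields WM_CDTXADDR_LO() == 0x23456000 and
 * WM_CDTXADDR_HI() == 0x1; with a 32-bit bus_addr_t the high half is
 * always 0.
 */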

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1333 	  "PCH2 LAN (82579V) Controller",
   1334 	  WM_T_PCH2,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1336 	  "82575EB dual-1000baseT Ethernet",
   1337 	  WM_T_82575,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1339 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1340 	  WM_T_82575,		WMP_F_SERDES },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1342 	  "82575GB quad-1000baseT Ethernet",
   1343 	  WM_T_82575,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1345 	  "82575GB quad-1000baseT Ethernet (PM)",
   1346 	  WM_T_82575,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1348 	  "82576 1000BaseT Ethernet",
   1349 	  WM_T_82576,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1351 	  "82576 1000BaseX Ethernet",
   1352 	  WM_T_82576,		WMP_F_FIBER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1355 	  "82576 gigabit Ethernet (SERDES)",
   1356 	  WM_T_82576,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1359 	  "82576 quad-1000BaseT Ethernet",
   1360 	  WM_T_82576,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1363 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1364 	  WM_T_82576,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1367 	  "82576 gigabit Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1371 	  "82576 gigabit Ethernet (SERDES)",
   1372 	  WM_T_82576,		WMP_F_SERDES },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1374 	  "82576 quad-gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1378 	  "82580 1000BaseT Ethernet",
   1379 	  WM_T_82580,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1381 	  "82580 1000BaseX Ethernet",
   1382 	  WM_T_82580,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1385 	  "82580 1000BaseT Ethernet (SERDES)",
   1386 	  WM_T_82580,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1389 	  "82580 gigabit Ethernet (SGMII)",
   1390 	  WM_T_82580,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1392 	  "82580 dual-1000BaseT Ethernet",
   1393 	  WM_T_82580,		WMP_F_COPPER },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1396 	  "82580 quad-1000BaseX Ethernet",
   1397 	  WM_T_82580,		WMP_F_FIBER },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1400 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1401 	  WM_T_82580,		WMP_F_COPPER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1404 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1408 	  "DH89XXCC 1000BASE-KX Ethernet",
   1409 	  WM_T_82580,		WMP_F_SERDES },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1412 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1413 	  WM_T_82580,		WMP_F_SERDES },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1416 	  "I350 Gigabit Network Connection",
   1417 	  WM_T_I350,		WMP_F_COPPER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1420 	  "I350 Gigabit Fiber Network Connection",
   1421 	  WM_T_I350,		WMP_F_FIBER },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1424 	  "I350 Gigabit Backplane Connection",
   1425 	  WM_T_I350,		WMP_F_SERDES },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1428 	  "I350 Quad Port Gigabit Ethernet",
   1429 	  WM_T_I350,		WMP_F_SERDES },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1432 	  "I350 Gigabit Connection",
   1433 	  WM_T_I350,		WMP_F_COPPER },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1436 	  "I354 Gigabit Ethernet (KX)",
   1437 	  WM_T_I354,		WMP_F_SERDES },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1440 	  "I354 Gigabit Ethernet (SGMII)",
   1441 	  WM_T_I354,		WMP_F_COPPER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1444 	  "I354 Gigabit Ethernet (2.5G)",
   1445 	  WM_T_I354,		WMP_F_COPPER },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1448 	  "I210-T1 Ethernet Server Adapter",
   1449 	  WM_T_I210,		WMP_F_COPPER },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1452 	  "I210 Ethernet (Copper OEM)",
   1453 	  WM_T_I210,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1456 	  "I210 Ethernet (Copper IT)",
   1457 	  WM_T_I210,		WMP_F_COPPER },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1460 	  "I210 Ethernet (FLASH less)",
   1461 	  WM_T_I210,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1464 	  "I210 Gigabit Ethernet (Fiber)",
   1465 	  WM_T_I210,		WMP_F_FIBER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1468 	  "I210 Gigabit Ethernet (SERDES)",
   1469 	  WM_T_I210,		WMP_F_SERDES },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1472 	  "I210 Gigabit Ethernet (FLASH less)",
   1473 	  WM_T_I210,		WMP_F_SERDES },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1476 	  "I210 Gigabit Ethernet (SGMII)",
   1477 	  WM_T_I210,		WMP_F_COPPER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1480 	  "I211 Ethernet (COPPER)",
   1481 	  WM_T_I211,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1483 	  "I217 V Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1486 	  "I217 LM Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1489 	  "I218 V Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1492 	  "I218 V Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1495 	  "I218 V Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1498 	  "I218 LM Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1501 	  "I218 LM Ethernet Connection",
   1502 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1504 	  "I218 LM Ethernet Connection",
   1505 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1507 	  "I219 V Ethernet Connection",
   1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1513 	  "I219 V Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1516 	  "I219 V Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1519 	  "I219 LM Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1528 	  "I219 LM Ethernet Connection",
   1529 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1534 	  "I219 V Ethernet Connection",
   1535 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1537 	  "I219 V Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1540 	  "I219 LM Ethernet Connection",
   1541 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1545 	{ 0,			0,
   1546 	  NULL,
   1547 	  0,			0 },
   1548 };
   1549 
   1550 /*
   1551  * Register read/write functions.
   1552  * Other than CSR_{READ|WRITE}().
   1553  */
   1554 
   1555 #if 0 /* Not currently used */
   1556 static inline uint32_t
   1557 wm_io_read(struct wm_softc *sc, int reg)
   1558 {
   1559 
   1560 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1561 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1562 }
   1563 #endif
   1564 
   1565 static inline void
   1566 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1567 {
   1568 
   1569 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1570 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1571 }
   1572 
   1573 static inline void
   1574 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1575     uint32_t data)
   1576 {
   1577 	uint32_t regval;
   1578 	int i;
   1579 
   1580 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1581 
   1582 	CSR_WRITE(sc, reg, regval);
   1583 
   1584 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1585 		delay(5);
   1586 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1587 			break;
   1588 	}
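	/*
	 * Illustrative note: each poll above waits 5us, so we wait at most
	 * SCTL_CTL_POLL_TIMEOUT * 5 microseconds for SCTL_CTL_READY before
	 * warning below that the write did not complete.
	 */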
   1589 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1590 		aprint_error("%s: WARNING:"
   1591 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1592 		    device_xname(sc->sc_dev), reg);
   1593 	}
   1594 }
   1595 
   1596 static inline void
   1597 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1598 {
   1599 	wa->wa_low = htole32(v & 0xffffffffU);
   1600 	if (sizeof(bus_addr_t) == 8)
   1601 		wa->wa_high = htole32((uint64_t) v >> 32);
   1602 	else
   1603 		wa->wa_high = 0;
   1604 }
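/*
 * Example (illustrative) for wm_set_dma_addr() above: with a 64-bit
 * bus_addr_t of 0x0000000123456789, wa_low becomes htole32(0x23456789)
 * and wa_high becomes htole32(0x00000001); when bus_addr_t is only 32
 * bits wide, wa_high is simply 0.
 */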
   1605 
   1606 /*
   1607  * Descriptor sync/init functions.
   1608  */
   1609 static inline void
   1610 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1611 {
   1612 	struct wm_softc *sc = txq->txq_sc;
   1613 
   1614 	/* If it will wrap around, sync to the end of the ring. */
   1615 	if ((start + num) > WM_NTXDESC(txq)) {
   1616 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1617 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1618 		    (WM_NTXDESC(txq) - start), ops);
   1619 		num -= (WM_NTXDESC(txq) - start);
   1620 		start = 0;
   1621 	}
   1622 
   1623 	/* Now sync whatever is left. */
   1624 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1625 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1626 }
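/*
 * Example (illustrative) for wm_cdtxsync() above: with
 * WM_NTXDESC(txq) == 256, start == 250 and num == 10, the first
 * bus_dmamap_sync() covers descriptors 250..255 and the second covers
 * descriptors 0..3, so a range that wraps around the end of the ring
 * is synced in two pieces.
 */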
   1627 
   1628 static inline void
   1629 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1630 {
   1631 	struct wm_softc *sc = rxq->rxq_sc;
   1632 
   1633 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1634 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1635 }
   1636 
   1637 static inline void
   1638 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1639 {
   1640 	struct wm_softc *sc = rxq->rxq_sc;
   1641 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1642 	struct mbuf *m = rxs->rxs_mbuf;
   1643 
   1644 	/*
   1645 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1646 	 * so that the payload after the Ethernet header is aligned
   1647 	 * to a 4-byte boundary.
    1648 	 *
   1649 	 * XXX BRAINDAMAGE ALERT!
   1650 	 * The stupid chip uses the same size for every buffer, which
   1651 	 * is set in the Receive Control register.  We are using the 2K
   1652 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1653 	 * reason, we can't "scoot" packets longer than the standard
   1654 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1655 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1656 	 * the upper layer copy the headers.
   1657 	 */
   1658 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1659 
   1660 	if (sc->sc_type == WM_T_82574) {
   1661 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1662 		rxd->erx_data.erxd_addr =
   1663 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1664 		rxd->erx_data.erxd_dd = 0;
   1665 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1666 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1667 
   1668 		rxd->nqrx_data.nrxd_paddr =
   1669 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1670 		/* Currently, split header is not supported. */
   1671 		rxd->nqrx_data.nrxd_haddr = 0;
   1672 	} else {
   1673 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1674 
   1675 		wm_set_dma_addr(&rxd->wrx_addr,
   1676 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1677 		rxd->wrx_len = 0;
   1678 		rxd->wrx_cksum = 0;
   1679 		rxd->wrx_status = 0;
   1680 		rxd->wrx_errors = 0;
   1681 		rxd->wrx_special = 0;
   1682 	}
   1683 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1684 
   1685 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1686 }
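/*
 * Illustrative numbers for the align_tweak logic in wm_init_rxdesc()
 * above: with the 2K receive buffer size and sc_align_tweak == 2, a
 * standard 1518-byte Ethernet frame still fits (2 + 1518 <= 2048) and
 * the payload after the 14-byte Ethernet header lands on a 4-byte
 * boundary; for frames that could exceed (2K - 2) bytes the tweak must
 * be 0, as described in that function's comment.
 */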
   1687 
   1688 /*
   1689  * Device driver interface functions and commonly used functions.
   1690  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1691  */
   1692 
   1693 /* Lookup supported device table */
   1694 static const struct wm_product *
   1695 wm_lookup(const struct pci_attach_args *pa)
   1696 {
   1697 	const struct wm_product *wmp;
   1698 
   1699 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1700 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1701 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1702 			return wmp;
   1703 	}
   1704 	return NULL;
   1705 }
   1706 
   1707 /* The match function (ca_match) */
   1708 static int
   1709 wm_match(device_t parent, cfdata_t cf, void *aux)
   1710 {
   1711 	struct pci_attach_args *pa = aux;
   1712 
   1713 	if (wm_lookup(pa) != NULL)
   1714 		return 1;
   1715 
   1716 	return 0;
   1717 }
   1718 
   1719 /* The attach function (ca_attach) */
   1720 static void
   1721 wm_attach(device_t parent, device_t self, void *aux)
   1722 {
   1723 	struct wm_softc *sc = device_private(self);
   1724 	struct pci_attach_args *pa = aux;
   1725 	prop_dictionary_t dict;
   1726 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1727 	pci_chipset_tag_t pc = pa->pa_pc;
   1728 	int counts[PCI_INTR_TYPE_SIZE];
   1729 	pci_intr_type_t max_type;
   1730 	const char *eetype, *xname;
   1731 	bus_space_tag_t memt;
   1732 	bus_space_handle_t memh;
   1733 	bus_size_t memsize;
   1734 	int memh_valid;
   1735 	int i, error;
   1736 	const struct wm_product *wmp;
   1737 	prop_data_t ea;
   1738 	prop_number_t pn;
   1739 	uint8_t enaddr[ETHER_ADDR_LEN];
   1740 	char buf[256];
   1741 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1742 	pcireg_t preg, memtype;
   1743 	uint16_t eeprom_data, apme_mask;
   1744 	bool force_clear_smbi;
   1745 	uint32_t link_mode;
   1746 	uint32_t reg;
   1747 
   1748 	sc->sc_dev = self;
   1749 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1750 	sc->sc_core_stopping = false;
   1751 
   1752 	wmp = wm_lookup(pa);
   1753 #ifdef DIAGNOSTIC
   1754 	if (wmp == NULL) {
   1755 		printf("\n");
   1756 		panic("wm_attach: impossible");
   1757 	}
   1758 #endif
   1759 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1760 
   1761 	sc->sc_pc = pa->pa_pc;
   1762 	sc->sc_pcitag = pa->pa_tag;
   1763 
   1764 	if (pci_dma64_available(pa))
   1765 		sc->sc_dmat = pa->pa_dmat64;
   1766 	else
   1767 		sc->sc_dmat = pa->pa_dmat;
   1768 
   1769 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1770 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1771 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1772 
   1773 	sc->sc_type = wmp->wmp_type;
   1774 
   1775 	/* Set default function pointers */
   1776 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1777 	sc->phy.release = sc->nvm.release = wm_put_null;
   1778 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1779 
   1780 	if (sc->sc_type < WM_T_82543) {
   1781 		if (sc->sc_rev < 2) {
   1782 			aprint_error_dev(sc->sc_dev,
   1783 			    "i82542 must be at least rev. 2\n");
   1784 			return;
   1785 		}
   1786 		if (sc->sc_rev < 3)
   1787 			sc->sc_type = WM_T_82542_2_0;
   1788 	}
   1789 
   1790 	/*
   1791 	 * Disable MSI for Errata:
   1792 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1793 	 *
   1794 	 *  82544: Errata 25
   1795 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1796 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1797 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1798 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1799 	 *
   1800 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1801 	 *
   1802 	 *  82571 & 82572: Errata 63
   1803 	 */
   1804 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1805 	    || (sc->sc_type == WM_T_82572))
   1806 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1807 
   1808 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1809 	    || (sc->sc_type == WM_T_82580)
   1810 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1811 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1812 		sc->sc_flags |= WM_F_NEWQUEUE;
   1813 
   1814 	/* Set device properties (mactype) */
   1815 	dict = device_properties(sc->sc_dev);
   1816 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1817 
   1818 	/*
    1819 	 * Map the device.  All devices support memory-mapped access,
   1820 	 * and it is really required for normal operation.
   1821 	 */
   1822 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1823 	switch (memtype) {
   1824 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1825 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1826 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1827 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1828 		break;
   1829 	default:
   1830 		memh_valid = 0;
   1831 		break;
   1832 	}
   1833 
   1834 	if (memh_valid) {
   1835 		sc->sc_st = memt;
   1836 		sc->sc_sh = memh;
   1837 		sc->sc_ss = memsize;
   1838 	} else {
   1839 		aprint_error_dev(sc->sc_dev,
   1840 		    "unable to map device registers\n");
   1841 		return;
   1842 	}
   1843 
   1844 	/*
   1845 	 * In addition, i82544 and later support I/O mapped indirect
   1846 	 * register access.  It is not desirable (nor supported in
   1847 	 * this driver) to use it for normal operation, though it is
   1848 	 * required to work around bugs in some chip versions.
   1849 	 */
   1850 	if (sc->sc_type >= WM_T_82544) {
   1851 		/* First we have to find the I/O BAR. */
   1852 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1853 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1854 			if (memtype == PCI_MAPREG_TYPE_IO)
   1855 				break;
   1856 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1857 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1858 				i += 4;	/* skip high bits, too */
   1859 		}
   1860 		if (i < PCI_MAPREG_END) {
   1861 			/*
    1862 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1863 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1864 			 * That's no problem, because the newer chips don't
    1865 			 * have this bug.
    1866 			 *
    1867 			 * The i8254x apparently doesn't respond when the
    1868 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1869 			 * been configured.
   1870 			 */
   1871 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1872 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1873 				aprint_error_dev(sc->sc_dev,
   1874 				    "WARNING: I/O BAR at zero.\n");
   1875 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1876 					0, &sc->sc_iot, &sc->sc_ioh,
   1877 					NULL, &sc->sc_ios) == 0) {
   1878 				sc->sc_flags |= WM_F_IOH_VALID;
   1879 			} else
   1880 				aprint_error_dev(sc->sc_dev,
   1881 				    "WARNING: unable to map I/O space\n");
   1882 		}
   1883 
   1884 	}
   1885 
   1886 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1887 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1888 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1889 	if (sc->sc_type < WM_T_82542_2_1)
   1890 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1891 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1892 
   1893 	/* power up chip */
   1894 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1895 	    && error != EOPNOTSUPP) {
   1896 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1897 		return;
   1898 	}
   1899 
   1900 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1901 	/*
    1902 	 * To save interrupt resources, don't use MSI-X if we can use
    1903 	 * only one queue.
   1904 	 */
   1905 	if (sc->sc_nqueues > 1) {
   1906 		max_type = PCI_INTR_TYPE_MSIX;
   1907 		/*
    1908 		 * The 82583 has an MSI-X capability in its PCI configuration
    1909 		 * space, but the device doesn't actually support MSI-X. At
    1910 		 * least the documentation doesn't say anything about MSI-X.
   1911 		 */
   1912 		counts[PCI_INTR_TYPE_MSIX]
   1913 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1914 	} else {
   1915 		max_type = PCI_INTR_TYPE_MSI;
   1916 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1917 	}
   1918 
   1919 	/* Allocation settings */
   1920 	counts[PCI_INTR_TYPE_MSI] = 1;
   1921 	counts[PCI_INTR_TYPE_INTX] = 1;
   1922 	/* overridden by disable flags */
   1923 	if (wm_disable_msi != 0) {
   1924 		counts[PCI_INTR_TYPE_MSI] = 0;
   1925 		if (wm_disable_msix != 0) {
   1926 			max_type = PCI_INTR_TYPE_INTX;
   1927 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1928 		}
   1929 	} else if (wm_disable_msix != 0) {
   1930 		max_type = PCI_INTR_TYPE_MSI;
   1931 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1932 	}
   1933 
   1934 alloc_retry:
   1935 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1936 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1937 		return;
   1938 	}
   1939 
   1940 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1941 		error = wm_setup_msix(sc);
   1942 		if (error) {
   1943 			pci_intr_release(pc, sc->sc_intrs,
   1944 			    counts[PCI_INTR_TYPE_MSIX]);
   1945 
   1946 			/* Setup for MSI: Disable MSI-X */
   1947 			max_type = PCI_INTR_TYPE_MSI;
   1948 			counts[PCI_INTR_TYPE_MSI] = 1;
   1949 			counts[PCI_INTR_TYPE_INTX] = 1;
   1950 			goto alloc_retry;
   1951 		}
   1952 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1953 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1954 		error = wm_setup_legacy(sc);
   1955 		if (error) {
   1956 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1957 			    counts[PCI_INTR_TYPE_MSI]);
   1958 
   1959 			/* The next try is for INTx: Disable MSI */
   1960 			max_type = PCI_INTR_TYPE_INTX;
   1961 			counts[PCI_INTR_TYPE_INTX] = 1;
   1962 			goto alloc_retry;
   1963 		}
   1964 	} else {
   1965 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1966 		error = wm_setup_legacy(sc);
   1967 		if (error) {
   1968 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1969 			    counts[PCI_INTR_TYPE_INTX]);
   1970 			return;
   1971 		}
   1972 	}
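	/*
	 * Net effect of the retry logic above (illustrative summary):
	 * interrupt types are tried in the order MSI-X -> MSI -> INTx,
	 * releasing the previously allocated vectors before each retry.
	 */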
   1973 
   1974 	/*
   1975 	 * Check the function ID (unit number of the chip).
   1976 	 */
   1977 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1978 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1979 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1980 	    || (sc->sc_type == WM_T_82580)
   1981 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1982 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1983 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1984 	else
   1985 		sc->sc_funcid = 0;
   1986 
   1987 	/*
   1988 	 * Determine a few things about the bus we're connected to.
   1989 	 */
   1990 	if (sc->sc_type < WM_T_82543) {
   1991 		/* We don't really know the bus characteristics here. */
   1992 		sc->sc_bus_speed = 33;
   1993 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1994 		/*
    1995 		 * CSA (Communication Streaming Architecture) is about as fast
    1996 		 * as a 32-bit 66MHz PCI bus.
   1997 		 */
   1998 		sc->sc_flags |= WM_F_CSA;
   1999 		sc->sc_bus_speed = 66;
   2000 		aprint_verbose_dev(sc->sc_dev,
   2001 		    "Communication Streaming Architecture\n");
   2002 		if (sc->sc_type == WM_T_82547) {
   2003 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2004 			callout_setfunc(&sc->sc_txfifo_ch,
   2005 			    wm_82547_txfifo_stall, sc);
   2006 			aprint_verbose_dev(sc->sc_dev,
   2007 			    "using 82547 Tx FIFO stall work-around\n");
   2008 		}
   2009 	} else if (sc->sc_type >= WM_T_82571) {
   2010 		sc->sc_flags |= WM_F_PCIE;
   2011 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2012 		    && (sc->sc_type != WM_T_ICH10)
   2013 		    && (sc->sc_type != WM_T_PCH)
   2014 		    && (sc->sc_type != WM_T_PCH2)
   2015 		    && (sc->sc_type != WM_T_PCH_LPT)
   2016 		    && (sc->sc_type != WM_T_PCH_SPT)
   2017 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2018 			/* ICH* and PCH* have no PCIe capability registers */
   2019 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2020 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2021 				NULL) == 0)
   2022 				aprint_error_dev(sc->sc_dev,
   2023 				    "unable to find PCIe capability\n");
   2024 		}
   2025 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2026 	} else {
   2027 		reg = CSR_READ(sc, WMREG_STATUS);
   2028 		if (reg & STATUS_BUS64)
   2029 			sc->sc_flags |= WM_F_BUS64;
   2030 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2031 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2032 
   2033 			sc->sc_flags |= WM_F_PCIX;
   2034 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2035 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2036 				aprint_error_dev(sc->sc_dev,
   2037 				    "unable to find PCIX capability\n");
   2038 			else if (sc->sc_type != WM_T_82545_3 &&
   2039 				 sc->sc_type != WM_T_82546_3) {
   2040 				/*
   2041 				 * Work around a problem caused by the BIOS
   2042 				 * setting the max memory read byte count
   2043 				 * incorrectly.
   2044 				 */
   2045 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2046 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2047 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2048 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2049 
   2050 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2051 				    PCIX_CMD_BYTECNT_SHIFT;
   2052 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2053 				    PCIX_STATUS_MAXB_SHIFT;
   2054 				if (bytecnt > maxb) {
   2055 					aprint_verbose_dev(sc->sc_dev,
   2056 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2057 					    512 << bytecnt, 512 << maxb);
   2058 					pcix_cmd = (pcix_cmd &
   2059 					    ~PCIX_CMD_BYTECNT_MASK) |
   2060 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2061 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2062 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2063 					    pcix_cmd);
   2064 				}
   2065 			}
   2066 		}
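		/*
		 * Example (illustrative): if the BIOS left the byte count
		 * field at 3 (512 << 3 == 4096 bytes) while the device only
		 * supports maxb == 2 (512 << 2 == 2048 bytes), the code above
		 * rewrites MMRBC down to 2048 bytes.
		 */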
   2067 		/*
   2068 		 * The quad port adapter is special; it has a PCIX-PCIX
   2069 		 * bridge on the board, and can run the secondary bus at
   2070 		 * a higher speed.
   2071 		 */
   2072 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2073 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2074 								      : 66;
   2075 		} else if (sc->sc_flags & WM_F_PCIX) {
   2076 			switch (reg & STATUS_PCIXSPD_MASK) {
   2077 			case STATUS_PCIXSPD_50_66:
   2078 				sc->sc_bus_speed = 66;
   2079 				break;
   2080 			case STATUS_PCIXSPD_66_100:
   2081 				sc->sc_bus_speed = 100;
   2082 				break;
   2083 			case STATUS_PCIXSPD_100_133:
   2084 				sc->sc_bus_speed = 133;
   2085 				break;
   2086 			default:
   2087 				aprint_error_dev(sc->sc_dev,
   2088 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2089 				    reg & STATUS_PCIXSPD_MASK);
   2090 				sc->sc_bus_speed = 66;
   2091 				break;
   2092 			}
   2093 		} else
   2094 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2095 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2096 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2097 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2098 	}
   2099 
    2100 	/* Disable ASPM L0s and/or L1 as a workaround */
   2101 	wm_disable_aspm(sc);
   2102 
   2103 	/* clear interesting stat counters */
   2104 	CSR_READ(sc, WMREG_COLC);
   2105 	CSR_READ(sc, WMREG_RXERRC);
   2106 
   2107 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2108 	    || (sc->sc_type >= WM_T_ICH8))
   2109 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2110 	if (sc->sc_type >= WM_T_ICH8)
   2111 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2112 
   2113 	/* Set PHY, NVM mutex related stuff */
   2114 	switch (sc->sc_type) {
   2115 	case WM_T_82542_2_0:
   2116 	case WM_T_82542_2_1:
   2117 	case WM_T_82543:
   2118 	case WM_T_82544:
   2119 		/* Microwire */
   2120 		sc->nvm.read = wm_nvm_read_uwire;
   2121 		sc->sc_nvm_wordsize = 64;
   2122 		sc->sc_nvm_addrbits = 6;
   2123 		break;
   2124 	case WM_T_82540:
   2125 	case WM_T_82545:
   2126 	case WM_T_82545_3:
   2127 	case WM_T_82546:
   2128 	case WM_T_82546_3:
   2129 		/* Microwire */
   2130 		sc->nvm.read = wm_nvm_read_uwire;
   2131 		reg = CSR_READ(sc, WMREG_EECD);
   2132 		if (reg & EECD_EE_SIZE) {
   2133 			sc->sc_nvm_wordsize = 256;
   2134 			sc->sc_nvm_addrbits = 8;
   2135 		} else {
   2136 			sc->sc_nvm_wordsize = 64;
   2137 			sc->sc_nvm_addrbits = 6;
   2138 		}
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		break;
   2143 	case WM_T_82541:
   2144 	case WM_T_82541_2:
   2145 	case WM_T_82547:
   2146 	case WM_T_82547_2:
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		/*
    2149 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2150 		 * the 8254[17], so set the flags and functions before calling it.
   2151 		 */
   2152 		sc->sc_flags |= WM_F_LOCK_EECD;
   2153 		sc->nvm.acquire = wm_get_eecd;
   2154 		sc->nvm.release = wm_put_eecd;
   2155 		if (reg & EECD_EE_TYPE) {
   2156 			/* SPI */
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2159 			wm_nvm_set_addrbits_size_eecd(sc);
   2160 		} else {
   2161 			/* Microwire */
   2162 			sc->nvm.read = wm_nvm_read_uwire;
   2163 			if ((reg & EECD_EE_ABITS) != 0) {
   2164 				sc->sc_nvm_wordsize = 256;
   2165 				sc->sc_nvm_addrbits = 8;
   2166 			} else {
   2167 				sc->sc_nvm_wordsize = 64;
   2168 				sc->sc_nvm_addrbits = 6;
   2169 			}
   2170 		}
   2171 		break;
   2172 	case WM_T_82571:
   2173 	case WM_T_82572:
   2174 		/* SPI */
   2175 		sc->nvm.read = wm_nvm_read_eerd;
    2176 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2177 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 		wm_nvm_set_addrbits_size_eecd(sc);
   2179 		sc->phy.acquire = wm_get_swsm_semaphore;
   2180 		sc->phy.release = wm_put_swsm_semaphore;
   2181 		sc->nvm.acquire = wm_get_nvm_82571;
   2182 		sc->nvm.release = wm_put_nvm_82571;
   2183 		break;
   2184 	case WM_T_82573:
   2185 	case WM_T_82574:
   2186 	case WM_T_82583:
   2187 		sc->nvm.read = wm_nvm_read_eerd;
    2188 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2189 		if (sc->sc_type == WM_T_82573) {
   2190 			sc->phy.acquire = wm_get_swsm_semaphore;
   2191 			sc->phy.release = wm_put_swsm_semaphore;
   2192 			sc->nvm.acquire = wm_get_nvm_82571;
   2193 			sc->nvm.release = wm_put_nvm_82571;
   2194 		} else {
   2195 			/* Both PHY and NVM use the same semaphore. */
   2196 			sc->phy.acquire = sc->nvm.acquire
   2197 			    = wm_get_swfwhw_semaphore;
   2198 			sc->phy.release = sc->nvm.release
   2199 			    = wm_put_swfwhw_semaphore;
   2200 		}
   2201 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2202 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2203 			sc->sc_nvm_wordsize = 2048;
   2204 		} else {
   2205 			/* SPI */
   2206 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2207 			wm_nvm_set_addrbits_size_eecd(sc);
   2208 		}
   2209 		break;
   2210 	case WM_T_82575:
   2211 	case WM_T_82576:
   2212 	case WM_T_82580:
   2213 	case WM_T_I350:
   2214 	case WM_T_I354:
   2215 	case WM_T_80003:
   2216 		/* SPI */
   2217 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2218 		wm_nvm_set_addrbits_size_eecd(sc);
   2219 		if ((sc->sc_type == WM_T_80003)
   2220 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		} else {
   2224 			sc->nvm.read = wm_nvm_read_spi;
   2225 			sc->sc_flags |= WM_F_LOCK_EECD;
   2226 		}
   2227 		sc->phy.acquire = wm_get_phy_82575;
   2228 		sc->phy.release = wm_put_phy_82575;
   2229 		sc->nvm.acquire = wm_get_nvm_80003;
   2230 		sc->nvm.release = wm_put_nvm_80003;
   2231 		break;
   2232 	case WM_T_ICH8:
   2233 	case WM_T_ICH9:
   2234 	case WM_T_ICH10:
   2235 	case WM_T_PCH:
   2236 	case WM_T_PCH2:
   2237 	case WM_T_PCH_LPT:
   2238 		sc->nvm.read = wm_nvm_read_ich8;
   2239 		/* FLASH */
   2240 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2241 		sc->sc_nvm_wordsize = 2048;
   2242 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2243 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2244 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2245 			aprint_error_dev(sc->sc_dev,
   2246 			    "can't map FLASH registers\n");
   2247 			goto out;
   2248 		}
   2249 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2250 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2251 		    ICH_FLASH_SECTOR_SIZE;
   2252 		sc->sc_ich8_flash_bank_size =
   2253 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2254 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2255 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2256 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
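		/*
		 * Worked example (illustrative, assuming the usual 4KB
		 * ICH_FLASH_SECTOR_SIZE): if GFPREG reads 0x00100001, the
		 * base field is 1 (flash base 4KB) and the limit field is
		 * 0x10, so the region spans 0x10 - 1 + 1 = 16 sectors (64KB);
		 * counted in 16-bit words and split across the two banks,
		 * each bank holds 65536 / 4 = 16384 words.
		 */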
   2257 		sc->sc_flashreg_offset = 0;
   2258 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2259 		sc->phy.release = wm_put_swflag_ich8lan;
   2260 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2261 		sc->nvm.release = wm_put_nvm_ich8lan;
   2262 		break;
   2263 	case WM_T_PCH_SPT:
   2264 	case WM_T_PCH_CNP:
   2265 		sc->nvm.read = wm_nvm_read_spt;
   2266 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2267 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2268 		sc->sc_flasht = sc->sc_st;
   2269 		sc->sc_flashh = sc->sc_sh;
   2270 		sc->sc_ich8_flash_base = 0;
   2271 		sc->sc_nvm_wordsize =
   2272 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2273 		    * NVM_SIZE_MULTIPLIER;
    2274 		/* It is the size in bytes; we want words */
   2275 		sc->sc_nvm_wordsize /= 2;
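		/*
		 * Worked example (illustrative, assuming the usual 4KB
		 * NVM_SIZE_MULTIPLIER): a STRAP field value of 7 gives
		 * (7 + 1) * 4096 = 32768 bytes, halved to 16384 16-bit words.
		 */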
   2276 		/* assume 2 banks */
   2277 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2278 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2279 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2280 		sc->phy.release = wm_put_swflag_ich8lan;
   2281 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2282 		sc->nvm.release = wm_put_nvm_ich8lan;
   2283 		break;
   2284 	case WM_T_I210:
   2285 	case WM_T_I211:
    2286 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2287 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2288 		if (wm_nvm_flash_presence_i210(sc)) {
   2289 			sc->nvm.read = wm_nvm_read_eerd;
   2290 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2291 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2292 			wm_nvm_set_addrbits_size_eecd(sc);
   2293 		} else {
   2294 			sc->nvm.read = wm_nvm_read_invm;
   2295 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2296 			sc->sc_nvm_wordsize = INVM_SIZE;
   2297 		}
   2298 		sc->phy.acquire = wm_get_phy_82575;
   2299 		sc->phy.release = wm_put_phy_82575;
   2300 		sc->nvm.acquire = wm_get_nvm_80003;
   2301 		sc->nvm.release = wm_put_nvm_80003;
   2302 		break;
   2303 	default:
   2304 		break;
   2305 	}
   2306 
   2307 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2308 	switch (sc->sc_type) {
   2309 	case WM_T_82571:
   2310 	case WM_T_82572:
   2311 		reg = CSR_READ(sc, WMREG_SWSM2);
   2312 		if ((reg & SWSM2_LOCK) == 0) {
   2313 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2314 			force_clear_smbi = true;
   2315 		} else
   2316 			force_clear_smbi = false;
   2317 		break;
   2318 	case WM_T_82573:
   2319 	case WM_T_82574:
   2320 	case WM_T_82583:
   2321 		force_clear_smbi = true;
   2322 		break;
   2323 	default:
   2324 		force_clear_smbi = false;
   2325 		break;
   2326 	}
   2327 	if (force_clear_smbi) {
   2328 		reg = CSR_READ(sc, WMREG_SWSM);
   2329 		if ((reg & SWSM_SMBI) != 0)
   2330 			aprint_error_dev(sc->sc_dev,
   2331 			    "Please update the Bootagent\n");
   2332 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2333 	}
   2334 
   2335 	/*
    2336 	 * Defer printing the EEPROM type until after verifying the checksum.
   2337 	 * This allows the EEPROM type to be printed correctly in the case
   2338 	 * that no EEPROM is attached.
   2339 	 */
   2340 	/*
   2341 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2342 	 * this for later, so we can fail future reads from the EEPROM.
   2343 	 */
   2344 	if (wm_nvm_validate_checksum(sc)) {
   2345 		/*
    2346 		 * Check again, because some PCI-e parts fail the
    2347 		 * first check due to the link being in a sleep state.
   2348 		 */
   2349 		if (wm_nvm_validate_checksum(sc))
   2350 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2351 	}
   2352 
   2353 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2354 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2355 	else {
   2356 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2357 		    sc->sc_nvm_wordsize);
   2358 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2359 			aprint_verbose("iNVM");
   2360 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2361 			aprint_verbose("FLASH(HW)");
   2362 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2363 			aprint_verbose("FLASH");
   2364 		else {
   2365 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2366 				eetype = "SPI";
   2367 			else
   2368 				eetype = "MicroWire";
   2369 			aprint_verbose("(%d address bits) %s EEPROM",
   2370 			    sc->sc_nvm_addrbits, eetype);
   2371 		}
   2372 	}
   2373 	wm_nvm_version(sc);
   2374 	aprint_verbose("\n");
   2375 
   2376 	/*
    2377 	 * XXX This is the first call of wm_gmii_setup_phytype. The result might be
   2378 	 * incorrect.
   2379 	 */
   2380 	wm_gmii_setup_phytype(sc, 0, 0);
   2381 
   2382 	/* Reset the chip to a known state. */
   2383 	wm_reset(sc);
   2384 
   2385 	/*
   2386 	 * Check for I21[01] PLL workaround.
   2387 	 *
   2388 	 * Three cases:
   2389 	 * a) Chip is I211.
   2390 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2391 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2392 	 */
   2393 	if (sc->sc_type == WM_T_I211)
   2394 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2395 	if (sc->sc_type == WM_T_I210) {
   2396 		if (!wm_nvm_flash_presence_i210(sc))
   2397 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2398 		else if ((sc->sc_nvm_ver_major < 3)
   2399 		    || ((sc->sc_nvm_ver_major == 3)
   2400 			&& (sc->sc_nvm_ver_minor < 25))) {
   2401 			aprint_verbose_dev(sc->sc_dev,
   2402 			    "ROM image version %d.%d is older than 3.25\n",
   2403 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2404 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2405 		}
   2406 	}
   2407 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2408 		wm_pll_workaround_i210(sc);
   2409 
   2410 	wm_get_wakeup(sc);
   2411 
   2412 	/* Non-AMT based hardware can now take control from firmware */
   2413 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2414 		wm_get_hw_control(sc);
   2415 
   2416 	/*
    2417 	 * Read the Ethernet address from the EEPROM, if it was not
    2418 	 * found first in the device properties.
   2419 	 */
   2420 	ea = prop_dictionary_get(dict, "mac-address");
   2421 	if (ea != NULL) {
   2422 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2423 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2424 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2425 	} else {
   2426 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2427 			aprint_error_dev(sc->sc_dev,
   2428 			    "unable to read Ethernet address\n");
   2429 			goto out;
   2430 		}
   2431 	}
   2432 
   2433 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2434 	    ether_sprintf(enaddr));
   2435 
   2436 	/*
   2437 	 * Read the config info from the EEPROM, and set up various
   2438 	 * bits in the control registers based on their contents.
   2439 	 */
   2440 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2441 	if (pn != NULL) {
   2442 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2443 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2444 	} else {
   2445 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2446 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2447 			goto out;
   2448 		}
   2449 	}
   2450 
   2451 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2452 	if (pn != NULL) {
   2453 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2454 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2455 	} else {
   2456 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2457 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2458 			goto out;
   2459 		}
   2460 	}
   2461 
   2462 	/* check for WM_F_WOL */
   2463 	switch (sc->sc_type) {
   2464 	case WM_T_82542_2_0:
   2465 	case WM_T_82542_2_1:
   2466 	case WM_T_82543:
   2467 		/* dummy? */
   2468 		eeprom_data = 0;
   2469 		apme_mask = NVM_CFG3_APME;
   2470 		break;
   2471 	case WM_T_82544:
   2472 		apme_mask = NVM_CFG2_82544_APM_EN;
   2473 		eeprom_data = cfg2;
   2474 		break;
   2475 	case WM_T_82546:
   2476 	case WM_T_82546_3:
   2477 	case WM_T_82571:
   2478 	case WM_T_82572:
   2479 	case WM_T_82573:
   2480 	case WM_T_82574:
   2481 	case WM_T_82583:
   2482 	case WM_T_80003:
   2483 	default:
   2484 		apme_mask = NVM_CFG3_APME;
   2485 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2486 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2487 		break;
   2488 	case WM_T_82575:
   2489 	case WM_T_82576:
   2490 	case WM_T_82580:
   2491 	case WM_T_I350:
   2492 	case WM_T_I354: /* XXX ok? */
   2493 	case WM_T_ICH8:
   2494 	case WM_T_ICH9:
   2495 	case WM_T_ICH10:
   2496 	case WM_T_PCH:
   2497 	case WM_T_PCH2:
   2498 	case WM_T_PCH_LPT:
   2499 	case WM_T_PCH_SPT:
   2500 	case WM_T_PCH_CNP:
   2501 		/* XXX The funcid should be checked on some devices */
   2502 		apme_mask = WUC_APME;
   2503 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2504 		break;
   2505 	}
   2506 
    2507 	/* Check the WM_F_WOL flag after the EEPROM settings are read */
   2508 	if ((eeprom_data & apme_mask) != 0)
   2509 		sc->sc_flags |= WM_F_WOL;
   2510 
   2511 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2512 		/* Check NVM for autonegotiation */
   2513 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2514 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2515 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2516 		}
   2517 	}
   2518 
   2519 	/*
    2520 	 * XXX Some multiple-port cards need special handling
    2521 	 * to disable a particular port.
   2522 	 */
   2523 
   2524 	if (sc->sc_type >= WM_T_82544) {
   2525 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2526 		if (pn != NULL) {
   2527 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2528 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2529 		} else {
   2530 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2531 				aprint_error_dev(sc->sc_dev,
   2532 				    "unable to read SWDPIN\n");
   2533 				goto out;
   2534 			}
   2535 		}
   2536 	}
   2537 
   2538 	if (cfg1 & NVM_CFG1_ILOS)
   2539 		sc->sc_ctrl |= CTRL_ILOS;
   2540 
   2541 	/*
   2542 	 * XXX
    2543 	 * This code isn't correct because pins 2 and 3 are located
    2544 	 * in different positions on newer chips. Check all datasheets.
    2545 	 *
    2546 	 * Until this problem is resolved, only do this on chips <= 82580.
   2547 	 */
   2548 	if (sc->sc_type <= WM_T_82580) {
   2549 		if (sc->sc_type >= WM_T_82544) {
   2550 			sc->sc_ctrl |=
   2551 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2552 			    CTRL_SWDPIO_SHIFT;
   2553 			sc->sc_ctrl |=
   2554 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2555 			    CTRL_SWDPINS_SHIFT;
   2556 		} else {
   2557 			sc->sc_ctrl |=
   2558 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2559 			    CTRL_SWDPIO_SHIFT;
   2560 		}
   2561 	}
   2562 
   2563 	/* XXX For other than 82580? */
   2564 	if (sc->sc_type == WM_T_82580) {
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2566 		if (nvmword & __BIT(13))
   2567 			sc->sc_ctrl |= CTRL_ILOS;
   2568 	}
   2569 
   2570 #if 0
   2571 	if (sc->sc_type >= WM_T_82544) {
   2572 		if (cfg1 & NVM_CFG1_IPS0)
   2573 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2574 		if (cfg1 & NVM_CFG1_IPS1)
   2575 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2576 		sc->sc_ctrl_ext |=
   2577 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2578 		    CTRL_EXT_SWDPIO_SHIFT;
   2579 		sc->sc_ctrl_ext |=
   2580 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2581 		    CTRL_EXT_SWDPINS_SHIFT;
   2582 	} else {
   2583 		sc->sc_ctrl_ext |=
   2584 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2585 		    CTRL_EXT_SWDPIO_SHIFT;
   2586 	}
   2587 #endif
   2588 
   2589 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2590 #if 0
   2591 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2592 #endif
   2593 
   2594 	if (sc->sc_type == WM_T_PCH) {
   2595 		uint16_t val;
   2596 
   2597 		/* Save the NVM K1 bit setting */
   2598 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2599 
   2600 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2601 			sc->sc_nvm_k1_enabled = 1;
   2602 		else
   2603 			sc->sc_nvm_k1_enabled = 0;
   2604 	}
   2605 
   2606 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2607 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2608 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2609 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2610 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2611 	    || sc->sc_type == WM_T_82573
   2612 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2613 		/* Copper only */
   2614 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2615 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2616 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2617 	    || (sc->sc_type == WM_T_I211)) {
   2618 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2619 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2620 		switch (link_mode) {
   2621 		case CTRL_EXT_LINK_MODE_1000KX:
   2622 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2623 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2624 			break;
   2625 		case CTRL_EXT_LINK_MODE_SGMII:
   2626 			if (wm_sgmii_uses_mdio(sc)) {
   2627 				aprint_verbose_dev(sc->sc_dev,
   2628 				    "SGMII(MDIO)\n");
   2629 				sc->sc_flags |= WM_F_SGMII;
   2630 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2631 				break;
   2632 			}
   2633 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2634 			/*FALLTHROUGH*/
   2635 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2636 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2637 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2638 				if (link_mode
   2639 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2640 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2641 					sc->sc_flags |= WM_F_SGMII;
   2642 				} else {
   2643 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2644 					aprint_verbose_dev(sc->sc_dev,
   2645 					    "SERDES\n");
   2646 				}
   2647 				break;
   2648 			}
   2649 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2650 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2651 
   2652 			/* Change current link mode setting */
   2653 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2654 			switch (sc->sc_mediatype) {
   2655 			case WM_MEDIATYPE_COPPER:
   2656 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2657 				break;
   2658 			case WM_MEDIATYPE_SERDES:
   2659 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2660 				break;
   2661 			default:
   2662 				break;
   2663 			}
   2664 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2665 			break;
   2666 		case CTRL_EXT_LINK_MODE_GMII:
   2667 		default:
   2668 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2669 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2670 			break;
   2671 		}
   2672 
    2674 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2675 			reg |= CTRL_EXT_I2C_ENA;
    2676 		else
    2677 			reg &= ~CTRL_EXT_I2C_ENA;
   2678 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2679 	} else if (sc->sc_type < WM_T_82543 ||
   2680 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2681 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2682 			aprint_error_dev(sc->sc_dev,
   2683 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2684 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2685 		}
   2686 	} else {
   2687 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2688 			aprint_error_dev(sc->sc_dev,
   2689 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2690 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2691 		}
   2692 	}
   2693 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2694 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2695 
   2696 	/* Set device properties (macflags) */
   2697 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2698 
   2699 	/* Initialize the media structures accordingly. */
   2700 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2701 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2702 	else
   2703 		wm_tbi_mediainit(sc); /* All others */
   2704 
   2705 	ifp = &sc->sc_ethercom.ec_if;
   2706 	xname = device_xname(sc->sc_dev);
   2707 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2708 	ifp->if_softc = sc;
   2709 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2710 #ifdef WM_MPSAFE
   2711 	ifp->if_extflags = IFEF_MPSAFE;
   2712 #endif
   2713 	ifp->if_ioctl = wm_ioctl;
   2714 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2715 		ifp->if_start = wm_nq_start;
   2716 		/*
    2717 		 * When the number of CPUs is one and the controller can use
    2718 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2719 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2720 		 * the other for link status changes.
   2721 		 * In this situation, wm_nq_transmit() is disadvantageous
   2722 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2723 		 */
   2724 		if (wm_is_using_multiqueue(sc))
   2725 			ifp->if_transmit = wm_nq_transmit;
   2726 	} else {
   2727 		ifp->if_start = wm_start;
   2728 		/*
    2729 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2730 		 */
   2731 		if (wm_is_using_multiqueue(sc))
   2732 			ifp->if_transmit = wm_transmit;
   2733 	}
    2734 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   2735 	ifp->if_init = wm_init;
   2736 	ifp->if_stop = wm_stop;
   2737 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2738 	IFQ_SET_READY(&ifp->if_snd);
   2739 
    2740 	/* Check for jumbo frame support */
   2741 	switch (sc->sc_type) {
   2742 	case WM_T_82573:
   2743 		/* XXX limited to 9234 if ASPM is disabled */
   2744 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2745 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2746 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2747 		break;
   2748 	case WM_T_82571:
   2749 	case WM_T_82572:
   2750 	case WM_T_82574:
   2751 	case WM_T_82583:
   2752 	case WM_T_82575:
   2753 	case WM_T_82576:
   2754 	case WM_T_82580:
   2755 	case WM_T_I350:
   2756 	case WM_T_I354:
   2757 	case WM_T_I210:
   2758 	case WM_T_I211:
   2759 	case WM_T_80003:
   2760 	case WM_T_ICH9:
   2761 	case WM_T_ICH10:
   2762 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2763 	case WM_T_PCH_LPT:
   2764 	case WM_T_PCH_SPT:
   2765 	case WM_T_PCH_CNP:
   2766 		/* XXX limited to 9234 */
   2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2768 		break;
   2769 	case WM_T_PCH:
   2770 		/* XXX limited to 4096 */
   2771 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2772 		break;
   2773 	case WM_T_82542_2_0:
   2774 	case WM_T_82542_2_1:
   2775 	case WM_T_ICH8:
   2776 		/* No support for jumbo frame */
   2777 		break;
   2778 	default:
   2779 		/* ETHER_MAX_LEN_JUMBO */
   2780 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2781 		break;
   2782 	}
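
         	/*
         	 * ETHERCAP_JUMBO_MTU only advertises jumbo frame support to
         	 * the ethernet layer; the per-chip size limits are noted in
         	 * the XXX comments above.
         	 */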
   2783 
    2784 	/* If we're an i82543 or greater, we can support VLANs. */
   2785 	if (sc->sc_type >= WM_T_82543)
   2786 		sc->sc_ethercom.ec_capabilities |=
   2787 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2788 
   2789 	/*
    2790 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2791 	 * on i82543 and later.
   2792 	 */
   2793 	if (sc->sc_type >= WM_T_82543) {
   2794 		ifp->if_capabilities |=
   2795 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2796 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2797 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2798 		    IFCAP_CSUM_TCPv6_Tx |
   2799 		    IFCAP_CSUM_UDPv6_Tx;
   2800 	}
   2801 
   2802 	/*
    2803 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2804 	 *
   2805 	 *	82541GI (8086:1076) ... no
   2806 	 *	82572EI (8086:10b9) ... yes
   2807 	 */
   2808 	if (sc->sc_type >= WM_T_82571) {
   2809 		ifp->if_capabilities |=
   2810 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2811 	}
   2812 
   2813 	/*
    2814 	 * If we're an i82544 or greater (except i82547), we can do
   2815 	 * TCP segmentation offload.
   2816 	 */
   2817 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2818 		ifp->if_capabilities |= IFCAP_TSOv4;
   2819 	}
   2820 
   2821 	if (sc->sc_type >= WM_T_82571) {
   2822 		ifp->if_capabilities |= IFCAP_TSOv6;
   2823 	}
   2824 
   2825 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2826 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2827 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2828 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2829 
   2830 #ifdef WM_MPSAFE
   2831 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2832 #else
   2833 	sc->sc_core_lock = NULL;
   2834 #endif
   2835 
   2836 	/* Attach the interface. */
   2837 	error = if_initialize(ifp);
   2838 	if (error != 0) {
   2839 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2840 		    error);
   2841 		return; /* Error */
   2842 	}
   2843 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2844 	ether_ifattach(ifp, enaddr);
   2845 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2846 	if_register(ifp);
   2847 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2848 	    RND_FLAG_DEFAULT);
   2849 
   2850 #ifdef WM_EVENT_COUNTERS
   2851 	/* Attach event counters. */
   2852 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2853 	    NULL, xname, "linkintr");
   2854 
   2855 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "tx_xoff");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "tx_xon");
   2859 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2860 	    NULL, xname, "rx_xoff");
   2861 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2862 	    NULL, xname, "rx_xon");
   2863 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2864 	    NULL, xname, "rx_macctl");
   2865 #endif /* WM_EVENT_COUNTERS */
   2866 
   2867 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2868 		pmf_class_network_register(self, ifp);
   2869 	else
   2870 		aprint_error_dev(self, "couldn't establish power handler\n");
   2871 
   2872 	sc->sc_flags |= WM_F_ATTACHED;
   2873  out:
   2874 	return;
   2875 }
   2876 
   2877 /* The detach function (ca_detach) */
   2878 static int
   2879 wm_detach(device_t self, int flags __unused)
   2880 {
   2881 	struct wm_softc *sc = device_private(self);
   2882 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2883 	int i;
   2884 
   2885 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2886 		return 0;
   2887 
    2888 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2889 	wm_stop(ifp, 1);
   2890 
   2891 	pmf_device_deregister(self);
   2892 
   2893 #ifdef WM_EVENT_COUNTERS
   2894 	evcnt_detach(&sc->sc_ev_linkintr);
   2895 
   2896 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2897 	evcnt_detach(&sc->sc_ev_tx_xon);
   2898 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2899 	evcnt_detach(&sc->sc_ev_rx_xon);
   2900 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2901 #endif /* WM_EVENT_COUNTERS */
   2902 
   2903 	/* Tell the firmware about the release */
   2904 	WM_CORE_LOCK(sc);
   2905 	wm_release_manageability(sc);
   2906 	wm_release_hw_control(sc);
   2907 	wm_enable_wakeup(sc);
   2908 	WM_CORE_UNLOCK(sc);
   2909 
   2910 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2911 
   2912 	/* Delete all remaining media. */
   2913 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2914 
   2915 	ether_ifdetach(ifp);
   2916 	if_detach(ifp);
   2917 	if_percpuq_destroy(sc->sc_ipq);
   2918 
   2919 	/* Unload RX dmamaps and free mbufs */
   2920 	for (i = 0; i < sc->sc_nqueues; i++) {
   2921 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2922 		mutex_enter(rxq->rxq_lock);
   2923 		wm_rxdrain(rxq);
   2924 		mutex_exit(rxq->rxq_lock);
   2925 	}
   2926 	/* Must unlock here */
   2927 
   2928 	/* Disestablish the interrupt handler */
   2929 	for (i = 0; i < sc->sc_nintrs; i++) {
   2930 		if (sc->sc_ihs[i] != NULL) {
   2931 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2932 			sc->sc_ihs[i] = NULL;
   2933 		}
   2934 	}
   2935 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2936 
   2937 	wm_free_txrx_queues(sc);
   2938 
   2939 	/* Unmap the registers */
   2940 	if (sc->sc_ss) {
   2941 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2942 		sc->sc_ss = 0;
   2943 	}
   2944 	if (sc->sc_ios) {
   2945 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2946 		sc->sc_ios = 0;
   2947 	}
   2948 	if (sc->sc_flashs) {
   2949 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2950 		sc->sc_flashs = 0;
   2951 	}
   2952 
   2953 	if (sc->sc_core_lock)
   2954 		mutex_obj_free(sc->sc_core_lock);
   2955 	if (sc->sc_ich_phymtx)
   2956 		mutex_obj_free(sc->sc_ich_phymtx);
   2957 	if (sc->sc_ich_nvmmtx)
   2958 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2959 
   2960 	return 0;
   2961 }
   2962 
   2963 static bool
   2964 wm_suspend(device_t self, const pmf_qual_t *qual)
   2965 {
   2966 	struct wm_softc *sc = device_private(self);
   2967 
   2968 	wm_release_manageability(sc);
   2969 	wm_release_hw_control(sc);
   2970 	wm_enable_wakeup(sc);
   2971 
   2972 	return true;
   2973 }
   2974 
   2975 static bool
   2976 wm_resume(device_t self, const pmf_qual_t *qual)
   2977 {
   2978 	struct wm_softc *sc = device_private(self);
   2979 
   2980 	/* Disable ASPM L0s and/or L1 for workaround */
   2981 	wm_disable_aspm(sc);
   2982 	wm_init_manageability(sc);
   2983 
   2984 	return true;
   2985 }
   2986 
   2987 /*
   2988  * wm_watchdog:		[ifnet interface function]
   2989  *
   2990  *	Watchdog timer handler.
   2991  */
   2992 static void
   2993 wm_watchdog(struct ifnet *ifp)
   2994 {
   2995 	int qid;
   2996 	struct wm_softc *sc = ifp->if_softc;
    2997 	uint16_t hang_queue = 0; /* wm(4)'s max queue count is 16 (82576). */
   2998 
   2999 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3000 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3001 
   3002 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3003 	}
   3004 
   3005 	/*
    3006 	 * If any queue hung up, reset the interface.
   3007 	 */
   3008 	if (hang_queue != 0) {
   3009 		(void) wm_init(ifp);
   3010 
   3011 		/*
    3012 		 * Some upper-layer processing still calls ifp->if_start(),
    3013 		 * e.g. ALTQ or a single-CPU system.
   3014 		 */
   3015 		/* Try to get more packets going. */
   3016 		ifp->if_start(ifp);
   3017 	}
   3018 }
    3019 
   3021 static void
   3022 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3023 {
   3024 
   3025 	mutex_enter(txq->txq_lock);
   3026 	if (txq->txq_sending &&
   3027 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3028 		wm_watchdog_txq_locked(ifp, txq, hang);
   3029 	}
   3030 	mutex_exit(txq->txq_lock);
   3031 }
   3032 
   3033 static void
   3034 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3035     uint16_t *hang)
   3036 {
   3037 	struct wm_softc *sc = ifp->if_softc;
   3038 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3039 
   3040 	KASSERT(mutex_owned(txq->txq_lock));
   3041 
   3042 	/*
   3043 	 * Since we're using delayed interrupts, sweep up
   3044 	 * before we report an error.
   3045 	 */
   3046 	wm_txeof(txq, UINT_MAX);
   3047 
   3048 	if (txq->txq_sending)
   3049 		*hang |= __BIT(wmq->wmq_id);
   3050 
   3051 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3052 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3053 		    device_xname(sc->sc_dev));
   3054 	} else {
   3055 #ifdef WM_DEBUG
   3056 		int i, j;
   3057 		struct wm_txsoft *txs;
   3058 #endif
   3059 		log(LOG_ERR,
   3060 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3061 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3062 		    txq->txq_next);
   3063 		ifp->if_oerrors++;
   3064 #ifdef WM_DEBUG
   3065 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3066 		    i = WM_NEXTTXS(txq, i)) {
   3067 		    txs = &txq->txq_soft[i];
   3068 		    printf("txs %d tx %d -> %d\n",
   3069 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3070 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3071 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3072 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3073 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3074 				    printf("\t %#08x%08x\n",
   3075 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3076 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3077 			    } else {
   3078 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3079 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3080 					txq->txq_descs[j].wtx_addr.wa_low);
   3081 				    printf("\t %#04x%02x%02x%08x\n",
   3082 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3083 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3084 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3085 					txq->txq_descs[j].wtx_cmdlen);
   3086 			    }
   3087 			if (j == txs->txs_lastdesc)
   3088 				break;
   3089 			}
   3090 		}
   3091 #endif
   3092 	}
   3093 }
   3094 
   3095 /*
   3096  * wm_tick:
   3097  *
   3098  *	One second timer, used to check link status, sweep up
   3099  *	completed transmit jobs, etc.
   3100  */
   3101 static void
   3102 wm_tick(void *arg)
   3103 {
   3104 	struct wm_softc *sc = arg;
   3105 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3106 #ifndef WM_MPSAFE
   3107 	int s = splnet();
   3108 #endif
   3109 
   3110 	WM_CORE_LOCK(sc);
   3111 
   3112 	if (sc->sc_core_stopping) {
   3113 		WM_CORE_UNLOCK(sc);
   3114 #ifndef WM_MPSAFE
   3115 		splx(s);
   3116 #endif
   3117 		return;
   3118 	}
   3119 
   3120 	if (sc->sc_type >= WM_T_82542_2_1) {
   3121 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3122 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3123 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3124 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3125 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3126 	}
   3127 
   3128 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3129 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3130 	    + CSR_READ(sc, WMREG_CRCERRS)
   3131 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3132 	    + CSR_READ(sc, WMREG_SYMERRC)
   3133 	    + CSR_READ(sc, WMREG_RXERRC)
   3134 	    + CSR_READ(sc, WMREG_SEC)
   3135 	    + CSR_READ(sc, WMREG_CEXTERR)
   3136 	    + CSR_READ(sc, WMREG_RLEC);
   3137 	/*
    3138 	 * WMREG_RNBC is incremented when no receive buffers are available
    3139 	 * in host memory. It is not the number of dropped packets, because
    3140 	 * the ethernet controller can still receive packets in that case
    3141 	 * if there is space in the PHY's FIFO.
    3142 	 *
    3143 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
    3144 	 * instead of if_iqdrops.
   3145 	 */
   3146 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3147 
   3148 	if (sc->sc_flags & WM_F_HAS_MII)
   3149 		mii_tick(&sc->sc_mii);
   3150 	else if ((sc->sc_type >= WM_T_82575)
   3151 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3152 		wm_serdes_tick(sc);
   3153 	else
   3154 		wm_tbi_tick(sc);
   3155 
   3156 	WM_CORE_UNLOCK(sc);
   3157 
   3158 	wm_watchdog(ifp);
   3159 
   3160 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3161 }
   3162 
   3163 static int
   3164 wm_ifflags_cb(struct ethercom *ec)
   3165 {
   3166 	struct ifnet *ifp = &ec->ec_if;
   3167 	struct wm_softc *sc = ifp->if_softc;
   3168 	int rc = 0;
   3169 
   3170 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3171 		device_xname(sc->sc_dev), __func__));
   3172 
   3173 	WM_CORE_LOCK(sc);
   3174 
   3175 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3176 	sc->sc_if_flags = ifp->if_flags;
   3177 
   3178 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3179 		rc = ENETRESET;
   3180 		goto out;
   3181 	}
   3182 
   3183 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3184 		wm_set_filter(sc);
   3185 
   3186 	wm_set_vlan(sc);
   3187 
   3188 out:
   3189 	WM_CORE_UNLOCK(sc);
   3190 
   3191 	return rc;
   3192 }
   3193 
   3194 /*
   3195  * wm_ioctl:		[ifnet interface function]
   3196  *
   3197  *	Handle control requests from the operator.
   3198  */
   3199 static int
   3200 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3201 {
   3202 	struct wm_softc *sc = ifp->if_softc;
   3203 	struct ifreq *ifr = (struct ifreq *) data;
   3204 	struct ifaddr *ifa = (struct ifaddr *)data;
   3205 	struct sockaddr_dl *sdl;
   3206 	int s, error;
   3207 
   3208 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3209 		device_xname(sc->sc_dev), __func__));
   3210 
   3211 #ifndef WM_MPSAFE
   3212 	s = splnet();
   3213 #endif
   3214 	switch (cmd) {
   3215 	case SIOCSIFMEDIA:
   3216 	case SIOCGIFMEDIA:
   3217 		WM_CORE_LOCK(sc);
   3218 		/* Flow control requires full-duplex mode. */
   3219 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3220 		    (ifr->ifr_media & IFM_FDX) == 0)
   3221 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3222 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3223 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3224 				/* We can do both TXPAUSE and RXPAUSE. */
   3225 				ifr->ifr_media |=
   3226 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3227 			}
   3228 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3229 		}
   3230 		WM_CORE_UNLOCK(sc);
   3231 #ifdef WM_MPSAFE
   3232 		s = splnet();
   3233 #endif
   3234 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3235 #ifdef WM_MPSAFE
   3236 		splx(s);
   3237 #endif
   3238 		break;
   3239 	case SIOCINITIFADDR:
   3240 		WM_CORE_LOCK(sc);
   3241 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3242 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3243 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3244 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3245 			/* unicast address is first multicast entry */
   3246 			wm_set_filter(sc);
   3247 			error = 0;
   3248 			WM_CORE_UNLOCK(sc);
   3249 			break;
   3250 		}
   3251 		WM_CORE_UNLOCK(sc);
   3252 		/*FALLTHROUGH*/
   3253 	default:
   3254 #ifdef WM_MPSAFE
   3255 		s = splnet();
   3256 #endif
   3257 		/* It may call wm_start, so unlock here */
   3258 		error = ether_ioctl(ifp, cmd, data);
   3259 #ifdef WM_MPSAFE
   3260 		splx(s);
   3261 #endif
   3262 		if (error != ENETRESET)
   3263 			break;
   3264 
   3265 		error = 0;
   3266 
   3267 		if (cmd == SIOCSIFCAP)
   3268 			error = (*ifp->if_init)(ifp);
   3269 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3270 			;
   3271 		else if (ifp->if_flags & IFF_RUNNING) {
   3272 			/*
   3273 			 * Multicast list has changed; set the hardware filter
   3274 			 * accordingly.
   3275 			 */
   3276 			WM_CORE_LOCK(sc);
   3277 			wm_set_filter(sc);
   3278 			WM_CORE_UNLOCK(sc);
   3279 		}
   3280 		break;
   3281 	}
   3282 
   3283 #ifndef WM_MPSAFE
   3284 	splx(s);
   3285 #endif
   3286 	return error;
   3287 }
   3288 
   3289 /* MAC address related */
   3290 
   3291 /*
    3292  * Get the offset of the MAC address and return it.
    3293  * If an error occurs, use offset 0.
   3294  */
   3295 static uint16_t
   3296 wm_check_alt_mac_addr(struct wm_softc *sc)
   3297 {
   3298 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3299 	uint16_t offset = NVM_OFF_MACADDR;
   3300 
   3301 	/* Try to read alternative MAC address pointer */
   3302 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3303 		return 0;
   3304 
    3305 	/* Check whether the pointer is valid. */
   3306 	if ((offset == 0x0000) || (offset == 0xffff))
   3307 		return 0;
   3308 
   3309 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3310 	/*
    3311 	 * Check whether the alternative MAC address is valid.
    3312 	 * Some cards have a non-0xffff pointer but don't actually
    3313 	 * use an alternative MAC address.
   3314 	 *
   3315 	 * Check whether the broadcast bit is set or not.
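         	 * (A set LSB in the first octet marks a multicast address,
         	 * which cannot be a valid unicast station address.)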
   3316 	 */
   3317 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3318 		if (((myea[0] & 0xff) & 0x01) == 0)
   3319 			return offset; /* Found */
   3320 
   3321 	/* Not found */
   3322 	return 0;
   3323 }
   3324 
   3325 static int
   3326 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3327 {
   3328 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3329 	uint16_t offset = NVM_OFF_MACADDR;
   3330 	int do_invert = 0;
   3331 
   3332 	switch (sc->sc_type) {
   3333 	case WM_T_82580:
   3334 	case WM_T_I350:
   3335 	case WM_T_I354:
   3336 		/* EEPROM Top Level Partitioning */
   3337 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3338 		break;
   3339 	case WM_T_82571:
   3340 	case WM_T_82575:
   3341 	case WM_T_82576:
   3342 	case WM_T_80003:
   3343 	case WM_T_I210:
   3344 	case WM_T_I211:
   3345 		offset = wm_check_alt_mac_addr(sc);
   3346 		if (offset == 0)
   3347 			if ((sc->sc_funcid & 0x01) == 1)
   3348 				do_invert = 1;
   3349 		break;
   3350 	default:
   3351 		if ((sc->sc_funcid & 0x01) == 1)
   3352 			do_invert = 1;
   3353 		break;
   3354 	}
   3355 
    3356 	if (wm_nvm_read(sc, offset, __arraycount(myea), myea) != 0)
   3357 		goto bad;
   3358 
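         	/* The NVM stores the MAC address as three little-endian 16-bit words. */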
   3359 	enaddr[0] = myea[0] & 0xff;
   3360 	enaddr[1] = myea[0] >> 8;
   3361 	enaddr[2] = myea[1] & 0xff;
   3362 	enaddr[3] = myea[1] >> 8;
   3363 	enaddr[4] = myea[2] & 0xff;
   3364 	enaddr[5] = myea[2] >> 8;
   3365 
   3366 	/*
   3367 	 * Toggle the LSB of the MAC address on the second port
   3368 	 * of some dual port cards.
   3369 	 */
   3370 	if (do_invert != 0)
   3371 		enaddr[5] ^= 1;
   3372 
   3373 	return 0;
   3374 
   3375  bad:
   3376 	return -1;
   3377 }
   3378 
   3379 /*
   3380  * wm_set_ral:
   3381  *
    3382  *	Set an entry in the receive address list.
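          *	RAL holds the low four octets of the address; RAH holds
          *	the last two octets plus the Address Valid (RAL_AV) bit.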
   3383  */
   3384 static void
   3385 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3386 {
   3387 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3388 	uint32_t wlock_mac;
   3389 	int rv;
   3390 
   3391 	if (enaddr != NULL) {
   3392 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3393 		    (enaddr[3] << 24);
   3394 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3395 		ral_hi |= RAL_AV;
   3396 	} else {
   3397 		ral_lo = 0;
   3398 		ral_hi = 0;
   3399 	}
   3400 
   3401 	switch (sc->sc_type) {
   3402 	case WM_T_82542_2_0:
   3403 	case WM_T_82542_2_1:
   3404 	case WM_T_82543:
   3405 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3406 		CSR_WRITE_FLUSH(sc);
   3407 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3408 		CSR_WRITE_FLUSH(sc);
   3409 		break;
   3410 	case WM_T_PCH2:
   3411 	case WM_T_PCH_LPT:
   3412 	case WM_T_PCH_SPT:
   3413 	case WM_T_PCH_CNP:
   3414 		if (idx == 0) {
   3415 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3416 			CSR_WRITE_FLUSH(sc);
   3417 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3418 			CSR_WRITE_FLUSH(sc);
   3419 			return;
   3420 		}
   3421 		if (sc->sc_type != WM_T_PCH2) {
   3422 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3423 			    FWSM_WLOCK_MAC);
   3424 			addrl = WMREG_SHRAL(idx - 1);
   3425 			addrh = WMREG_SHRAH(idx - 1);
   3426 		} else {
   3427 			wlock_mac = 0;
   3428 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3429 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3430 		}
   3431 
   3432 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3433 			rv = wm_get_swflag_ich8lan(sc);
   3434 			if (rv != 0)
   3435 				return;
   3436 			CSR_WRITE(sc, addrl, ral_lo);
   3437 			CSR_WRITE_FLUSH(sc);
   3438 			CSR_WRITE(sc, addrh, ral_hi);
   3439 			CSR_WRITE_FLUSH(sc);
   3440 			wm_put_swflag_ich8lan(sc);
   3441 		}
   3442 
   3443 		break;
   3444 	default:
   3445 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3446 		CSR_WRITE_FLUSH(sc);
   3447 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3448 		CSR_WRITE_FLUSH(sc);
   3449 		break;
   3450 	}
   3451 }
   3452 
   3453 /*
   3454  * wm_mchash:
   3455  *
   3456  *	Compute the hash of the multicast address for the 4096-bit
   3457  *	multicast filter.
   3458  */
   3459 static uint32_t
   3460 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3461 {
   3462 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3463 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3464 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3465 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3466 	uint32_t hash;
   3467 
   3468 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3469 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3470 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3471 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3472 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3473 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3474 		return (hash & 0x3ff);
   3475 	}
   3476 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3477 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3478 
   3479 	return (hash & 0xfff);
   3480 }
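
         /*
          * For example, with sc_mchash_type 0 on a non-ICH chip, the
          * all-hosts group 01:00:5e:00:00:01 hashes to
          * (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter() sets
          * bit 16 (0x010 & 0x1f) of MTA word 0 (0x010 >> 5).
          */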
   3481 
   3482 /*
   3483  * wm_set_filter:
   3484  *
   3485  *	Set up the receive filter.
   3486  */
   3487 static void
   3488 wm_set_filter(struct wm_softc *sc)
   3489 {
   3490 	struct ethercom *ec = &sc->sc_ethercom;
   3491 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3492 	struct ether_multi *enm;
   3493 	struct ether_multistep step;
   3494 	bus_addr_t mta_reg;
   3495 	uint32_t hash, reg, bit;
   3496 	int i, size, ralmax;
   3497 
   3498 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3499 		device_xname(sc->sc_dev), __func__));
   3500 
   3501 	if (sc->sc_type >= WM_T_82544)
   3502 		mta_reg = WMREG_CORDOVA_MTA;
   3503 	else
   3504 		mta_reg = WMREG_MTA;
   3505 
   3506 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3507 
   3508 	if (ifp->if_flags & IFF_BROADCAST)
   3509 		sc->sc_rctl |= RCTL_BAM;
   3510 	if (ifp->if_flags & IFF_PROMISC) {
   3511 		sc->sc_rctl |= RCTL_UPE;
   3512 		goto allmulti;
   3513 	}
   3514 
   3515 	/*
   3516 	 * Set the station address in the first RAL slot, and
   3517 	 * clear the remaining slots.
   3518 	 */
   3519 	if (sc->sc_type == WM_T_ICH8)
    3520 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3521 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3522 	    || (sc->sc_type == WM_T_PCH))
   3523 		size = WM_RAL_TABSIZE_ICH8;
   3524 	else if (sc->sc_type == WM_T_PCH2)
   3525 		size = WM_RAL_TABSIZE_PCH2;
   3526 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3527 	    || (sc->sc_type == WM_T_PCH_CNP))
   3528 		size = WM_RAL_TABSIZE_PCH_LPT;
   3529 	else if (sc->sc_type == WM_T_82575)
   3530 		size = WM_RAL_TABSIZE_82575;
   3531 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3532 		size = WM_RAL_TABSIZE_82576;
   3533 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3534 		size = WM_RAL_TABSIZE_I350;
   3535 	else
   3536 		size = WM_RAL_TABSIZE;
   3537 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3538 
   3539 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3540 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3541 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3542 		switch (i) {
   3543 		case 0:
   3544 			/* We can use all entries */
   3545 			ralmax = size;
   3546 			break;
   3547 		case 1:
   3548 			/* Only RAR[0] */
   3549 			ralmax = 1;
   3550 			break;
   3551 		default:
   3552 			/* available SHRA + RAR[0] */
   3553 			ralmax = i + 1;
   3554 		}
   3555 	} else
   3556 		ralmax = size;
   3557 	for (i = 1; i < size; i++) {
   3558 		if (i < ralmax)
   3559 			wm_set_ral(sc, NULL, i);
   3560 	}
   3561 
   3562 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3563 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3564 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3565 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3566 		size = WM_ICH8_MC_TABSIZE;
   3567 	else
   3568 		size = WM_MC_TABSIZE;
   3569 	/* Clear out the multicast table. */
   3570 	for (i = 0; i < size; i++) {
   3571 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3572 		CSR_WRITE_FLUSH(sc);
   3573 	}
   3574 
   3575 	ETHER_LOCK(ec);
   3576 	ETHER_FIRST_MULTI(step, ec, enm);
   3577 	while (enm != NULL) {
   3578 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3579 			ETHER_UNLOCK(ec);
   3580 			/*
   3581 			 * We must listen to a range of multicast addresses.
   3582 			 * For now, just accept all multicasts, rather than
   3583 			 * trying to set only those filter bits needed to match
   3584 			 * the range.  (At this time, the only use of address
   3585 			 * ranges is for IP multicast routing, for which the
   3586 			 * range is big enough to require all bits set.)
   3587 			 */
   3588 			goto allmulti;
   3589 		}
   3590 
   3591 		hash = wm_mchash(sc, enm->enm_addrlo);
   3592 
   3593 		reg = (hash >> 5);
   3594 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3595 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3596 		    || (sc->sc_type == WM_T_PCH2)
   3597 		    || (sc->sc_type == WM_T_PCH_LPT)
   3598 		    || (sc->sc_type == WM_T_PCH_SPT)
   3599 		    || (sc->sc_type == WM_T_PCH_CNP))
   3600 			reg &= 0x1f;
   3601 		else
   3602 			reg &= 0x7f;
   3603 		bit = hash & 0x1f;
   3604 
   3605 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3606 		hash |= 1U << bit;
   3607 
   3608 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3609 			/*
    3610 			 * 82544 Errata 9: Certain registers cannot be written
   3611 			 * with particular alignments in PCI-X bus operation
   3612 			 * (FCAH, MTA and VFTA).
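         			 * Work around this by re-writing the adjacent
         			 * even-indexed MTA word around the update.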
   3613 			 */
   3614 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3615 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3616 			CSR_WRITE_FLUSH(sc);
   3617 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3618 			CSR_WRITE_FLUSH(sc);
   3619 		} else {
   3620 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3621 			CSR_WRITE_FLUSH(sc);
   3622 		}
   3623 
   3624 		ETHER_NEXT_MULTI(step, enm);
   3625 	}
   3626 	ETHER_UNLOCK(ec);
   3627 
   3628 	ifp->if_flags &= ~IFF_ALLMULTI;
   3629 	goto setit;
   3630 
   3631  allmulti:
   3632 	ifp->if_flags |= IFF_ALLMULTI;
   3633 	sc->sc_rctl |= RCTL_MPE;
   3634 
   3635  setit:
   3636 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3637 }
   3638 
   3639 /* Reset and init related */
   3640 
   3641 static void
   3642 wm_set_vlan(struct wm_softc *sc)
   3643 {
   3644 
   3645 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3646 		device_xname(sc->sc_dev), __func__));
   3647 
   3648 	/* Deal with VLAN enables. */
   3649 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3650 		sc->sc_ctrl |= CTRL_VME;
   3651 	else
   3652 		sc->sc_ctrl &= ~CTRL_VME;
   3653 
   3654 	/* Write the control registers. */
   3655 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3656 }
   3657 
   3658 static void
   3659 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3660 {
   3661 	uint32_t gcr;
   3662 	pcireg_t ctrl2;
   3663 
   3664 	gcr = CSR_READ(sc, WMREG_GCR);
   3665 
   3666 	/* Only take action if timeout value is defaulted to 0 */
   3667 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3668 		goto out;
   3669 
   3670 	if ((gcr & GCR_CAP_VER2) == 0) {
   3671 		gcr |= GCR_CMPL_TMOUT_10MS;
   3672 		goto out;
   3673 	}
   3674 
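         	/*
         	 * The device reports PCIe capability version 2: set the
         	 * completion timeout through the PCIe Device Control 2
         	 * register instead.
         	 */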
   3675 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3676 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3677 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3678 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3679 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3680 
   3681 out:
   3682 	/* Disable completion timeout resend */
   3683 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3684 
   3685 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3686 }
   3687 
   3688 void
   3689 wm_get_auto_rd_done(struct wm_softc *sc)
   3690 {
   3691 	int i;
   3692 
   3693 	/* wait for eeprom to reload */
   3694 	switch (sc->sc_type) {
   3695 	case WM_T_82571:
   3696 	case WM_T_82572:
   3697 	case WM_T_82573:
   3698 	case WM_T_82574:
   3699 	case WM_T_82583:
   3700 	case WM_T_82575:
   3701 	case WM_T_82576:
   3702 	case WM_T_82580:
   3703 	case WM_T_I350:
   3704 	case WM_T_I354:
   3705 	case WM_T_I210:
   3706 	case WM_T_I211:
   3707 	case WM_T_80003:
   3708 	case WM_T_ICH8:
   3709 	case WM_T_ICH9:
   3710 		for (i = 0; i < 10; i++) {
   3711 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3712 				break;
   3713 			delay(1000);
   3714 		}
   3715 		if (i == 10) {
   3716 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3717 			    "complete\n", device_xname(sc->sc_dev));
   3718 		}
   3719 		break;
   3720 	default:
   3721 		break;
   3722 	}
   3723 }
   3724 
   3725 void
   3726 wm_lan_init_done(struct wm_softc *sc)
   3727 {
   3728 	uint32_t reg = 0;
   3729 	int i;
   3730 
   3731 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3732 		device_xname(sc->sc_dev), __func__));
   3733 
   3734 	/* Wait for eeprom to reload */
   3735 	switch (sc->sc_type) {
   3736 	case WM_T_ICH10:
   3737 	case WM_T_PCH:
   3738 	case WM_T_PCH2:
   3739 	case WM_T_PCH_LPT:
   3740 	case WM_T_PCH_SPT:
   3741 	case WM_T_PCH_CNP:
   3742 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3743 			reg = CSR_READ(sc, WMREG_STATUS);
   3744 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3745 				break;
   3746 			delay(100);
   3747 		}
   3748 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3749 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3750 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3751 		}
   3752 		break;
   3753 	default:
   3754 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3755 		    __func__);
   3756 		break;
   3757 	}
   3758 
   3759 	reg &= ~STATUS_LAN_INIT_DONE;
   3760 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3761 }
   3762 
   3763 void
   3764 wm_get_cfg_done(struct wm_softc *sc)
   3765 {
   3766 	int mask;
   3767 	uint32_t reg;
   3768 	int i;
   3769 
   3770 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3771 		device_xname(sc->sc_dev), __func__));
   3772 
   3773 	/* Wait for eeprom to reload */
   3774 	switch (sc->sc_type) {
   3775 	case WM_T_82542_2_0:
   3776 	case WM_T_82542_2_1:
   3777 		/* null */
   3778 		break;
   3779 	case WM_T_82543:
   3780 	case WM_T_82544:
   3781 	case WM_T_82540:
   3782 	case WM_T_82545:
   3783 	case WM_T_82545_3:
   3784 	case WM_T_82546:
   3785 	case WM_T_82546_3:
   3786 	case WM_T_82541:
   3787 	case WM_T_82541_2:
   3788 	case WM_T_82547:
   3789 	case WM_T_82547_2:
   3790 	case WM_T_82573:
   3791 	case WM_T_82574:
   3792 	case WM_T_82583:
   3793 		/* generic */
   3794 		delay(10*1000);
   3795 		break;
   3796 	case WM_T_80003:
   3797 	case WM_T_82571:
   3798 	case WM_T_82572:
   3799 	case WM_T_82575:
   3800 	case WM_T_82576:
   3801 	case WM_T_82580:
   3802 	case WM_T_I350:
   3803 	case WM_T_I354:
   3804 	case WM_T_I210:
   3805 	case WM_T_I211:
   3806 		if (sc->sc_type == WM_T_82571) {
   3807 			/* Only 82571 shares port 0 */
   3808 			mask = EEMNGCTL_CFGDONE_0;
   3809 		} else
   3810 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3811 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3812 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3813 				break;
   3814 			delay(1000);
   3815 		}
   3816 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3817 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3818 				device_xname(sc->sc_dev), __func__));
   3819 		}
   3820 		break;
   3821 	case WM_T_ICH8:
   3822 	case WM_T_ICH9:
   3823 	case WM_T_ICH10:
   3824 	case WM_T_PCH:
   3825 	case WM_T_PCH2:
   3826 	case WM_T_PCH_LPT:
   3827 	case WM_T_PCH_SPT:
   3828 	case WM_T_PCH_CNP:
   3829 		delay(10*1000);
   3830 		if (sc->sc_type >= WM_T_ICH10)
   3831 			wm_lan_init_done(sc);
   3832 		else
   3833 			wm_get_auto_rd_done(sc);
   3834 
   3835 		/* Clear PHY Reset Asserted bit */
   3836 		reg = CSR_READ(sc, WMREG_STATUS);
   3837 		if ((reg & STATUS_PHYRA) != 0)
   3838 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3839 		break;
   3840 	default:
   3841 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3842 		    __func__);
   3843 		break;
   3844 	}
   3845 }
   3846 
   3847 void
   3848 wm_phy_post_reset(struct wm_softc *sc)
   3849 {
   3850 	uint32_t reg;
   3851 
   3852 	/* This function is only for ICH8 and newer. */
   3853 	if (sc->sc_type < WM_T_ICH8)
   3854 		return;
   3855 
   3856 	if (wm_phy_resetisblocked(sc)) {
   3857 		/* XXX */
   3858 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3859 		return;
   3860 	}
   3861 
   3862 	/* Allow time for h/w to get to quiescent state after reset */
   3863 	delay(10*1000);
   3864 
   3865 	/* Perform any necessary post-reset workarounds */
   3866 	if (sc->sc_type == WM_T_PCH)
   3867 		wm_hv_phy_workaround_ich8lan(sc);
   3868 	else if (sc->sc_type == WM_T_PCH2)
   3869 		wm_lv_phy_workaround_ich8lan(sc);
   3870 
   3871 	/* Clear the host wakeup bit after lcd reset */
   3872 	if (sc->sc_type >= WM_T_PCH) {
   3873 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3874 		    BM_PORT_GEN_CFG);
   3875 		reg &= ~BM_WUC_HOST_WU_BIT;
   3876 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3877 		    BM_PORT_GEN_CFG, reg);
   3878 	}
   3879 
   3880 	/* Configure the LCD with the extended configuration region in NVM */
   3881 	wm_init_lcd_from_nvm(sc);
   3882 
   3883 	/* Configure the LCD with the OEM bits in NVM */
   3884 	wm_oem_bits_config_ich8lan(sc, true);
   3885 
   3886 	if (sc->sc_type == WM_T_PCH2) {
   3887 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3888 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3889 			delay(10 * 1000);
   3890 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3891 		}
   3892 		/* XXX Set EEE LPI Update Timer to 200usec */
   3893 	}
   3894 }
   3895 
   3896 /* Only for PCH and newer */
   3897 static int
   3898 wm_write_smbus_addr(struct wm_softc *sc)
   3899 {
   3900 	uint32_t strap, freq;
   3901 	uint16_t phy_data;
   3902 	int rv;
   3903 
   3904 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3905 		device_xname(sc->sc_dev), __func__));
   3906 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3907 
   3908 	strap = CSR_READ(sc, WMREG_STRAP);
   3909 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3910 
   3911 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3912 	if (rv != 0)
   3913 		return -1;
   3914 
   3915 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3916 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3917 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3918 
   3919 	if (sc->sc_phytype == WMPHY_I217) {
   3920 		/* Restore SMBus frequency */
    3921 		if (freq--) {
   3922 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3923 			    | HV_SMB_ADDR_FREQ_HIGH);
   3924 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3925 			    HV_SMB_ADDR_FREQ_LOW);
   3926 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3927 			    HV_SMB_ADDR_FREQ_HIGH);
   3928 		} else {
   3929 			DPRINTF(WM_DEBUG_INIT,
   3930 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3931 				device_xname(sc->sc_dev), __func__));
   3932 		}
   3933 	}
   3934 
   3935 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   3936 	    phy_data);
   3937 }
   3938 
   3939 void
   3940 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3941 {
   3942 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3943 	uint16_t phy_page = 0;
   3944 
   3945 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3946 		device_xname(sc->sc_dev), __func__));
   3947 
   3948 	switch (sc->sc_type) {
   3949 	case WM_T_ICH8:
    3950 		if (sc->sc_phytype != WMPHY_IGP_3)
   3952 			return;
   3953 
   3954 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3955 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3956 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3957 			break;
   3958 		}
   3959 		/* FALLTHROUGH */
   3960 	case WM_T_PCH:
   3961 	case WM_T_PCH2:
   3962 	case WM_T_PCH_LPT:
   3963 	case WM_T_PCH_SPT:
   3964 	case WM_T_PCH_CNP:
   3965 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3966 		break;
   3967 	default:
   3968 		return;
   3969 	}
   3970 
   3971 	sc->phy.acquire(sc);
   3972 
   3973 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3974 	if ((reg & sw_cfg_mask) == 0)
   3975 		goto release;
   3976 
   3977 	/*
   3978 	 * Make sure HW does not configure LCD from PHY extended configuration
   3979 	 * before SW configuration
   3980 	 */
   3981 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3982 	if ((sc->sc_type < WM_T_PCH2)
   3983 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3984 		goto release;
   3985 
   3986 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3987 		device_xname(sc->sc_dev), __func__));
    3988 	/* word_addr is in DWORDs; convert to a 16-bit NVM word offset */
   3989 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3990 
   3991 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3992 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3993 	if (cnf_size == 0)
   3994 		goto release;
   3995 
   3996 	if (((sc->sc_type == WM_T_PCH)
   3997 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3998 	    || (sc->sc_type > WM_T_PCH)) {
   3999 		/*
   4000 		 * HW configures the SMBus address and LEDs when the OEM and
   4001 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4002 		 * are cleared, SW will configure them instead.
   4003 		 */
   4004 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4005 			device_xname(sc->sc_dev), __func__));
   4006 		wm_write_smbus_addr(sc);
   4007 
   4008 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4009 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4010 	}
   4011 
   4012 	/* Configure LCD from extended configuration region. */
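         	/* Each entry in the region is a (data, address) pair of NVM words. */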
   4013 	for (i = 0; i < cnf_size; i++) {
   4014 		uint16_t reg_data, reg_addr;
   4015 
   4016 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4017 			goto release;
   4018 
    4019 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4020 			goto release;
   4021 
   4022 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4023 			phy_page = reg_data;
   4024 
   4025 		reg_addr &= IGPHY_MAXREGADDR;
   4026 		reg_addr |= phy_page;
   4027 
   4028 		KASSERT(sc->phy.writereg_locked != NULL);
   4029 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4030 	}
   4031 
   4032 release:
   4033 	sc->phy.release(sc);
   4034 	return;
   4035 }
   4036 
   4037 /*
   4038  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4039  *  @sc:       pointer to the HW structure
   4040  *  @d0_state: boolean if entering d0 or d3 device state
   4041  *
   4042  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4043  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4044  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4045  */
   4046 int
   4047 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4048 {
   4049 	uint32_t mac_reg;
   4050 	uint16_t oem_reg;
   4051 	int rv;
   4052 
   4053 	if (sc->sc_type < WM_T_PCH)
   4054 		return 0;
   4055 
   4056 	rv = sc->phy.acquire(sc);
   4057 	if (rv != 0)
   4058 		return rv;
   4059 
   4060 	if (sc->sc_type == WM_T_PCH) {
   4061 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4062 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4063 			goto release;
   4064 	}
   4065 
   4066 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4067 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4068 		goto release;
   4069 
   4070 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4071 
   4072 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4073 	if (rv != 0)
   4074 		goto release;
   4075 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4076 
   4077 	if (d0_state) {
   4078 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4079 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4080 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4081 			oem_reg |= HV_OEM_BITS_LPLU;
   4082 	} else {
   4083 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4084 		    != 0)
   4085 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4086 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4087 		    != 0)
   4088 			oem_reg |= HV_OEM_BITS_LPLU;
   4089 	}
   4090 
   4091 	/* Set Restart auto-neg to activate the bits */
   4092 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4093 	    && (wm_phy_resetisblocked(sc) == false))
   4094 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4095 
   4096 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4097 
   4098 release:
   4099 	sc->phy.release(sc);
   4100 
   4101 	return rv;
   4102 }
   4103 
   4104 /* Init hardware bits */
   4105 void
   4106 wm_initialize_hardware_bits(struct wm_softc *sc)
   4107 {
   4108 	uint32_t tarc0, tarc1, reg;
   4109 
   4110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4111 		device_xname(sc->sc_dev), __func__));
   4112 
   4113 	/* For 82571 variant, 80003 and ICHs */
   4114 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4115 	    || (sc->sc_type >= WM_T_80003)) {
   4116 
   4117 		/* Transmit Descriptor Control 0 */
   4118 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4119 		reg |= TXDCTL_COUNT_DESC;
   4120 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4121 
   4122 		/* Transmit Descriptor Control 1 */
   4123 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4124 		reg |= TXDCTL_COUNT_DESC;
   4125 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4126 
   4127 		/* TARC0 */
   4128 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4129 		switch (sc->sc_type) {
   4130 		case WM_T_82571:
   4131 		case WM_T_82572:
   4132 		case WM_T_82573:
   4133 		case WM_T_82574:
   4134 		case WM_T_82583:
   4135 		case WM_T_80003:
   4136 			/* Clear bits 30..27 */
   4137 			tarc0 &= ~__BITS(30, 27);
   4138 			break;
   4139 		default:
   4140 			break;
   4141 		}
   4142 
   4143 		switch (sc->sc_type) {
   4144 		case WM_T_82571:
   4145 		case WM_T_82572:
   4146 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4147 
   4148 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4149 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4150 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4151 			/* 8257[12] Errata No.7 */
    4152 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4153 
   4154 			/* TARC1 bit 28 */
   4155 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4156 				tarc1 &= ~__BIT(28);
   4157 			else
   4158 				tarc1 |= __BIT(28);
   4159 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4160 
   4161 			/*
   4162 			 * 8257[12] Errata No.13
    4163 			 * Disable Dynamic Clock Gating.
   4164 			 */
   4165 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4166 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4167 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4168 			break;
   4169 		case WM_T_82573:
   4170 		case WM_T_82574:
   4171 		case WM_T_82583:
   4172 			if ((sc->sc_type == WM_T_82574)
   4173 			    || (sc->sc_type == WM_T_82583))
   4174 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4175 
   4176 			/* Extended Device Control */
   4177 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4178 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4179 			reg |= __BIT(22);	/* Set bit 22 */
   4180 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4181 
   4182 			/* Device Control */
   4183 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4184 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4185 
   4186 			/* PCIe Control Register */
   4187 			/*
   4188 			 * 82573 Errata (unknown).
   4189 			 *
   4190 			 * 82574 Errata 25 and 82583 Errata 12
   4191 			 * "Dropped Rx Packets":
    4192 			 *   NVM image version 2.1.4 and newer is free of this bug.
   4193 			 */
   4194 			reg = CSR_READ(sc, WMREG_GCR);
   4195 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4196 			CSR_WRITE(sc, WMREG_GCR, reg);
   4197 
   4198 			if ((sc->sc_type == WM_T_82574)
   4199 			    || (sc->sc_type == WM_T_82583)) {
   4200 				/*
   4201 				 * Document says this bit must be set for
   4202 				 * proper operation.
   4203 				 */
   4204 				reg = CSR_READ(sc, WMREG_GCR);
   4205 				reg |= __BIT(22);
   4206 				CSR_WRITE(sc, WMREG_GCR, reg);
   4207 
   4208 				/*
    4209 				 * Apply a workaround for a documented
    4210 				 * hardware erratum: some error-prone or
    4211 				 * unreliable PCIe completions occur,
    4212 				 * particularly with ASPM enabled.
    4213 				 * Without the fix, the issue can cause
    4214 				 * Tx timeouts.
   4215 				 */
   4216 				reg = CSR_READ(sc, WMREG_GCR2);
   4217 				reg |= __BIT(0);
   4218 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4219 			}
   4220 			break;
   4221 		case WM_T_80003:
   4222 			/* TARC0 */
   4223 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4224 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4225 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4226 
   4227 			/* TARC1 bit 28 */
   4228 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4229 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4230 				tarc1 &= ~__BIT(28);
   4231 			else
   4232 				tarc1 |= __BIT(28);
   4233 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4234 			break;
   4235 		case WM_T_ICH8:
   4236 		case WM_T_ICH9:
   4237 		case WM_T_ICH10:
   4238 		case WM_T_PCH:
   4239 		case WM_T_PCH2:
   4240 		case WM_T_PCH_LPT:
   4241 		case WM_T_PCH_SPT:
   4242 		case WM_T_PCH_CNP:
   4243 			/* TARC0 */
   4244 			if (sc->sc_type == WM_T_ICH8) {
   4245 				/* Set TARC0 bits 29 and 28 */
   4246 				tarc0 |= __BITS(29, 28);
   4247 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4248 				tarc0 |= __BIT(29);
   4249 				/*
   4250 				 *  Drop bit 28. From Linux.
   4251 				 * See I218/I219 spec update
   4252 				 * "5. Buffer Overrun While the I219 is
   4253 				 * Processing DMA Transactions"
   4254 				 */
   4255 				tarc0 &= ~__BIT(28);
   4256 			}
   4257 			/* Set TARC0 bits 23,24,26,27 */
   4258 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4259 
   4260 			/* CTRL_EXT */
   4261 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4262 			reg |= __BIT(22);	/* Set bit 22 */
   4263 			/*
   4264 			 * Enable PHY low-power state when MAC is at D3
   4265 			 * w/o WoL
   4266 			 */
   4267 			if (sc->sc_type >= WM_T_PCH)
   4268 				reg |= CTRL_EXT_PHYPDEN;
   4269 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4270 
   4271 			/* TARC1 */
   4272 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4273 			/* bit 28 */
   4274 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4275 				tarc1 &= ~__BIT(28);
   4276 			else
   4277 				tarc1 |= __BIT(28);
   4278 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4279 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4280 
   4281 			/* Device Status */
   4282 			if (sc->sc_type == WM_T_ICH8) {
   4283 				reg = CSR_READ(sc, WMREG_STATUS);
   4284 				reg &= ~__BIT(31);
   4285 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4286 
   4287 			}
   4288 
   4289 			/* IOSFPC */
   4290 			if (sc->sc_type == WM_T_PCH_SPT) {
   4291 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4292 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4293 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4294 			}
   4295 			/*
    4296 			 * To work around a descriptor data corruption issue
    4297 			 * during NFS v2 UDP traffic, just disable the NFS
    4298 			 * filtering capability.
   4299 			 */
   4300 			reg = CSR_READ(sc, WMREG_RFCTL);
   4301 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4302 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4303 			break;
   4304 		default:
   4305 			break;
   4306 		}
   4307 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4308 
   4309 		switch (sc->sc_type) {
   4310 		/*
   4311 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4312 		 * Avoid RSS Hash Value bug.
   4313 		 */
   4314 		case WM_T_82571:
   4315 		case WM_T_82572:
   4316 		case WM_T_82573:
   4317 		case WM_T_80003:
   4318 		case WM_T_ICH8:
   4319 			reg = CSR_READ(sc, WMREG_RFCTL);
    4320 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4321 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4322 			break;
   4323 		case WM_T_82574:
    4324 			/* Use extended Rx descriptors. */
   4325 			reg = CSR_READ(sc, WMREG_RFCTL);
   4326 			reg |= WMREG_RFCTL_EXSTEN;
   4327 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4328 			break;
   4329 		default:
   4330 			break;
   4331 		}
   4332 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4333 		/*
   4334 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4335 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4336 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4337 		 * Correctly by the Device"
   4338 		 *
   4339 		 * I354(C2000) Errata AVR53:
   4340 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4341 		 * Hang"
   4342 		 */
   4343 		reg = CSR_READ(sc, WMREG_RFCTL);
   4344 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4345 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4346 	}
   4347 }
   4348 
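         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the raw 82580 RXPBS register value into a packet
          *	buffer size, using the lookup table.
          */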
   4349 static uint32_t
   4350 wm_rxpbs_adjust_82580(uint32_t val)
   4351 {
   4352 	uint32_t rv = 0;
   4353 
   4354 	if (val < __arraycount(wm_82580_rxpbs_table))
   4355 		rv = wm_82580_rxpbs_table[val];
   4356 
   4357 	return rv;
   4358 }
   4359 
   4360 /*
   4361  * wm_reset_phy:
   4362  *
   4363  *	generic PHY reset function.
   4364  *	Same as e1000_phy_hw_reset_generic()
   4365  */
   4366 static void
   4367 wm_reset_phy(struct wm_softc *sc)
   4368 {
   4369 	uint32_t reg;
   4370 
   4371 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4372 		device_xname(sc->sc_dev), __func__));
   4373 	if (wm_phy_resetisblocked(sc))
   4374 		return;
   4375 
   4376 	sc->phy.acquire(sc);
   4377 
   4378 	reg = CSR_READ(sc, WMREG_CTRL);
   4379 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4380 	CSR_WRITE_FLUSH(sc);
   4381 
   4382 	delay(sc->phy.reset_delay_us);
   4383 
   4384 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4385 	CSR_WRITE_FLUSH(sc);
   4386 
   4387 	delay(150);
   4388 
   4389 	sc->phy.release(sc);
   4390 
   4391 	wm_get_cfg_done(sc);
   4392 	wm_phy_post_reset(sc);
   4393 }
   4394 
   4395 /*
   4396  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4397  * so it is enough to check sc->sc_queue[0] only.
   4398  */
   4399 static void
   4400 wm_flush_desc_rings(struct wm_softc *sc)
   4401 {
   4402 	pcireg_t preg;
   4403 	uint32_t reg;
   4404 	struct wm_txqueue *txq;
   4405 	wiseman_txdesc_t *txd;
   4406 	int nexttx;
   4407 	uint32_t rctl;
   4408 
   4409 	/* First, disable MULR fix in FEXTNVM11 */
   4410 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4411 	reg |= FEXTNVM11_DIS_MULRFIX;
   4412 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4413 
   4414 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4415 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4416 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4417 		return;
   4418 
   4419 	/* TX */
   4420 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4421 	    device_xname(sc->sc_dev), preg, reg);
   4422 	reg = CSR_READ(sc, WMREG_TCTL);
   4423 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4424 
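         	/*
         	 * Queue a single dummy 512-byte descriptor and bump the tail
         	 * pointer so the hardware flushes the Tx ring.
         	 */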
   4425 	txq = &sc->sc_queue[0].wmq_txq;
   4426 	nexttx = txq->txq_next;
   4427 	txd = &txq->txq_descs[nexttx];
   4428 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4429 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4430 	txd->wtx_fields.wtxu_status = 0;
   4431 	txd->wtx_fields.wtxu_options = 0;
   4432 	txd->wtx_fields.wtxu_vlan = 0;
   4433 
   4434 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4435 	    BUS_SPACE_BARRIER_WRITE);
   4436 
   4437 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4438 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4439 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4440 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4441 	delay(250);
   4442 
   4443 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4444 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4445 		return;
   4446 
   4447 	/* RX */
   4448 	printf("%s: Need RX flush (reg = %08x)\n",
   4449 	    device_xname(sc->sc_dev), preg);
   4450 	rctl = CSR_READ(sc, WMREG_RCTL);
   4451 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4452 	CSR_WRITE_FLUSH(sc);
   4453 	delay(150);
   4454 
   4455 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4456 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4457 	reg &= 0xffffc000;
   4458 	/*
   4459 	 * update thresholds: prefetch threshold to 31, host threshold
   4460 	 * to 1 and make sure the granularity is "descriptors" and not
   4461 	 * "cache lines"
   4462 	 */
   4463 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4464 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4465 
   4466 	/*
   4467 	 * momentarily enable the RX ring for the changes to take
   4468 	 * effect
   4469 	 */
   4470 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4471 	CSR_WRITE_FLUSH(sc);
   4472 	delay(150);
   4473 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4474 }
   4475 
   4476 /*
   4477  * wm_reset:
   4478  *
   4479  *	Reset the i82542 chip.
   4480  */
   4481 static void
   4482 wm_reset(struct wm_softc *sc)
   4483 {
   4484 	int phy_reset = 0;
   4485 	int i, error = 0;
   4486 	uint32_t reg;
   4487 	uint16_t kmreg;
   4488 	int rv;
   4489 
   4490 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4491 		device_xname(sc->sc_dev), __func__));
   4492 	KASSERT(sc->sc_type != 0);
   4493 
   4494 	/*
   4495 	 * Allocate on-chip memory according to the MTU size.
   4496 	 * The Packet Buffer Allocation register must be written
   4497 	 * before the chip is reset.
   4498 	 */
   4499 	switch (sc->sc_type) {
   4500 	case WM_T_82547:
   4501 	case WM_T_82547_2:
   4502 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4503 		    PBA_22K : PBA_30K;
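         		/*
         		 * Worked example: with an MTU of 8192 or less the RX
         		 * packet buffer gets PBA_30K, and each TX FIFO set up
         		 * below is the remaining (PBA_40K - PBA_30K) portion
         		 * of the 40K on-chip buffer.
         		 */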
   4504 		for (i = 0; i < sc->sc_nqueues; i++) {
   4505 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4506 			txq->txq_fifo_head = 0;
   4507 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4508 			txq->txq_fifo_size =
   4509 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4510 			txq->txq_fifo_stall = 0;
   4511 		}
   4512 		break;
   4513 	case WM_T_82571:
   4514 	case WM_T_82572:
    4515 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4516 	case WM_T_80003:
   4517 		sc->sc_pba = PBA_32K;
   4518 		break;
   4519 	case WM_T_82573:
   4520 		sc->sc_pba = PBA_12K;
   4521 		break;
   4522 	case WM_T_82574:
   4523 	case WM_T_82583:
   4524 		sc->sc_pba = PBA_20K;
   4525 		break;
   4526 	case WM_T_82576:
   4527 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4528 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4529 		break;
   4530 	case WM_T_82580:
   4531 	case WM_T_I350:
   4532 	case WM_T_I354:
   4533 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4534 		break;
   4535 	case WM_T_I210:
   4536 	case WM_T_I211:
   4537 		sc->sc_pba = PBA_34K;
   4538 		break;
   4539 	case WM_T_ICH8:
   4540 		/* Workaround for a bit corruption issue in FIFO memory */
   4541 		sc->sc_pba = PBA_8K;
   4542 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4543 		break;
   4544 	case WM_T_ICH9:
   4545 	case WM_T_ICH10:
   4546 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4547 		    PBA_14K : PBA_10K;
   4548 		break;
   4549 	case WM_T_PCH:
   4550 	case WM_T_PCH2:	/* XXX 14K? */
   4551 	case WM_T_PCH_LPT:
   4552 	case WM_T_PCH_SPT:
   4553 	case WM_T_PCH_CNP:
   4554 		sc->sc_pba = PBA_26K;
   4555 		break;
   4556 	default:
   4557 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4558 		    PBA_40K : PBA_48K;
   4559 		break;
   4560 	}
   4561 	/*
   4562 	 * Only old or non-multiqueue devices have the PBA register
   4563 	 * XXX Need special handling for 82575.
   4564 	 */
   4565 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4566 	    || (sc->sc_type == WM_T_82575))
   4567 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4568 
   4569 	/* Prevent the PCI-E bus from sticking */
   4570 	if (sc->sc_flags & WM_F_PCIE) {
   4571 		int timeout = 800;
   4572 
   4573 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4574 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4575 
   4576 		while (timeout--) {
   4577 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4578 			    == 0)
   4579 				break;
   4580 			delay(100);
   4581 		}
    4582 		if (timeout < 0)	/* post-decrement leaves -1 on expiry */
   4583 			device_printf(sc->sc_dev,
   4584 			    "failed to disable busmastering\n");
   4585 	}
   4586 
   4587 	/* Set the completion timeout for interface */
   4588 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4589 	    || (sc->sc_type == WM_T_82580)
   4590 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4591 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4592 		wm_set_pcie_completion_timeout(sc);
   4593 
   4594 	/* Clear interrupt */
   4595 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4596 	if (wm_is_using_msix(sc)) {
   4597 		if (sc->sc_type != WM_T_82574) {
   4598 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4599 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4600 		} else
   4601 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4602 	}
   4603 
   4604 	/* Stop the transmit and receive processes. */
   4605 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4606 	sc->sc_rctl &= ~RCTL_EN;
   4607 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4608 	CSR_WRITE_FLUSH(sc);
   4609 
   4610 	/* XXX set_tbi_sbp_82543() */
   4611 
   4612 	delay(10*1000);
   4613 
   4614 	/* Must acquire the MDIO ownership before MAC reset */
   4615 	switch (sc->sc_type) {
   4616 	case WM_T_82573:
   4617 	case WM_T_82574:
   4618 	case WM_T_82583:
   4619 		error = wm_get_hw_semaphore_82573(sc);
   4620 		break;
   4621 	default:
   4622 		break;
   4623 	}
   4624 
   4625 	/*
   4626 	 * 82541 Errata 29? & 82547 Errata 28?
   4627 	 * See also the description about PHY_RST bit in CTRL register
   4628 	 * in 8254x_GBe_SDM.pdf.
   4629 	 */
   4630 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4631 		CSR_WRITE(sc, WMREG_CTRL,
   4632 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4633 		CSR_WRITE_FLUSH(sc);
   4634 		delay(5000);
   4635 	}
   4636 
   4637 	switch (sc->sc_type) {
   4638 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4639 	case WM_T_82541:
   4640 	case WM_T_82541_2:
   4641 	case WM_T_82547:
   4642 	case WM_T_82547_2:
   4643 		/*
   4644 		 * On some chipsets, a reset through a memory-mapped write
   4645 		 * cycle can cause the chip to reset before completing the
    4646 		 * write cycle. This causes a major headache that can be avoided
   4647 		 * by issuing the reset via indirect register writes through
   4648 		 * I/O space.
   4649 		 *
   4650 		 * So, if we successfully mapped the I/O BAR at attach time,
   4651 		 * use that. Otherwise, try our luck with a memory-mapped
   4652 		 * reset.
   4653 		 */
   4654 		if (sc->sc_flags & WM_F_IOH_VALID)
   4655 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4656 		else
   4657 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4658 		break;
   4659 	case WM_T_82545_3:
   4660 	case WM_T_82546_3:
   4661 		/* Use the shadow control register on these chips. */
   4662 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4663 		break;
   4664 	case WM_T_80003:
   4665 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4666 		sc->phy.acquire(sc);
   4667 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4668 		sc->phy.release(sc);
   4669 		break;
   4670 	case WM_T_ICH8:
   4671 	case WM_T_ICH9:
   4672 	case WM_T_ICH10:
   4673 	case WM_T_PCH:
   4674 	case WM_T_PCH2:
   4675 	case WM_T_PCH_LPT:
   4676 	case WM_T_PCH_SPT:
   4677 	case WM_T_PCH_CNP:
   4678 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4679 		if (wm_phy_resetisblocked(sc) == false) {
   4680 			/*
   4681 			 * Gate automatic PHY configuration by hardware on
   4682 			 * non-managed 82579
   4683 			 */
   4684 			if ((sc->sc_type == WM_T_PCH2)
   4685 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4686 				== 0))
   4687 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4688 
   4689 			reg |= CTRL_PHY_RESET;
   4690 			phy_reset = 1;
   4691 		} else
   4692 			printf("XXX reset is blocked!!!\n");
   4693 		sc->phy.acquire(sc);
   4694 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4695 		/* Don't insert a completion barrier while resetting */
   4696 		delay(20*1000);
   4697 		mutex_exit(sc->sc_ich_phymtx);
   4698 		break;
   4699 	case WM_T_82580:
   4700 	case WM_T_I350:
   4701 	case WM_T_I354:
   4702 	case WM_T_I210:
   4703 	case WM_T_I211:
   4704 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4705 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4706 			CSR_WRITE_FLUSH(sc);
   4707 		delay(5000);
   4708 		break;
   4709 	case WM_T_82542_2_0:
   4710 	case WM_T_82542_2_1:
   4711 	case WM_T_82543:
   4712 	case WM_T_82540:
   4713 	case WM_T_82545:
   4714 	case WM_T_82546:
   4715 	case WM_T_82571:
   4716 	case WM_T_82572:
   4717 	case WM_T_82573:
   4718 	case WM_T_82574:
   4719 	case WM_T_82575:
   4720 	case WM_T_82576:
   4721 	case WM_T_82583:
   4722 	default:
   4723 		/* Everything else can safely use the documented method. */
   4724 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4725 		break;
   4726 	}
   4727 
   4728 	/* Must release the MDIO ownership after MAC reset */
   4729 	switch (sc->sc_type) {
   4730 	case WM_T_82573:
   4731 	case WM_T_82574:
   4732 	case WM_T_82583:
   4733 		if (error == 0)
   4734 			wm_put_hw_semaphore_82573(sc);
   4735 		break;
   4736 	default:
   4737 		break;
   4738 	}
   4739 
   4740 	/* Set Phy Config Counter to 50msec */
   4741 	if (sc->sc_type == WM_T_PCH2) {
   4742 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4743 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4744 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4745 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4746 	}
   4747 
   4748 	if (phy_reset != 0)
   4749 		wm_get_cfg_done(sc);
   4750 
   4751 	/* reload EEPROM */
   4752 	switch (sc->sc_type) {
   4753 	case WM_T_82542_2_0:
   4754 	case WM_T_82542_2_1:
   4755 	case WM_T_82543:
   4756 	case WM_T_82544:
   4757 		delay(10);
   4758 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4759 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4760 		CSR_WRITE_FLUSH(sc);
   4761 		delay(2000);
   4762 		break;
   4763 	case WM_T_82540:
   4764 	case WM_T_82545:
   4765 	case WM_T_82545_3:
   4766 	case WM_T_82546:
   4767 	case WM_T_82546_3:
   4768 		delay(5*1000);
   4769 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4770 		break;
   4771 	case WM_T_82541:
   4772 	case WM_T_82541_2:
   4773 	case WM_T_82547:
   4774 	case WM_T_82547_2:
   4775 		delay(20000);
   4776 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4777 		break;
   4778 	case WM_T_82571:
   4779 	case WM_T_82572:
   4780 	case WM_T_82573:
   4781 	case WM_T_82574:
   4782 	case WM_T_82583:
   4783 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4784 			delay(10);
   4785 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4786 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4787 			CSR_WRITE_FLUSH(sc);
   4788 		}
   4789 		/* check EECD_EE_AUTORD */
   4790 		wm_get_auto_rd_done(sc);
   4791 		/*
    4792 		 * PHY configuration from the NVM starts only after
    4793 		 * EECD_AUTO_RD is set.
   4794 		 */
   4795 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4796 		    || (sc->sc_type == WM_T_82583))
   4797 			delay(25*1000);
   4798 		break;
   4799 	case WM_T_82575:
   4800 	case WM_T_82576:
   4801 	case WM_T_82580:
   4802 	case WM_T_I350:
   4803 	case WM_T_I354:
   4804 	case WM_T_I210:
   4805 	case WM_T_I211:
   4806 	case WM_T_80003:
   4807 		/* check EECD_EE_AUTORD */
   4808 		wm_get_auto_rd_done(sc);
   4809 		break;
   4810 	case WM_T_ICH8:
   4811 	case WM_T_ICH9:
   4812 	case WM_T_ICH10:
   4813 	case WM_T_PCH:
   4814 	case WM_T_PCH2:
   4815 	case WM_T_PCH_LPT:
   4816 	case WM_T_PCH_SPT:
   4817 	case WM_T_PCH_CNP:
   4818 		break;
   4819 	default:
   4820 		panic("%s: unknown type\n", __func__);
   4821 	}
   4822 
   4823 	/* Check whether EEPROM is present or not */
   4824 	switch (sc->sc_type) {
   4825 	case WM_T_82575:
   4826 	case WM_T_82576:
   4827 	case WM_T_82580:
   4828 	case WM_T_I350:
   4829 	case WM_T_I354:
   4830 	case WM_T_ICH8:
   4831 	case WM_T_ICH9:
   4832 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4833 			/* Not found */
   4834 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4835 			if (sc->sc_type == WM_T_82575)
   4836 				wm_reset_init_script_82575(sc);
   4837 		}
   4838 		break;
   4839 	default:
   4840 		break;
   4841 	}
   4842 
   4843 	if (phy_reset != 0)
   4844 		wm_phy_post_reset(sc);
   4845 
   4846 	if ((sc->sc_type == WM_T_82580)
   4847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4848 		/* clear global device reset status bit */
   4849 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4850 	}
   4851 
   4852 	/* Clear any pending interrupt events. */
   4853 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4854 	reg = CSR_READ(sc, WMREG_ICR);
   4855 	if (wm_is_using_msix(sc)) {
   4856 		if (sc->sc_type != WM_T_82574) {
   4857 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4858 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4859 		} else
   4860 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4861 	}
   4862 
   4863 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4864 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4865 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4866 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4867 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4868 		reg |= KABGTXD_BGSQLBIAS;
   4869 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4870 	}
   4871 
   4872 	/* reload sc_ctrl */
   4873 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4874 
   4875 	if (sc->sc_type == WM_T_I354) {
   4876 #if 0
   4877 		/* I354 uses an external PHY */
   4878 		wm_set_eee_i354(sc);
   4879 #endif
   4880 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4881 		wm_set_eee_i350(sc);
   4882 
   4883 	/*
   4884 	 * For PCH, this write will make sure that any noise will be detected
   4885 	 * as a CRC error and be dropped rather than show up as a bad packet
   4886 	 * to the DMA engine
   4887 	 */
   4888 	if (sc->sc_type == WM_T_PCH)
   4889 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4890 
   4891 	if (sc->sc_type >= WM_T_82544)
   4892 		CSR_WRITE(sc, WMREG_WUC, 0);
   4893 
   4894 	wm_reset_mdicnfg_82580(sc);
   4895 
   4896 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4897 		wm_pll_workaround_i210(sc);
   4898 
   4899 	if (sc->sc_type == WM_T_80003) {
   4900 		/* default to TRUE to enable the MDIC W/A */
   4901 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4902 
   4903 		rv = wm_kmrn_readreg(sc,
   4904 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4905 		if (rv == 0) {
   4906 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4907 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4908 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4909 			else
   4910 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4911 		}
   4912 	}
   4913 }
   4914 
   4915 /*
   4916  * wm_add_rxbuf:
   4917  *
    4918  *	Add a receive buffer to the indicated descriptor.
   4919  */
   4920 static int
   4921 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4922 {
   4923 	struct wm_softc *sc = rxq->rxq_sc;
   4924 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4925 	struct mbuf *m;
   4926 	int error;
   4927 
   4928 	KASSERT(mutex_owned(rxq->rxq_lock));
   4929 
   4930 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4931 	if (m == NULL)
   4932 		return ENOBUFS;
   4933 
   4934 	MCLGET(m, M_DONTWAIT);
   4935 	if ((m->m_flags & M_EXT) == 0) {
   4936 		m_freem(m);
   4937 		return ENOBUFS;
   4938 	}
   4939 
   4940 	if (rxs->rxs_mbuf != NULL)
   4941 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4942 
   4943 	rxs->rxs_mbuf = m;
   4944 
   4945 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4946 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4947 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4948 	if (error) {
   4949 		/* XXX XXX XXX */
   4950 		aprint_error_dev(sc->sc_dev,
   4951 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4952 		panic("wm_add_rxbuf");
   4953 	}
   4954 
   4955 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4956 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4957 
   4958 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4959 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4960 			wm_init_rxdesc(rxq, idx);
   4961 	} else
   4962 		wm_init_rxdesc(rxq, idx);
   4963 
   4964 	return 0;
   4965 }
   4966 
   4967 /*
   4968  * wm_rxdrain:
   4969  *
   4970  *	Drain the receive queue.
   4971  */
   4972 static void
   4973 wm_rxdrain(struct wm_rxqueue *rxq)
   4974 {
   4975 	struct wm_softc *sc = rxq->rxq_sc;
   4976 	struct wm_rxsoft *rxs;
   4977 	int i;
   4978 
   4979 	KASSERT(mutex_owned(rxq->rxq_lock));
   4980 
   4981 	for (i = 0; i < WM_NRXDESC; i++) {
   4982 		rxs = &rxq->rxq_soft[i];
   4983 		if (rxs->rxs_mbuf != NULL) {
   4984 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4985 			m_freem(rxs->rxs_mbuf);
   4986 			rxs->rxs_mbuf = NULL;
   4987 		}
   4988 	}
   4989 }
   4990 
   4991 /*
   4992  * Setup registers for RSS.
    4993  * Set up the registers for RSS.
    4994  *
    4995  * XXX VMDq is not supported yet.
   4996 static void
   4997 wm_init_rss(struct wm_softc *sc)
   4998 {
   4999 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5000 	int i;
   5001 
   5002 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5003 
   5004 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5005 		int qid, reta_ent;
   5006 
   5007 		qid  = i % sc->sc_nqueues;
   5008 		switch (sc->sc_type) {
   5009 		case WM_T_82574:
   5010 			reta_ent = __SHIFTIN(qid,
   5011 			    RETA_ENT_QINDEX_MASK_82574);
   5012 			break;
   5013 		case WM_T_82575:
   5014 			reta_ent = __SHIFTIN(qid,
   5015 			    RETA_ENT_QINDEX1_MASK_82575);
   5016 			break;
   5017 		default:
   5018 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5019 			break;
   5020 		}
   5021 
   5022 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5023 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5024 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5025 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5026 	}
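         	/*
         	 * Example: with sc_nqueues = 4, the redirection table entries
         	 * are filled 0, 1, 2, 3, 0, 1, ... so the low bits of the RSS
         	 * hash spread received packets evenly across the four queues.
         	 */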
   5027 
   5028 	rss_getkey((uint8_t *)rss_key);
   5029 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5030 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5031 
   5032 	if (sc->sc_type == WM_T_82574)
   5033 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5034 	else
   5035 		mrqc = MRQC_ENABLE_RSS_MQ;
   5036 
   5037 	/*
   5038 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5039 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5040 	 */
   5041 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5042 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5043 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5044 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5045 
   5046 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5047 }
   5048 
   5049 /*
    5050  * Adjust the TX and RX queue numbers that the system actually uses.
    5051  *
    5052  * The numbers are affected by the parameters below:
    5053  *     - The number of hardware queues
   5054  *     - The number of MSI-X vectors (= "nvectors" argument)
   5055  *     - ncpu
   5056  */
   5057 static void
   5058 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5059 {
   5060 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5061 
   5062 	if (nvectors < 2) {
   5063 		sc->sc_nqueues = 1;
   5064 		return;
   5065 	}
   5066 
   5067 	switch (sc->sc_type) {
   5068 	case WM_T_82572:
   5069 		hw_ntxqueues = 2;
   5070 		hw_nrxqueues = 2;
   5071 		break;
   5072 	case WM_T_82574:
   5073 		hw_ntxqueues = 2;
   5074 		hw_nrxqueues = 2;
   5075 		break;
   5076 	case WM_T_82575:
   5077 		hw_ntxqueues = 4;
   5078 		hw_nrxqueues = 4;
   5079 		break;
   5080 	case WM_T_82576:
   5081 		hw_ntxqueues = 16;
   5082 		hw_nrxqueues = 16;
   5083 		break;
   5084 	case WM_T_82580:
   5085 	case WM_T_I350:
   5086 	case WM_T_I354:
   5087 		hw_ntxqueues = 8;
   5088 		hw_nrxqueues = 8;
   5089 		break;
   5090 	case WM_T_I210:
   5091 		hw_ntxqueues = 4;
   5092 		hw_nrxqueues = 4;
   5093 		break;
   5094 	case WM_T_I211:
   5095 		hw_ntxqueues = 2;
   5096 		hw_nrxqueues = 2;
   5097 		break;
   5098 		/*
    5099 		 * As the ethernet controllers below do not support MSI-X,
    5100 		 * this driver does not use multiqueue on them:
   5101 		 *     - WM_T_80003
   5102 		 *     - WM_T_ICH8
   5103 		 *     - WM_T_ICH9
   5104 		 *     - WM_T_ICH10
   5105 		 *     - WM_T_PCH
   5106 		 *     - WM_T_PCH2
   5107 		 *     - WM_T_PCH_LPT
   5108 		 */
   5109 	default:
   5110 		hw_ntxqueues = 1;
   5111 		hw_nrxqueues = 1;
   5112 		break;
   5113 	}
   5114 
   5115 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5116 
   5117 	/*
    5118 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5119 	 * limit the number of queues actually used.
   5120 	 */
   5121 	if (nvectors < hw_nqueues + 1)
   5122 		sc->sc_nqueues = nvectors - 1;
   5123 	else
   5124 		sc->sc_nqueues = hw_nqueues;
   5125 
   5126 	/*
    5127 	 * As more queues than CPUs cannot improve scaling, we limit
    5128 	 * the number of queues actually used.
   5129 	 */
   5130 	if (ncpu < sc->sc_nqueues)
   5131 		sc->sc_nqueues = ncpu;
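         	/*
         	 * Worked example (hypothetical configuration): an 82580
         	 * exposes 8 hardware queues; attached with nvectors = 5 on a
         	 * 16-CPU machine, the clamps above give
         	 * sc_nqueues = min(8, 5 - 1, 16) = 4, leaving one MSI-X
         	 * vector for the link interrupt.
         	 */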
   5132 }
   5133 
   5134 static inline bool
   5135 wm_is_using_msix(struct wm_softc *sc)
   5136 {
   5137 
   5138 	return (sc->sc_nintrs > 1);
   5139 }
   5140 
   5141 static inline bool
   5142 wm_is_using_multiqueue(struct wm_softc *sc)
   5143 {
   5144 
   5145 	return (sc->sc_nqueues > 1);
   5146 }
   5147 
   5148 static int
   5149 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5150 {
   5151 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5152 	wmq->wmq_id = qidx;
   5153 	wmq->wmq_intr_idx = intr_idx;
   5154 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5155 #ifdef WM_MPSAFE
   5156 	    | SOFTINT_MPSAFE
   5157 #endif
   5158 	    , wm_handle_queue, wmq);
   5159 	if (wmq->wmq_si != NULL)
   5160 		return 0;
   5161 
   5162 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5163 	    wmq->wmq_id);
   5164 
   5165 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5166 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5167 	return ENOMEM;
   5168 }
   5169 
   5170 /*
   5171  * Both single interrupt MSI and INTx can use this function.
    5172  * Both single-interrupt MSI and INTx can use this function.
   5173 static int
   5174 wm_setup_legacy(struct wm_softc *sc)
   5175 {
   5176 	pci_chipset_tag_t pc = sc->sc_pc;
   5177 	const char *intrstr = NULL;
   5178 	char intrbuf[PCI_INTRSTR_LEN];
   5179 	int error;
   5180 
   5181 	error = wm_alloc_txrx_queues(sc);
   5182 	if (error) {
   5183 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5184 		    error);
   5185 		return ENOMEM;
   5186 	}
   5187 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5188 	    sizeof(intrbuf));
   5189 #ifdef WM_MPSAFE
   5190 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5191 #endif
   5192 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5193 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5194 	if (sc->sc_ihs[0] == NULL) {
   5195 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5196 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5197 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5198 		return ENOMEM;
   5199 	}
   5200 
   5201 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5202 	sc->sc_nintrs = 1;
   5203 
   5204 	return wm_softint_establish(sc, 0, 0);
   5205 }
   5206 
   5207 static int
   5208 wm_setup_msix(struct wm_softc *sc)
   5209 {
   5210 	void *vih;
   5211 	kcpuset_t *affinity;
   5212 	int qidx, error, intr_idx, txrx_established;
   5213 	pci_chipset_tag_t pc = sc->sc_pc;
   5214 	const char *intrstr = NULL;
   5215 	char intrbuf[PCI_INTRSTR_LEN];
   5216 	char intr_xname[INTRDEVNAMEBUF];
   5217 
   5218 	if (sc->sc_nqueues < ncpu) {
   5219 		/*
    5220 		 * To avoid colliding with other devices' interrupts, the
    5221 		 * affinity of the Tx/Rx interrupts starts at CPU#1.
   5222 		 */
   5223 		sc->sc_affinity_offset = 1;
   5224 	} else {
   5225 		/*
    5226 		 * In this case the device uses all CPUs, so for readability
    5227 		 * we align the affinity cpu_index with the MSI-X vector number.
   5228 		 */
   5229 		sc->sc_affinity_offset = 0;
   5230 	}
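         	/*
         	 * Resulting MSI-X vector layout: vectors 0 .. sc_nqueues - 1
         	 * each service one TX/RX queue pair, and the vector after
         	 * them handles link status only, so sc_nintrs ends up as
         	 * sc_nqueues + 1.
         	 */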
   5231 
   5232 	error = wm_alloc_txrx_queues(sc);
   5233 	if (error) {
   5234 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5235 		    error);
   5236 		return ENOMEM;
   5237 	}
   5238 
   5239 	kcpuset_create(&affinity, false);
   5240 	intr_idx = 0;
   5241 
   5242 	/*
   5243 	 * TX and RX
   5244 	 */
   5245 	txrx_established = 0;
   5246 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5247 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5248 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5249 
   5250 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5251 		    sizeof(intrbuf));
   5252 #ifdef WM_MPSAFE
   5253 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5254 		    PCI_INTR_MPSAFE, true);
   5255 #endif
   5256 		memset(intr_xname, 0, sizeof(intr_xname));
   5257 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5258 		    device_xname(sc->sc_dev), qidx);
   5259 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5260 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5261 		if (vih == NULL) {
   5262 			aprint_error_dev(sc->sc_dev,
   5263 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5264 			    intrstr ? " at " : "",
   5265 			    intrstr ? intrstr : "");
   5266 
   5267 			goto fail;
   5268 		}
   5269 		kcpuset_zero(affinity);
   5270 		/* Round-robin affinity */
   5271 		kcpuset_set(affinity, affinity_to);
   5272 		error = interrupt_distribute(vih, affinity, NULL);
   5273 		if (error == 0) {
   5274 			aprint_normal_dev(sc->sc_dev,
   5275 			    "for TX and RX interrupting at %s affinity to %u\n",
   5276 			    intrstr, affinity_to);
   5277 		} else {
   5278 			aprint_normal_dev(sc->sc_dev,
   5279 			    "for TX and RX interrupting at %s\n", intrstr);
   5280 		}
   5281 		sc->sc_ihs[intr_idx] = vih;
   5282 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5283 			goto fail;
   5284 		txrx_established++;
   5285 		intr_idx++;
   5286 	}
   5287 
   5288 	/*
   5289 	 * LINK
   5290 	 */
   5291 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5292 	    sizeof(intrbuf));
   5293 #ifdef WM_MPSAFE
   5294 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5295 #endif
   5296 	memset(intr_xname, 0, sizeof(intr_xname));
   5297 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5298 	    device_xname(sc->sc_dev));
   5299 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5300 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5301 	if (vih == NULL) {
   5302 		aprint_error_dev(sc->sc_dev,
   5303 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5304 		    intrstr ? " at " : "",
   5305 		    intrstr ? intrstr : "");
   5306 
   5307 		goto fail;
   5308 	}
    5309 	/* Keep the default affinity for the LINK interrupt */
   5310 	aprint_normal_dev(sc->sc_dev,
   5311 	    "for LINK interrupting at %s\n", intrstr);
   5312 	sc->sc_ihs[intr_idx] = vih;
   5313 	sc->sc_link_intr_idx = intr_idx;
   5314 
   5315 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5316 	kcpuset_destroy(affinity);
   5317 	return 0;
   5318 
   5319  fail:
   5320 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5321 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5322 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5323 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5324 	}
   5325 
   5326 	kcpuset_destroy(affinity);
   5327 	return ENOMEM;
   5328 }
   5329 
   5330 static void
   5331 wm_unset_stopping_flags(struct wm_softc *sc)
   5332 {
   5333 	int i;
   5334 
   5335 	KASSERT(WM_CORE_LOCKED(sc));
   5336 
   5337 	/*
    5338 	 * The stopping flags must be unset in ascending order.
   5339 	 */
   5340 	for (i = 0; i < sc->sc_nqueues; i++) {
   5341 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5342 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5343 
   5344 		mutex_enter(txq->txq_lock);
   5345 		txq->txq_stopping = false;
   5346 		mutex_exit(txq->txq_lock);
   5347 
   5348 		mutex_enter(rxq->rxq_lock);
   5349 		rxq->rxq_stopping = false;
   5350 		mutex_exit(rxq->rxq_lock);
   5351 	}
   5352 
   5353 	sc->sc_core_stopping = false;
   5354 }
   5355 
   5356 static void
   5357 wm_set_stopping_flags(struct wm_softc *sc)
   5358 {
   5359 	int i;
   5360 
   5361 	KASSERT(WM_CORE_LOCKED(sc));
   5362 
   5363 	sc->sc_core_stopping = true;
   5364 
   5365 	/*
    5366 	 * The stopping flags must be set in ascending order.
   5367 	 */
   5368 	for (i = 0; i < sc->sc_nqueues; i++) {
   5369 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5370 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5371 
   5372 		mutex_enter(rxq->rxq_lock);
   5373 		rxq->rxq_stopping = true;
   5374 		mutex_exit(rxq->rxq_lock);
   5375 
   5376 		mutex_enter(txq->txq_lock);
   5377 		txq->txq_stopping = true;
   5378 		mutex_exit(txq->txq_lock);
   5379 	}
   5380 }
   5381 
   5382 /*
    5383  * Write the interrupt interval value to the ITR or EITR register.
   5384  */
   5385 static void
   5386 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5387 {
   5388 
   5389 	if (!wmq->wmq_set_itr)
   5390 		return;
   5391 
   5392 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5393 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5394 
   5395 		/*
    5396 		 * The 82575 doesn't have the CNT_INGR field,
    5397 		 * so the counter field must be set by software.
   5398 		 */
   5399 		if (sc->sc_type == WM_T_82575)
   5400 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5401 		else
   5402 			eitr |= EITR_CNT_INGR;
   5403 
   5404 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5405 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5406 		/*
    5407 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5408 		 * the multiqueue function with MSI-X.
   5409 		 */
   5410 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5411 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5412 	} else {
   5413 		KASSERT(wmq->wmq_id == 0);
   5414 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5415 	}
   5416 
   5417 	wmq->wmq_set_itr = false;
   5418 }
   5419 
   5420 /*
   5421  * TODO
    5422  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5423  * but it does not fit wm(4), so AIM remains disabled until we find an
    5424  * appropriate ITR calculation.
   5425  */
   5426 /*
    5427  * Calculate the interrupt interval value to be written to the register
    5428  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5429  */
   5430 static void
   5431 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5432 {
   5433 #ifdef NOTYET
   5434 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5435 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5436 	uint32_t avg_size = 0;
   5437 	uint32_t new_itr;
   5438 
   5439 	if (rxq->rxq_packets)
   5440 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5441 	if (txq->txq_packets)
   5442 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5443 
   5444 	if (avg_size == 0) {
   5445 		new_itr = 450; /* restore default value */
   5446 		goto out;
   5447 	}
   5448 
   5449 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5450 	avg_size += 24;
   5451 
   5452 	/* Don't starve jumbo frames */
   5453 	avg_size = uimin(avg_size, 3000);
   5454 
   5455 	/* Give a little boost to mid-size frames */
   5456 	if ((avg_size > 300) && (avg_size < 1200))
   5457 		new_itr = avg_size / 3;
   5458 	else
   5459 		new_itr = avg_size / 2;
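         	/*
         	 * Illustrative numbers: avg_size = 800 takes the mid-size
         	 * branch, new_itr = 800 / 3 = 266; a 64-byte frame (88 after
         	 * the +24 adjustment) takes the other branch,
         	 * new_itr = 88 / 2 = 44.
         	 */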
   5460 
   5461 out:
   5462 	/*
    5463 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5464 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5465 	 */
   5466 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5467 		new_itr *= 4;
   5468 
   5469 	if (new_itr != wmq->wmq_itr) {
   5470 		wmq->wmq_itr = new_itr;
   5471 		wmq->wmq_set_itr = true;
   5472 	} else
   5473 		wmq->wmq_set_itr = false;
   5474 
   5475 	rxq->rxq_packets = 0;
   5476 	rxq->rxq_bytes = 0;
   5477 	txq->txq_packets = 0;
   5478 	txq->txq_bytes = 0;
   5479 #endif
   5480 }
   5481 
   5482 /*
   5483  * wm_init:		[ifnet interface function]
   5484  *
   5485  *	Initialize the interface.
   5486  */
   5487 static int
   5488 wm_init(struct ifnet *ifp)
   5489 {
   5490 	struct wm_softc *sc = ifp->if_softc;
   5491 	int ret;
   5492 
   5493 	WM_CORE_LOCK(sc);
   5494 	ret = wm_init_locked(ifp);
   5495 	WM_CORE_UNLOCK(sc);
   5496 
   5497 	return ret;
   5498 }
   5499 
   5500 static int
   5501 wm_init_locked(struct ifnet *ifp)
   5502 {
   5503 	struct wm_softc *sc = ifp->if_softc;
   5504 	int i, j, trynum, error = 0;
   5505 	uint32_t reg;
   5506 
   5507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5508 		device_xname(sc->sc_dev), __func__));
   5509 	KASSERT(WM_CORE_LOCKED(sc));
   5510 
   5511 	/*
    5512 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5513 	 * There is a small but measurable benefit to avoiding the adjustment
   5514 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5515 	 * on such platforms.  One possibility is that the DMA itself is
   5516 	 * slightly more efficient if the front of the entire packet (instead
   5517 	 * of the front of the headers) is aligned.
   5518 	 *
   5519 	 * Note we must always set align_tweak to 0 if we are using
   5520 	 * jumbo frames.
   5521 	 */
   5522 #ifdef __NO_STRICT_ALIGNMENT
   5523 	sc->sc_align_tweak = 0;
   5524 #else
   5525 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5526 		sc->sc_align_tweak = 0;
   5527 	else
   5528 		sc->sc_align_tweak = 2;
   5529 #endif /* __NO_STRICT_ALIGNMENT */
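         	/*
         	 * With sc_align_tweak = 2, the 14-byte Ethernet header starts
         	 * two bytes into the receive buffer, so the IP header that
         	 * follows it lands on a 4-byte boundary (2 + 14 = 16).
         	 */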
   5530 
   5531 	/* Cancel any pending I/O. */
   5532 	wm_stop_locked(ifp, 0);
   5533 
   5534 	/* update statistics before reset */
   5535 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5536 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5537 
   5538 	/* PCH_SPT hardware workaround */
   5539 	if (sc->sc_type == WM_T_PCH_SPT)
   5540 		wm_flush_desc_rings(sc);
   5541 
   5542 	/* Reset the chip to a known state. */
   5543 	wm_reset(sc);
   5544 
   5545 	/*
   5546 	 * AMT based hardware can now take control from firmware
   5547 	 * Do this after reset.
   5548 	 */
   5549 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5550 		wm_get_hw_control(sc);
   5551 
   5552 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5553 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5554 		wm_legacy_irq_quirk_spt(sc);
   5555 
   5556 	/* Init hardware bits */
   5557 	wm_initialize_hardware_bits(sc);
   5558 
   5559 	/* Reset the PHY. */
   5560 	if (sc->sc_flags & WM_F_HAS_MII)
   5561 		wm_gmii_reset(sc);
   5562 
   5563 	if (sc->sc_type >= WM_T_ICH8) {
   5564 		reg = CSR_READ(sc, WMREG_GCR);
   5565 		/*
   5566 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5567 		 * default after reset.
   5568 		 */
   5569 		if (sc->sc_type == WM_T_ICH8)
   5570 			reg |= GCR_NO_SNOOP_ALL;
   5571 		else
   5572 			reg &= ~GCR_NO_SNOOP_ALL;
   5573 		CSR_WRITE(sc, WMREG_GCR, reg);
   5574 	}
   5575 	if ((sc->sc_type >= WM_T_ICH8)
   5576 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5577 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5578 
   5579 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5580 		reg |= CTRL_EXT_RO_DIS;
   5581 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5582 	}
   5583 
   5584 	/* Calculate (E)ITR value */
   5585 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5586 		/*
   5587 		 * For NEWQUEUE's EITR (except for 82575).
    5588 		 * The 82575's EITR should be set to the same throttling value
    5589 		 * as other old controllers' ITR because the interrupt/sec
    5590 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5591 		 *
    5592 		 * The 82574's EITR should be set to the same throttling value
    5593 		 * as its ITR.
    5594 		 *
    5595 		 * For N interrupts/sec, set this value to:
    5596 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5596 		 */
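         		/*
         		 * By the 1,000,000 / N rule above, 450 corresponds to
         		 * roughly 1,000,000 / 450 =~ 2222 interrupts/sec, in
         		 * the same ballpark as the 2604 ints/sec legacy ITR
         		 * default below.
         		 */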
   5597 		sc->sc_itr_init = 450;
   5598 	} else if (sc->sc_type >= WM_T_82543) {
   5599 		/*
   5600 		 * Set up the interrupt throttling register (units of 256ns)
   5601 		 * Note that a footnote in Intel's documentation says this
   5602 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5603 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5604 		 * that that is also true for the 1024ns units of the other
   5605 		 * interrupt-related timer registers -- so, really, we ought
   5606 		 * to divide this value by 4 when the link speed is low.
   5607 		 *
   5608 		 * XXX implement this division at link speed change!
   5609 		 */
   5610 
   5611 		/*
   5612 		 * For N interrupts/sec, set this value to:
   5613 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5614 		 * absolute and packet timer values to this value
   5615 		 * divided by 4 to get "simple timer" behavior.
   5616 		 */
   5617 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5618 	}
   5619 
   5620 	error = wm_init_txrx_queues(sc);
   5621 	if (error)
   5622 		goto out;
   5623 
   5624 	/*
   5625 	 * Clear out the VLAN table -- we don't use it (yet).
   5626 	 */
   5627 	CSR_WRITE(sc, WMREG_VET, 0);
   5628 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5629 		trynum = 10; /* Due to hw errata */
   5630 	else
   5631 		trynum = 1;
   5632 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5633 		for (j = 0; j < trynum; j++)
   5634 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5635 
   5636 	/*
   5637 	 * Set up flow-control parameters.
   5638 	 *
   5639 	 * XXX Values could probably stand some tuning.
   5640 	 */
   5641 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5642 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5643 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5644 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5645 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5646 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5647 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5648 	}
   5649 
   5650 	sc->sc_fcrtl = FCRTL_DFLT;
   5651 	if (sc->sc_type < WM_T_82543) {
   5652 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5653 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5654 	} else {
   5655 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5656 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5657 	}
   5658 
   5659 	if (sc->sc_type == WM_T_80003)
   5660 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5661 	else
   5662 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5663 
   5664 	/* Writes the control register. */
   5665 	wm_set_vlan(sc);
   5666 
   5667 	if (sc->sc_flags & WM_F_HAS_MII) {
   5668 		uint16_t kmreg;
   5669 
   5670 		switch (sc->sc_type) {
   5671 		case WM_T_80003:
   5672 		case WM_T_ICH8:
   5673 		case WM_T_ICH9:
   5674 		case WM_T_ICH10:
   5675 		case WM_T_PCH:
   5676 		case WM_T_PCH2:
   5677 		case WM_T_PCH_LPT:
   5678 		case WM_T_PCH_SPT:
   5679 		case WM_T_PCH_CNP:
   5680 			/*
   5681 			 * Set the mac to wait the maximum time between each
   5682 			 * iteration and increase the max iterations when
   5683 			 * polling the phy; this fixes erroneous timeouts at
   5684 			 * 10Mbps.
   5685 			 */
   5686 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5687 			    0xFFFF);
   5688 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5689 			    &kmreg);
   5690 			kmreg |= 0x3F;
   5691 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5692 			    kmreg);
   5693 			break;
   5694 		default:
   5695 			break;
   5696 		}
   5697 
   5698 		if (sc->sc_type == WM_T_80003) {
   5699 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5700 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5701 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5702 
   5703 			/* Bypass RX and TX FIFO's */
   5704 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5705 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5706 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5707 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5708 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5709 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5710 		}
   5711 	}
   5712 #if 0
   5713 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5714 #endif
   5715 
   5716 	/* Set up checksum offload parameters. */
   5717 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5718 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5719 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5720 		reg |= RXCSUM_IPOFL;
   5721 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5722 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5723 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5724 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5725 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5726 
    5727 	/* Set up the MSI-X related registers */
   5728 	if (wm_is_using_msix(sc)) {
   5729 		uint32_t ivar;
   5730 		struct wm_queue *wmq;
   5731 		int qid, qintr_idx;
   5732 
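         		/*
         		 * The IVAR registers map each interrupt cause
         		 * (per-queue TX, per-queue RX, and link/other) to an
         		 * MSI-X vector index; the VALID bit arms each mapping.
         		 * The register layout differs by family, hence the
         		 * per-type branches below.
         		 */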
   5733 		if (sc->sc_type == WM_T_82575) {
   5734 			/* Interrupt control */
   5735 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5736 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5737 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5738 
   5739 			/* TX and RX */
   5740 			for (i = 0; i < sc->sc_nqueues; i++) {
   5741 				wmq = &sc->sc_queue[i];
   5742 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5743 				    EITR_TX_QUEUE(wmq->wmq_id)
   5744 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5745 			}
   5746 			/* Link status */
   5747 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5748 			    EITR_OTHER);
   5749 		} else if (sc->sc_type == WM_T_82574) {
   5750 			/* Interrupt control */
   5751 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5752 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5753 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5754 
   5755 			/*
    5756 			 * Work around an issue with spurious interrupts
    5757 			 * in MSI-X mode.
    5758 			 * At wm_initialize_hardware_bits(), sc_nintrs had not been
    5759 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5760 			 */
   5761 			reg = CSR_READ(sc, WMREG_RFCTL);
   5762 			reg |= WMREG_RFCTL_ACKDIS;
   5763 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5764 
   5765 			ivar = 0;
   5766 			/* TX and RX */
   5767 			for (i = 0; i < sc->sc_nqueues; i++) {
   5768 				wmq = &sc->sc_queue[i];
   5769 				qid = wmq->wmq_id;
   5770 				qintr_idx = wmq->wmq_intr_idx;
   5771 
   5772 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5773 				    IVAR_TX_MASK_Q_82574(qid));
   5774 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5775 				    IVAR_RX_MASK_Q_82574(qid));
   5776 			}
   5777 			/* Link status */
   5778 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5779 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5780 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5781 		} else {
   5782 			/* Interrupt control */
   5783 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5784 			    | GPIE_EIAME | GPIE_PBA);
   5785 
   5786 			switch (sc->sc_type) {
   5787 			case WM_T_82580:
   5788 			case WM_T_I350:
   5789 			case WM_T_I354:
   5790 			case WM_T_I210:
   5791 			case WM_T_I211:
   5792 				/* TX and RX */
   5793 				for (i = 0; i < sc->sc_nqueues; i++) {
   5794 					wmq = &sc->sc_queue[i];
   5795 					qid = wmq->wmq_id;
   5796 					qintr_idx = wmq->wmq_intr_idx;
   5797 
   5798 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5799 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5800 					ivar |= __SHIFTIN((qintr_idx
   5801 						| IVAR_VALID),
   5802 					    IVAR_TX_MASK_Q(qid));
   5803 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5804 					ivar |= __SHIFTIN((qintr_idx
   5805 						| IVAR_VALID),
   5806 					    IVAR_RX_MASK_Q(qid));
   5807 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5808 				}
   5809 				break;
   5810 			case WM_T_82576:
   5811 				/* TX and RX */
   5812 				for (i = 0; i < sc->sc_nqueues; i++) {
   5813 					wmq = &sc->sc_queue[i];
   5814 					qid = wmq->wmq_id;
   5815 					qintr_idx = wmq->wmq_intr_idx;
   5816 
   5817 					ivar = CSR_READ(sc,
   5818 					    WMREG_IVAR_Q_82576(qid));
   5819 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5820 					ivar |= __SHIFTIN((qintr_idx
   5821 						| IVAR_VALID),
   5822 					    IVAR_TX_MASK_Q_82576(qid));
   5823 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5824 					ivar |= __SHIFTIN((qintr_idx
   5825 						| IVAR_VALID),
   5826 					    IVAR_RX_MASK_Q_82576(qid));
   5827 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5828 					    ivar);
   5829 				}
   5830 				break;
   5831 			default:
   5832 				break;
   5833 			}
   5834 
   5835 			/* Link status */
   5836 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5837 			    IVAR_MISC_OTHER);
   5838 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5839 		}
   5840 
   5841 		if (wm_is_using_multiqueue(sc)) {
   5842 			wm_init_rss(sc);
   5843 
   5844 			/*
    5845 			 * NOTE: Receive Full-Packet Checksum Offload
    5846 			 * is mutually exclusive with Multiqueue.  However,
    5847 			 * this is not the same as TCP/IP checksums, which
    5848 			 * still work.
   5849 			*/
   5850 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5851 			reg |= RXCSUM_PCSD;
   5852 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5853 		}
   5854 	}
   5855 
   5856 	/* Set up the interrupt registers. */
   5857 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5858 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5859 	    ICR_RXO | ICR_RXT0;
   5860 	if (wm_is_using_msix(sc)) {
   5861 		uint32_t mask;
   5862 		struct wm_queue *wmq;
   5863 
   5864 		switch (sc->sc_type) {
   5865 		case WM_T_82574:
   5866 			mask = 0;
   5867 			for (i = 0; i < sc->sc_nqueues; i++) {
   5868 				wmq = &sc->sc_queue[i];
   5869 				mask |= ICR_TXQ(wmq->wmq_id);
   5870 				mask |= ICR_RXQ(wmq->wmq_id);
   5871 			}
   5872 			mask |= ICR_OTHER;
   5873 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5874 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5875 			break;
   5876 		default:
   5877 			if (sc->sc_type == WM_T_82575) {
   5878 				mask = 0;
   5879 				for (i = 0; i < sc->sc_nqueues; i++) {
   5880 					wmq = &sc->sc_queue[i];
   5881 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5882 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5883 				}
   5884 				mask |= EITR_OTHER;
   5885 			} else {
   5886 				mask = 0;
   5887 				for (i = 0; i < sc->sc_nqueues; i++) {
   5888 					wmq = &sc->sc_queue[i];
   5889 					mask |= 1 << wmq->wmq_intr_idx;
   5890 				}
   5891 				mask |= 1 << sc->sc_link_intr_idx;
   5892 			}
   5893 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5894 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5895 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5896 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5897 			break;
   5898 		}
   5899 	} else
   5900 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5901 
   5902 	/* Set up the inter-packet gap. */
   5903 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5904 
   5905 	if (sc->sc_type >= WM_T_82543) {
   5906 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5907 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5908 			wm_itrs_writereg(sc, wmq);
   5909 		}
   5910 		/*
    5911 		 * Link interrupts occur much less frequently than TX
    5912 		 * and RX interrupts, so we don't tune the
    5913 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5914 		 * FreeBSD's if_igb does.
   5915 		 */
   5916 	}
   5917 
   5918 	/* Set the VLAN ethernetype. */
   5919 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5920 
   5921 	/*
   5922 	 * Set up the transmit control register; we start out with
    5923 	 * a collision distance suitable for FDX, but update it when
   5924 	 * we resolve the media type.
   5925 	 */
   5926 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5927 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5928 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5929 	if (sc->sc_type >= WM_T_82571)
   5930 		sc->sc_tctl |= TCTL_MULR;
   5931 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5932 
   5933 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5934 		/* Write TDT after TCTL.EN is set. See the document. */
   5935 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5936 	}
   5937 
   5938 	if (sc->sc_type == WM_T_80003) {
   5939 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5940 		reg &= ~TCTL_EXT_GCEX_MASK;
   5941 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5942 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5943 	}
   5944 
   5945 	/* Set the media. */
   5946 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5947 		goto out;
   5948 
   5949 	/* Configure for OS presence */
   5950 	wm_init_manageability(sc);
   5951 
   5952 	/*
   5953 	 * Set up the receive control register; we actually program the
   5954 	 * register when we set the receive filter. Use multicast address
   5955 	 * offset type 0.
   5956 	 *
   5957 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5958 	 * don't enable that feature.
   5959 	 */
   5960 	sc->sc_mchash_type = 0;
   5961 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5962 	    | RCTL_MO(sc->sc_mchash_type);
   5963 
   5964 	/*
    5965 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5966 	 */
   5967 	if (sc->sc_type == WM_T_82574)
   5968 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5969 
   5970 	/*
   5971 	 * The I350 has a bug where it always strips the CRC whether
    5972 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5973 	 */
   5974 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5975 	    || (sc->sc_type == WM_T_I210))
   5976 		sc->sc_rctl |= RCTL_SECRC;
   5977 
   5978 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5979 	    && (ifp->if_mtu > ETHERMTU)) {
   5980 		sc->sc_rctl |= RCTL_LPE;
   5981 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5982 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5983 	}
   5984 
   5985 	if (MCLBYTES == 2048)
   5986 		sc->sc_rctl |= RCTL_2k;
   5987 	else {
   5988 		if (sc->sc_type >= WM_T_82543) {
   5989 			switch (MCLBYTES) {
   5990 			case 4096:
   5991 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5992 				break;
   5993 			case 8192:
   5994 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5995 				break;
   5996 			case 16384:
   5997 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5998 				break;
   5999 			default:
   6000 				panic("wm_init: MCLBYTES %d unsupported",
   6001 				    MCLBYTES);
   6002 				break;
   6003 			}
   6004 		} else
   6005 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6006 	}
   6007 
   6008 	/* Enable ECC */
   6009 	switch (sc->sc_type) {
   6010 	case WM_T_82571:
   6011 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6012 		reg |= PBA_ECC_CORR_EN;
   6013 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6014 		break;
   6015 	case WM_T_PCH_LPT:
   6016 	case WM_T_PCH_SPT:
   6017 	case WM_T_PCH_CNP:
   6018 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6019 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6020 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6021 
   6022 		sc->sc_ctrl |= CTRL_MEHE;
   6023 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6024 		break;
   6025 	default:
   6026 		break;
   6027 	}
   6028 
   6029 	/*
   6030 	 * Set the receive filter.
   6031 	 *
   6032 	 * For 82575 and 82576, the RX descriptors must be initialized after
    6033 	 * the setting of RCTL.EN in wm_set_filter().
   6034 	 */
   6035 	wm_set_filter(sc);
   6036 
    6037 	/* On the 82575 and later, set RDT only if RX is enabled */
   6038 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6039 		int qidx;
   6040 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6041 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6042 			for (i = 0; i < WM_NRXDESC; i++) {
   6043 				mutex_enter(rxq->rxq_lock);
   6044 				wm_init_rxdesc(rxq, i);
   6045 				mutex_exit(rxq->rxq_lock);
   6046 
   6047 			}
   6048 		}
   6049 	}
   6050 
   6051 	wm_unset_stopping_flags(sc);
   6052 
   6053 	/* Start the one second link check clock. */
   6054 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6055 
   6056 	/* ...all done! */
   6057 	ifp->if_flags |= IFF_RUNNING;
   6058 	ifp->if_flags &= ~IFF_OACTIVE;
   6059 
   6060  out:
   6061 	sc->sc_if_flags = ifp->if_flags;
   6062 	if (error)
   6063 		log(LOG_ERR, "%s: interface not running\n",
   6064 		    device_xname(sc->sc_dev));
   6065 	return error;
   6066 }
   6067 
   6068 /*
   6069  * wm_stop:		[ifnet interface function]
   6070  *
   6071  *	Stop transmission on the interface.
   6072  */
   6073 static void
   6074 wm_stop(struct ifnet *ifp, int disable)
   6075 {
   6076 	struct wm_softc *sc = ifp->if_softc;
   6077 
   6078 	WM_CORE_LOCK(sc);
   6079 	wm_stop_locked(ifp, disable);
   6080 	WM_CORE_UNLOCK(sc);
   6081 }
   6082 
   6083 static void
   6084 wm_stop_locked(struct ifnet *ifp, int disable)
   6085 {
   6086 	struct wm_softc *sc = ifp->if_softc;
   6087 	struct wm_txsoft *txs;
   6088 	int i, qidx;
   6089 
   6090 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6091 		device_xname(sc->sc_dev), __func__));
   6092 	KASSERT(WM_CORE_LOCKED(sc));
   6093 
   6094 	wm_set_stopping_flags(sc);
   6095 
   6096 	/* Stop the one second clock. */
   6097 	callout_stop(&sc->sc_tick_ch);
   6098 
   6099 	/* Stop the 82547 Tx FIFO stall check timer. */
   6100 	if (sc->sc_type == WM_T_82547)
   6101 		callout_stop(&sc->sc_txfifo_ch);
   6102 
   6103 	if (sc->sc_flags & WM_F_HAS_MII) {
   6104 		/* Down the MII. */
   6105 		mii_down(&sc->sc_mii);
   6106 	} else {
   6107 #if 0
   6108 		/* Should we clear PHY's status properly? */
   6109 		wm_reset(sc);
   6110 #endif
   6111 	}
   6112 
   6113 	/* Stop the transmit and receive processes. */
   6114 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6115 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6116 	sc->sc_rctl &= ~RCTL_EN;
   6117 
   6118 	/*
   6119 	 * Clear the interrupt mask to ensure the device cannot assert its
   6120 	 * interrupt line.
   6121 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6122 	 * service any currently pending or shared interrupt.
   6123 	 */
   6124 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6125 	sc->sc_icr = 0;
   6126 	if (wm_is_using_msix(sc)) {
   6127 		if (sc->sc_type != WM_T_82574) {
   6128 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6129 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6130 		} else
   6131 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6132 	}
   6133 
   6134 	/* Release any queued transmit buffers. */
   6135 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6136 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6137 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6138 		mutex_enter(txq->txq_lock);
   6139 		txq->txq_sending = false; /* ensure watchdog disabled */
   6140 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6141 			txs = &txq->txq_soft[i];
   6142 			if (txs->txs_mbuf != NULL) {
   6143 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6144 				m_freem(txs->txs_mbuf);
   6145 				txs->txs_mbuf = NULL;
   6146 			}
   6147 		}
   6148 		mutex_exit(txq->txq_lock);
   6149 	}
   6150 
   6151 	/* Mark the interface as down and cancel the watchdog timer. */
   6152 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6153 
   6154 	if (disable) {
   6155 		for (i = 0; i < sc->sc_nqueues; i++) {
   6156 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6157 			mutex_enter(rxq->rxq_lock);
   6158 			wm_rxdrain(rxq);
   6159 			mutex_exit(rxq->rxq_lock);
   6160 		}
   6161 	}
   6162 
   6163 #if 0 /* notyet */
   6164 	if (sc->sc_type >= WM_T_82544)
   6165 		CSR_WRITE(sc, WMREG_WUC, 0);
   6166 #endif
   6167 }
   6168 
   6169 static void
   6170 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6171 {
   6172 	struct mbuf *m;
   6173 	int i;
   6174 
   6175 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6176 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6177 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6178 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6179 		    m->m_data, m->m_len, m->m_flags);
   6180 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6181 	    i, i == 1 ? "" : "s");
   6182 }
   6183 
   6184 /*
   6185  * wm_82547_txfifo_stall:
   6186  *
   6187  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6188  *	reset the FIFO pointers, and restart packet transmission.
   6189  */
   6190 static void
   6191 wm_82547_txfifo_stall(void *arg)
   6192 {
   6193 	struct wm_softc *sc = arg;
   6194 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6195 
   6196 	mutex_enter(txq->txq_lock);
   6197 
   6198 	if (txq->txq_stopping)
   6199 		goto out;
   6200 
   6201 	if (txq->txq_fifo_stall) {
   6202 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6203 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6204 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6205 			/*
   6206 			 * Packets have drained.  Stop transmitter, reset
   6207 			 * FIFO pointers, restart transmitter, and kick
   6208 			 * the packet queue.
   6209 			 */
   6210 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6211 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6212 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6213 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6214 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6215 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6216 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6217 			CSR_WRITE_FLUSH(sc);
   6218 
   6219 			txq->txq_fifo_head = 0;
   6220 			txq->txq_fifo_stall = 0;
   6221 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6222 		} else {
   6223 			/*
   6224 			 * Still waiting for packets to drain; try again in
   6225 			 * another tick.
   6226 			 */
   6227 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6228 		}
   6229 	}
   6230 
   6231 out:
   6232 	mutex_exit(txq->txq_lock);
   6233 }
   6234 
   6235 /*
   6236  * wm_82547_txfifo_bugchk:
   6237  *
   6238  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6239  *	prevent enqueueing a packet that would wrap around the end
    6240  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6241  *
   6242  *	We do this by checking the amount of space before the end
   6243  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6244  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6245  *	the internal FIFO pointers to the beginning, and restart
   6246  *	transmission on the interface.
   6247  */
   6248 #define	WM_FIFO_HDR		0x10
   6249 #define	WM_82547_PAD_LEN	0x3e0
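/*
 * These match the 82547 workaround constants used by other drivers
 * (e.g. Linux e1000's E1000_FIFO_HDR and E1000_82547_PAD_LEN); packet
 * lengths are accounted in WM_FIFO_HDR granularity.
 */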
   6250 static int
   6251 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6252 {
   6253 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6254 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6255 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6256 
   6257 	/* Just return if already stalled. */
   6258 	if (txq->txq_fifo_stall)
   6259 		return 1;
   6260 
   6261 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6262 		/* Stall only occurs in half-duplex mode. */
   6263 		goto send_packet;
   6264 	}
   6265 
   6266 	if (len >= WM_82547_PAD_LEN + space) {
   6267 		txq->txq_fifo_stall = 1;
   6268 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6269 		return 1;
   6270 	}
   6271 
   6272  send_packet:
   6273 	txq->txq_fifo_head += len;
   6274 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6275 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6276 
   6277 	return 0;
   6278 }
   6279 
   6280 static int
   6281 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6282 {
   6283 	int error;
   6284 
   6285 	/*
   6286 	 * Allocate the control data structures, and create and load the
   6287 	 * DMA map for it.
   6288 	 *
   6289 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6290 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6291 	 * both sets within the same 4G segment.
   6292 	 */
   6293 	if (sc->sc_type < WM_T_82544)
   6294 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6295 	else
   6296 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6297 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6298 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6299 	else
   6300 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6301 
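	/*
	 * The 4G boundary argument to bus_dmamem_alloc() below is what
	 * enforces the same-4G-segment requirement noted above.
	 */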
   6302 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6303 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6304 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6305 		aprint_error_dev(sc->sc_dev,
   6306 		    "unable to allocate TX control data, error = %d\n",
   6307 		    error);
   6308 		goto fail_0;
   6309 	}
   6310 
   6311 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6312 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6313 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6314 		aprint_error_dev(sc->sc_dev,
   6315 		    "unable to map TX control data, error = %d\n", error);
   6316 		goto fail_1;
   6317 	}
   6318 
   6319 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6320 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6321 		aprint_error_dev(sc->sc_dev,
   6322 		    "unable to create TX control data DMA map, error = %d\n",
   6323 		    error);
   6324 		goto fail_2;
   6325 	}
   6326 
   6327 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6328 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6329 		aprint_error_dev(sc->sc_dev,
   6330 		    "unable to load TX control data DMA map, error = %d\n",
   6331 		    error);
   6332 		goto fail_3;
   6333 	}
   6334 
   6335 	return 0;
   6336 
   6337  fail_3:
   6338 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6339  fail_2:
   6340 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6341 	    WM_TXDESCS_SIZE(txq));
   6342  fail_1:
   6343 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6344  fail_0:
   6345 	return error;
   6346 }
   6347 
   6348 static void
   6349 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6350 {
   6351 
   6352 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6353 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6354 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6355 	    WM_TXDESCS_SIZE(txq));
   6356 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6357 }
   6358 
   6359 static int
   6360 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6361 {
   6362 	int error;
   6363 	size_t rxq_descs_size;
   6364 
   6365 	/*
   6366 	 * Allocate the control data structures, and create and load the
   6367 	 * DMA map for it.
   6368 	 *
    6369 	 * NOTE: All Rx descriptors must be in the same 4G segment of
    6370 	 * memory, as must the Tx descriptors.  We simplify by allocating
   6371 	 * both sets within the same 4G segment.
   6372 	 */
   6373 	rxq->rxq_ndesc = WM_NRXDESC;
   6374 	if (sc->sc_type == WM_T_82574)
   6375 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6376 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6377 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6378 	else
   6379 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6380 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6381 
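	/* The same 4G boundary is applied here as on the Tx side. */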
   6382 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6383 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6384 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6385 		aprint_error_dev(sc->sc_dev,
   6386 		    "unable to allocate RX control data, error = %d\n",
   6387 		    error);
   6388 		goto fail_0;
   6389 	}
   6390 
   6391 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6392 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6393 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6394 		aprint_error_dev(sc->sc_dev,
   6395 		    "unable to map RX control data, error = %d\n", error);
   6396 		goto fail_1;
   6397 	}
   6398 
   6399 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6400 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6401 		aprint_error_dev(sc->sc_dev,
   6402 		    "unable to create RX control data DMA map, error = %d\n",
   6403 		    error);
   6404 		goto fail_2;
   6405 	}
   6406 
   6407 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6408 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6409 		aprint_error_dev(sc->sc_dev,
   6410 		    "unable to load RX control data DMA map, error = %d\n",
   6411 		    error);
   6412 		goto fail_3;
   6413 	}
   6414 
   6415 	return 0;
   6416 
   6417  fail_3:
   6418 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6419  fail_2:
   6420 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6421 	    rxq_descs_size);
   6422  fail_1:
   6423 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6424  fail_0:
   6425 	return error;
   6426 }
   6427 
   6428 static void
   6429 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6430 {
   6431 
   6432 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6433 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6434 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6435 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6436 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6437 }
   6438 
   6439 
   6440 static int
   6441 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6442 {
   6443 	int i, error;
   6444 
   6445 	/* Create the transmit buffer DMA maps. */
   6446 	WM_TXQUEUELEN(txq) =
   6447 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6448 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6449 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6450 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6451 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6452 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6453 			aprint_error_dev(sc->sc_dev,
   6454 			    "unable to create Tx DMA map %d, error = %d\n",
   6455 			    i, error);
   6456 			goto fail;
   6457 		}
   6458 	}
   6459 
   6460 	return 0;
   6461 
   6462  fail:
   6463 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6464 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6465 			bus_dmamap_destroy(sc->sc_dmat,
   6466 			    txq->txq_soft[i].txs_dmamap);
   6467 	}
   6468 	return error;
   6469 }
   6470 
   6471 static void
   6472 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6473 {
   6474 	int i;
   6475 
   6476 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6477 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6478 			bus_dmamap_destroy(sc->sc_dmat,
   6479 			    txq->txq_soft[i].txs_dmamap);
   6480 	}
   6481 }
   6482 
   6483 static int
   6484 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6485 {
   6486 	int i, error;
   6487 
   6488 	/* Create the receive buffer DMA maps. */
   6489 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6490 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6491 			    MCLBYTES, 0, 0,
   6492 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6493 			aprint_error_dev(sc->sc_dev,
   6494 			    "unable to create Rx DMA map %d error = %d\n",
   6495 			    i, error);
   6496 			goto fail;
   6497 		}
   6498 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6499 	}
   6500 
   6501 	return 0;
   6502 
   6503  fail:
   6504 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6505 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6506 			bus_dmamap_destroy(sc->sc_dmat,
   6507 			    rxq->rxq_soft[i].rxs_dmamap);
   6508 	}
   6509 	return error;
   6510 }
   6511 
   6512 static void
   6513 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6514 {
   6515 	int i;
   6516 
   6517 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6518 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6519 			bus_dmamap_destroy(sc->sc_dmat,
   6520 			    rxq->rxq_soft[i].rxs_dmamap);
   6521 	}
   6522 }
   6523 
   6524 /*
    6525  * wm_alloc_txrx_queues:
    6526  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6527  */
   6528 static int
   6529 wm_alloc_txrx_queues(struct wm_softc *sc)
   6530 {
   6531 	int i, error, tx_done, rx_done;
   6532 
   6533 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6534 	    KM_SLEEP);
   6535 	if (sc->sc_queue == NULL) {
    6536 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6537 		error = ENOMEM;
   6538 		goto fail_0;
   6539 	}
   6540 
   6541 	/*
   6542 	 * For transmission
   6543 	 */
   6544 	error = 0;
   6545 	tx_done = 0;
   6546 	for (i = 0; i < sc->sc_nqueues; i++) {
   6547 #ifdef WM_EVENT_COUNTERS
   6548 		int j;
   6549 		const char *xname;
   6550 #endif
   6551 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6552 		txq->txq_sc = sc;
   6553 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6554 
   6555 		error = wm_alloc_tx_descs(sc, txq);
   6556 		if (error)
   6557 			break;
   6558 		error = wm_alloc_tx_buffer(sc, txq);
   6559 		if (error) {
   6560 			wm_free_tx_descs(sc, txq);
   6561 			break;
   6562 		}
   6563 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6564 		if (txq->txq_interq == NULL) {
   6565 			wm_free_tx_descs(sc, txq);
   6566 			wm_free_tx_buffer(sc, txq);
   6567 			error = ENOMEM;
   6568 			break;
   6569 		}
   6570 
   6571 #ifdef WM_EVENT_COUNTERS
   6572 		xname = device_xname(sc->sc_dev);
   6573 
   6574 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6575 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6576 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6577 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6578 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6579 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6580 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6581 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6582 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6583 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6584 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6585 
   6586 		for (j = 0; j < WM_NTXSEGS; j++) {
   6587 			snprintf(txq->txq_txseg_evcnt_names[j],
   6588 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6589 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6590 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6591 		}
   6592 
   6593 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6594 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6595 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6596 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6597 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6598 #endif /* WM_EVENT_COUNTERS */
   6599 
   6600 		tx_done++;
   6601 	}
   6602 	if (error)
   6603 		goto fail_1;
   6604 
   6605 	/*
    6606 	 * For receive
   6607 	 */
   6608 	error = 0;
   6609 	rx_done = 0;
   6610 	for (i = 0; i < sc->sc_nqueues; i++) {
   6611 #ifdef WM_EVENT_COUNTERS
   6612 		const char *xname;
   6613 #endif
   6614 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6615 		rxq->rxq_sc = sc;
   6616 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6617 
   6618 		error = wm_alloc_rx_descs(sc, rxq);
   6619 		if (error)
   6620 			break;
   6621 
   6622 		error = wm_alloc_rx_buffer(sc, rxq);
   6623 		if (error) {
   6624 			wm_free_rx_descs(sc, rxq);
   6625 			break;
   6626 		}
   6627 
   6628 #ifdef WM_EVENT_COUNTERS
   6629 		xname = device_xname(sc->sc_dev);
   6630 
   6631 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6632 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6633 
   6634 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6635 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6636 #endif /* WM_EVENT_COUNTERS */
   6637 
   6638 		rx_done++;
   6639 	}
   6640 	if (error)
   6641 		goto fail_2;
   6642 
   6643 	return 0;
   6644 
   6645  fail_2:
   6646 	for (i = 0; i < rx_done; i++) {
   6647 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6648 		wm_free_rx_buffer(sc, rxq);
   6649 		wm_free_rx_descs(sc, rxq);
   6650 		if (rxq->rxq_lock)
   6651 			mutex_obj_free(rxq->rxq_lock);
   6652 	}
   6653  fail_1:
   6654 	for (i = 0; i < tx_done; i++) {
   6655 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6656 		pcq_destroy(txq->txq_interq);
   6657 		wm_free_tx_buffer(sc, txq);
   6658 		wm_free_tx_descs(sc, txq);
   6659 		if (txq->txq_lock)
   6660 			mutex_obj_free(txq->txq_lock);
   6661 	}
   6662 
   6663 	kmem_free(sc->sc_queue,
   6664 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6665  fail_0:
   6666 	return error;
   6667 }
   6668 
   6669 /*
    6670  * wm_free_txrx_queues:
    6671  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6672  */
   6673 static void
   6674 wm_free_txrx_queues(struct wm_softc *sc)
   6675 {
   6676 	int i;
   6677 
   6678 	for (i = 0; i < sc->sc_nqueues; i++) {
   6679 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6680 
   6681 #ifdef WM_EVENT_COUNTERS
   6682 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6683 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6684 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6685 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6686 #endif /* WM_EVENT_COUNTERS */
   6687 
   6688 		wm_free_rx_buffer(sc, rxq);
   6689 		wm_free_rx_descs(sc, rxq);
   6690 		if (rxq->rxq_lock)
   6691 			mutex_obj_free(rxq->rxq_lock);
   6692 	}
   6693 
   6694 	for (i = 0; i < sc->sc_nqueues; i++) {
   6695 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6696 		struct mbuf *m;
   6697 #ifdef WM_EVENT_COUNTERS
   6698 		int j;
   6699 
   6700 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6701 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6702 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6703 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6704 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6705 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6706 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6707 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6708 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6709 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6710 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6711 
   6712 		for (j = 0; j < WM_NTXSEGS; j++)
   6713 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6714 
   6715 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6716 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6717 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6718 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6719 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6720 #endif /* WM_EVENT_COUNTERS */
   6721 
   6722 		/* drain txq_interq */
   6723 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6724 			m_freem(m);
   6725 		pcq_destroy(txq->txq_interq);
   6726 
   6727 		wm_free_tx_buffer(sc, txq);
   6728 		wm_free_tx_descs(sc, txq);
   6729 		if (txq->txq_lock)
   6730 			mutex_obj_free(txq->txq_lock);
   6731 	}
   6732 
   6733 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6734 }
   6735 
   6736 static void
   6737 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6738 {
   6739 
   6740 	KASSERT(mutex_owned(txq->txq_lock));
   6741 
   6742 	/* Initialize the transmit descriptor ring. */
   6743 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6744 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6745 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6746 	txq->txq_free = WM_NTXDESC(txq);
   6747 	txq->txq_next = 0;
   6748 }
   6749 
   6750 static void
   6751 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6752     struct wm_txqueue *txq)
   6753 {
   6754 
   6755 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6756 		device_xname(sc->sc_dev), __func__));
   6757 	KASSERT(mutex_owned(txq->txq_lock));
   6758 
   6759 	if (sc->sc_type < WM_T_82543) {
   6760 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6761 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6762 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6763 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6764 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6765 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6766 	} else {
   6767 		int qid = wmq->wmq_id;
   6768 
   6769 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6770 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6771 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6772 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6773 
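		/*
		 * TXDCTL's PTHRESH, HTHRESH and WTHRESH fields are the
		 * descriptor prefetch, host and write-back thresholds,
		 * respectively.
		 */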
   6774 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6775 			/*
   6776 			 * Don't write TDT before TCTL.EN is set.
    6777 			 * See the documentation.
   6778 			 */
   6779 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6780 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6781 			    | TXDCTL_WTHRESH(0));
   6782 		else {
   6783 			/* XXX should update with AIM? */
   6784 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6785 			if (sc->sc_type >= WM_T_82540) {
    6786 				/* Should be the same value as TIDV above. */
   6787 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6788 			}
   6789 
   6790 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6791 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6792 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6793 		}
   6794 	}
   6795 }
   6796 
   6797 static void
   6798 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6799 {
   6800 	int i;
   6801 
   6802 	KASSERT(mutex_owned(txq->txq_lock));
   6803 
   6804 	/* Initialize the transmit job descriptors. */
   6805 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6806 		txq->txq_soft[i].txs_mbuf = NULL;
   6807 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6808 	txq->txq_snext = 0;
   6809 	txq->txq_sdirty = 0;
   6810 }
   6811 
   6812 static void
   6813 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6814     struct wm_txqueue *txq)
   6815 {
   6816 
   6817 	KASSERT(mutex_owned(txq->txq_lock));
   6818 
   6819 	/*
   6820 	 * Set up some register offsets that are different between
   6821 	 * the i82542 and the i82543 and later chips.
   6822 	 */
   6823 	if (sc->sc_type < WM_T_82543)
   6824 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6825 	else
   6826 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6827 
   6828 	wm_init_tx_descs(sc, txq);
   6829 	wm_init_tx_regs(sc, wmq, txq);
   6830 	wm_init_tx_buffer(sc, txq);
   6831 
   6832 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6833 	txq->txq_sending = false;
   6834 }
   6835 
   6836 static void
   6837 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6838     struct wm_rxqueue *rxq)
   6839 {
   6840 
   6841 	KASSERT(mutex_owned(rxq->rxq_lock));
   6842 
   6843 	/*
   6844 	 * Initialize the receive descriptor and receive job
   6845 	 * descriptor rings.
   6846 	 */
   6847 	if (sc->sc_type < WM_T_82543) {
   6848 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6849 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6850 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6851 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6852 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6853 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6854 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6855 
   6856 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6857 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6858 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6859 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6860 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6861 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6862 	} else {
   6863 		int qid = wmq->wmq_id;
   6864 
   6865 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6866 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6867 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6868 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6869 
   6870 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
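			/*
			 * SRRCTL expresses the packet buffer size in
			 * (1 << SRRCTL_BSIZEPKT_SHIFT)-byte units, so
			 * MCLBYTES must be a multiple of that unit.
			 */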
   6871 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6872 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6873 
    6874 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6875 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6876 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6877 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6878 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6879 			    | RXDCTL_WTHRESH(1));
   6880 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6881 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6882 		} else {
   6883 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6884 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6885 			/* XXX should update with AIM? */
   6886 			CSR_WRITE(sc, WMREG_RDTR,
   6887 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6888 			/* MUST be the same value as RDTR above. */
   6889 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6890 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6891 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6892 		}
   6893 	}
   6894 }
   6895 
   6896 static int
   6897 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6898 {
   6899 	struct wm_rxsoft *rxs;
   6900 	int error, i;
   6901 
   6902 	KASSERT(mutex_owned(rxq->rxq_lock));
   6903 
   6904 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6905 		rxs = &rxq->rxq_soft[i];
   6906 		if (rxs->rxs_mbuf == NULL) {
   6907 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6908 				log(LOG_ERR, "%s: unable to allocate or map "
   6909 				    "rx buffer %d, error = %d\n",
   6910 				    device_xname(sc->sc_dev), i, error);
   6911 				/*
   6912 				 * XXX Should attempt to run with fewer receive
   6913 				 * XXX buffers instead of just failing.
   6914 				 */
   6915 				wm_rxdrain(rxq);
   6916 				return ENOMEM;
   6917 			}
   6918 		} else {
   6919 			/*
   6920 			 * For 82575 and 82576, the RX descriptors must be
   6921 			 * initialized after the setting of RCTL.EN in
   6922 			 * wm_set_filter()
   6923 			 */
   6924 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6925 				wm_init_rxdesc(rxq, i);
   6926 		}
   6927 	}
   6928 	rxq->rxq_ptr = 0;
   6929 	rxq->rxq_discard = 0;
   6930 	WM_RXCHAIN_RESET(rxq);
   6931 
   6932 	return 0;
   6933 }
   6934 
   6935 static int
   6936 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6937     struct wm_rxqueue *rxq)
   6938 {
   6939 
   6940 	KASSERT(mutex_owned(rxq->rxq_lock));
   6941 
   6942 	/*
   6943 	 * Set up some register offsets that are different between
   6944 	 * the i82542 and the i82543 and later chips.
   6945 	 */
   6946 	if (sc->sc_type < WM_T_82543)
   6947 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6948 	else
   6949 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6950 
   6951 	wm_init_rx_regs(sc, wmq, rxq);
   6952 	return wm_init_rx_buffer(sc, rxq);
   6953 }
   6954 
   6955 /*
    6956  * wm_init_txrx_queues:
    6957  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6958  */
   6959 static int
   6960 wm_init_txrx_queues(struct wm_softc *sc)
   6961 {
   6962 	int i, error = 0;
   6963 
   6964 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6965 		device_xname(sc->sc_dev), __func__));
   6966 
   6967 	for (i = 0; i < sc->sc_nqueues; i++) {
   6968 		struct wm_queue *wmq = &sc->sc_queue[i];
   6969 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6970 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6971 
   6972 		/*
   6973 		 * TODO
    6974 		 * Currently, a constant value is used instead of AIM
    6975 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
    6976 		 * interval of a multiqueue setup using polling mode is shorter
    6977 		 * than the default value.  More tuning and AIM are required.
   6978 		 */
   6979 		if (wm_is_using_multiqueue(sc))
   6980 			wmq->wmq_itr = 50;
   6981 		else
   6982 			wmq->wmq_itr = sc->sc_itr_init;
   6983 		wmq->wmq_set_itr = true;
   6984 
   6985 		mutex_enter(txq->txq_lock);
   6986 		wm_init_tx_queue(sc, wmq, txq);
   6987 		mutex_exit(txq->txq_lock);
   6988 
   6989 		mutex_enter(rxq->rxq_lock);
   6990 		error = wm_init_rx_queue(sc, wmq, rxq);
   6991 		mutex_exit(rxq->rxq_lock);
   6992 		if (error)
   6993 			break;
   6994 	}
   6995 
   6996 	return error;
   6997 }
   6998 
   6999 /*
   7000  * wm_tx_offload:
   7001  *
   7002  *	Set up TCP/IP checksumming parameters for the
   7003  *	specified packet.
   7004  */
   7005 static int
   7006 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7007     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7008 {
   7009 	struct mbuf *m0 = txs->txs_mbuf;
   7010 	struct livengood_tcpip_ctxdesc *t;
   7011 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7012 	uint32_t ipcse;
   7013 	struct ether_header *eh;
   7014 	int offset, iphl;
   7015 	uint8_t fields;
   7016 
   7017 	/*
   7018 	 * XXX It would be nice if the mbuf pkthdr had offset
   7019 	 * fields for the protocol headers.
   7020 	 */
   7021 
   7022 	eh = mtod(m0, struct ether_header *);
   7023 	switch (htons(eh->ether_type)) {
   7024 	case ETHERTYPE_IP:
   7025 	case ETHERTYPE_IPV6:
   7026 		offset = ETHER_HDR_LEN;
   7027 		break;
   7028 
   7029 	case ETHERTYPE_VLAN:
   7030 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7031 		break;
   7032 
   7033 	default:
   7034 		/*
   7035 		 * Don't support this protocol or encapsulation.
   7036 		 */
   7037 		*fieldsp = 0;
   7038 		*cmdp = 0;
   7039 		return 0;
   7040 	}
   7041 
   7042 	if ((m0->m_pkthdr.csum_flags &
   7043 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7044 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7045 	} else
   7046 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7047 
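	/* IPCSE is the inclusive offset of the last byte to checksum. */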
   7048 	ipcse = offset + iphl - 1;
   7049 
   7050 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7051 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7052 	seg = 0;
   7053 	fields = 0;
   7054 
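	/*
	 * TSO preparation: the controller rebuilds the length and checksum
	 * fields for each segment, so the IP total-length field is zeroed
	 * here and th_sum is seeded with a pseudo-header checksum that
	 * excludes the length.
	 */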
   7055 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7056 		int hlen = offset + iphl;
   7057 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7058 
   7059 		if (__predict_false(m0->m_len <
   7060 				    (hlen + sizeof(struct tcphdr)))) {
   7061 			/*
   7062 			 * TCP/IP headers are not in the first mbuf; we need
   7063 			 * to do this the slow and painful way. Let's just
   7064 			 * hope this doesn't happen very often.
   7065 			 */
   7066 			struct tcphdr th;
   7067 
   7068 			WM_Q_EVCNT_INCR(txq, tsopain);
   7069 
   7070 			m_copydata(m0, hlen, sizeof(th), &th);
   7071 			if (v4) {
   7072 				struct ip ip;
   7073 
   7074 				m_copydata(m0, offset, sizeof(ip), &ip);
   7075 				ip.ip_len = 0;
   7076 				m_copyback(m0,
   7077 				    offset + offsetof(struct ip, ip_len),
   7078 				    sizeof(ip.ip_len), &ip.ip_len);
   7079 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7080 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7081 			} else {
   7082 				struct ip6_hdr ip6;
   7083 
   7084 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7085 				ip6.ip6_plen = 0;
   7086 				m_copyback(m0,
   7087 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7088 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7089 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7090 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7091 			}
   7092 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7093 			    sizeof(th.th_sum), &th.th_sum);
   7094 
   7095 			hlen += th.th_off << 2;
   7096 		} else {
   7097 			/*
   7098 			 * TCP/IP headers are in the first mbuf; we can do
   7099 			 * this the easy way.
   7100 			 */
   7101 			struct tcphdr *th;
   7102 
   7103 			if (v4) {
   7104 				struct ip *ip =
   7105 				    (void *)(mtod(m0, char *) + offset);
   7106 				th = (void *)(mtod(m0, char *) + hlen);
   7107 
   7108 				ip->ip_len = 0;
   7109 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7110 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7111 			} else {
   7112 				struct ip6_hdr *ip6 =
   7113 				    (void *)(mtod(m0, char *) + offset);
   7114 				th = (void *)(mtod(m0, char *) + hlen);
   7115 
   7116 				ip6->ip6_plen = 0;
   7117 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7118 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7119 			}
   7120 			hlen += th->th_off << 2;
   7121 		}
   7122 
   7123 		if (v4) {
   7124 			WM_Q_EVCNT_INCR(txq, tso);
   7125 			cmdlen |= WTX_TCPIP_CMD_IP;
   7126 		} else {
   7127 			WM_Q_EVCNT_INCR(txq, tso6);
   7128 			ipcse = 0;
   7129 		}
   7130 		cmd |= WTX_TCPIP_CMD_TSE;
   7131 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7132 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7133 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7134 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7135 	}
   7136 
   7137 	/*
   7138 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7139 	 * offload feature, if we load the context descriptor, we
   7140 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7141 	 */
   7142 
   7143 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7144 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7145 	    WTX_TCPIP_IPCSE(ipcse);
   7146 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7147 		WM_Q_EVCNT_INCR(txq, ipsum);
   7148 		fields |= WTX_IXSM;
   7149 	}
   7150 
   7151 	offset += iphl;
   7152 
   7153 	if (m0->m_pkthdr.csum_flags &
   7154 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7155 		WM_Q_EVCNT_INCR(txq, tusum);
   7156 		fields |= WTX_TXSM;
   7157 		tucs = WTX_TCPIP_TUCSS(offset) |
   7158 		    WTX_TCPIP_TUCSO(offset +
   7159 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7160 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7161 	} else if ((m0->m_pkthdr.csum_flags &
   7162 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7163 		WM_Q_EVCNT_INCR(txq, tusum6);
   7164 		fields |= WTX_TXSM;
   7165 		tucs = WTX_TCPIP_TUCSS(offset) |
   7166 		    WTX_TCPIP_TUCSO(offset +
   7167 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7168 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7169 	} else {
   7170 		/* Just initialize it to a valid TCP context. */
   7171 		tucs = WTX_TCPIP_TUCSS(offset) |
   7172 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7173 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7174 	}
   7175 
   7176 	/*
    7177 	 * We don't have to write a context descriptor for every packet,
    7178 	 * except on the 82574: when two descriptor queues are used, that
    7179 	 * chip needs a context descriptor for every packet.
    7180 	 * Writing a context descriptor for every packet adds overhead,
    7181 	 * but it does not cause problems.
   7182 	 */
   7183 	/* Fill in the context descriptor. */
   7184 	t = (struct livengood_tcpip_ctxdesc *)
   7185 	    &txq->txq_descs[txq->txq_next];
   7186 	t->tcpip_ipcs = htole32(ipcs);
   7187 	t->tcpip_tucs = htole32(tucs);
   7188 	t->tcpip_cmdlen = htole32(cmdlen);
   7189 	t->tcpip_seg = htole32(seg);
   7190 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7191 
   7192 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7193 	txs->txs_ndesc++;
   7194 
   7195 	*cmdp = cmd;
   7196 	*fieldsp = fields;
   7197 
   7198 	return 0;
   7199 }
   7200 
   7201 static inline int
   7202 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7203 {
   7204 	struct wm_softc *sc = ifp->if_softc;
   7205 	u_int cpuid = cpu_index(curcpu());
   7206 
   7207 	/*
    7208 	 * Currently, a simple distribution strategy: ncpu is added so the
    7209 	 * subtraction cannot wrap when sc_affinity_offset exceeds cpuid.
    7210 	 * TODO: distribute by flowid (RSS hash value).
   7211 	 */
   7212 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7213 }
   7214 
   7215 /*
   7216  * wm_start:		[ifnet interface function]
   7217  *
   7218  *	Start packet transmission on the interface.
   7219  */
   7220 static void
   7221 wm_start(struct ifnet *ifp)
   7222 {
   7223 	struct wm_softc *sc = ifp->if_softc;
   7224 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7225 
   7226 #ifdef WM_MPSAFE
   7227 	KASSERT(if_is_mpsafe(ifp));
   7228 #endif
   7229 	/*
   7230 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7231 	 */
   7232 
   7233 	mutex_enter(txq->txq_lock);
   7234 	if (!txq->txq_stopping)
   7235 		wm_start_locked(ifp);
   7236 	mutex_exit(txq->txq_lock);
   7237 }
   7238 
   7239 static void
   7240 wm_start_locked(struct ifnet *ifp)
   7241 {
   7242 	struct wm_softc *sc = ifp->if_softc;
   7243 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7244 
   7245 	wm_send_common_locked(ifp, txq, false);
   7246 }
   7247 
   7248 static int
   7249 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7250 {
   7251 	int qid;
   7252 	struct wm_softc *sc = ifp->if_softc;
   7253 	struct wm_txqueue *txq;
   7254 
   7255 	qid = wm_select_txqueue(ifp, m);
   7256 	txq = &sc->sc_queue[qid].wmq_txq;
   7257 
   7258 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7259 		m_freem(m);
   7260 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7261 		return ENOBUFS;
   7262 	}
   7263 
   7264 	/*
   7265 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7266 	 */
   7267 	ifp->if_obytes += m->m_pkthdr.len;
   7268 	if (m->m_flags & M_MCAST)
   7269 		ifp->if_omcasts++;
   7270 
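	/*
	 * See the comment in wm_nq_transmit() for why a failed
	 * mutex_tryenter() below does not strand the enqueued packet.
	 */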
   7271 	if (mutex_tryenter(txq->txq_lock)) {
   7272 		if (!txq->txq_stopping)
   7273 			wm_transmit_locked(ifp, txq);
   7274 		mutex_exit(txq->txq_lock);
   7275 	}
   7276 
   7277 	return 0;
   7278 }
   7279 
   7280 static void
   7281 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7282 {
   7283 
   7284 	wm_send_common_locked(ifp, txq, true);
   7285 }
   7286 
   7287 static void
   7288 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7289     bool is_transmit)
   7290 {
   7291 	struct wm_softc *sc = ifp->if_softc;
   7292 	struct mbuf *m0;
   7293 	struct wm_txsoft *txs;
   7294 	bus_dmamap_t dmamap;
   7295 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7296 	bus_addr_t curaddr;
   7297 	bus_size_t seglen, curlen;
   7298 	uint32_t cksumcmd;
   7299 	uint8_t cksumfields;
   7300 	bool remap = true;
   7301 
   7302 	KASSERT(mutex_owned(txq->txq_lock));
   7303 
   7304 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7305 		return;
   7306 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7307 		return;
   7308 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7309 		return;
   7310 
   7311 	/* Remember the previous number of free descriptors. */
   7312 	ofree = txq->txq_free;
   7313 
   7314 	/*
   7315 	 * Loop through the send queue, setting up transmit descriptors
   7316 	 * until we drain the queue, or use up all available transmit
   7317 	 * descriptors.
   7318 	 */
   7319 	for (;;) {
   7320 		m0 = NULL;
   7321 
   7322 		/* Get a work queue entry. */
   7323 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7324 			wm_txeof(txq, UINT_MAX);
   7325 			if (txq->txq_sfree == 0) {
   7326 				DPRINTF(WM_DEBUG_TX,
   7327 				    ("%s: TX: no free job descriptors\n",
   7328 					device_xname(sc->sc_dev)));
   7329 				WM_Q_EVCNT_INCR(txq, txsstall);
   7330 				break;
   7331 			}
   7332 		}
   7333 
   7334 		/* Grab a packet off the queue. */
   7335 		if (is_transmit)
   7336 			m0 = pcq_get(txq->txq_interq);
   7337 		else
   7338 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7339 		if (m0 == NULL)
   7340 			break;
   7341 
   7342 		DPRINTF(WM_DEBUG_TX,
   7343 		    ("%s: TX: have packet to transmit: %p\n",
   7344 			device_xname(sc->sc_dev), m0));
   7345 
   7346 		txs = &txq->txq_soft[txq->txq_snext];
   7347 		dmamap = txs->txs_dmamap;
   7348 
   7349 		use_tso = (m0->m_pkthdr.csum_flags &
   7350 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7351 
   7352 		/*
   7353 		 * So says the Linux driver:
   7354 		 * The controller does a simple calculation to make sure
   7355 		 * there is enough room in the FIFO before initiating the
   7356 		 * DMA for each buffer. The calc is:
   7357 		 *	4 = ceil(buffer len / MSS)
   7358 		 * To make sure we don't overrun the FIFO, adjust the max
   7359 		 * buffer len if the MSS drops.
   7360 		 */
   7361 		dmamap->dm_maxsegsz =
   7362 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7363 		    ? m0->m_pkthdr.segsz << 2
   7364 		    : WTX_MAX_LEN;
   7365 
   7366 		/*
   7367 		 * Load the DMA map.  If this fails, the packet either
   7368 		 * didn't fit in the allotted number of segments, or we
   7369 		 * were short on resources.  For the too-many-segments
   7370 		 * case, we simply report an error and drop the packet,
   7371 		 * since we can't sanely copy a jumbo packet to a single
   7372 		 * buffer.
   7373 		 */
   7374 retry:
   7375 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7376 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7377 		if (__predict_false(error)) {
   7378 			if (error == EFBIG) {
   7379 				if (remap == true) {
   7380 					struct mbuf *m;
   7381 
   7382 					remap = false;
   7383 					m = m_defrag(m0, M_NOWAIT);
   7384 					if (m != NULL) {
   7385 						WM_Q_EVCNT_INCR(txq, defrag);
   7386 						m0 = m;
   7387 						goto retry;
   7388 					}
   7389 				}
   7390 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7391 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7392 				    "DMA segments, dropping...\n",
   7393 				    device_xname(sc->sc_dev));
   7394 				wm_dump_mbuf_chain(sc, m0);
   7395 				m_freem(m0);
   7396 				continue;
   7397 			}
    7398 			/* Short on resources, just stop for now. */
   7399 			DPRINTF(WM_DEBUG_TX,
   7400 			    ("%s: TX: dmamap load failed: %d\n",
   7401 				device_xname(sc->sc_dev), error));
   7402 			break;
   7403 		}
   7404 
   7405 		segs_needed = dmamap->dm_nsegs;
   7406 		if (use_tso) {
   7407 			/* For sentinel descriptor; see below. */
   7408 			segs_needed++;
   7409 		}
   7410 
   7411 		/*
   7412 		 * Ensure we have enough descriptors free to describe
   7413 		 * the packet. Note, we always reserve one descriptor
   7414 		 * at the end of the ring due to the semantics of the
   7415 		 * TDT register, plus one more in the event we need
   7416 		 * to load offload context.
   7417 		 */
   7418 		if (segs_needed > txq->txq_free - 2) {
   7419 			/*
   7420 			 * Not enough free descriptors to transmit this
   7421 			 * packet.  We haven't committed anything yet,
    7422 			 * so just unload the DMA map, drop the packet (it is
    7423 			 * freed after the loop), and punt.  Notify the upper
    7424 			 * layer that there are no more slots left.
   7425 			 */
   7426 			DPRINTF(WM_DEBUG_TX,
   7427 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7428 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7429 				segs_needed, txq->txq_free - 1));
   7430 			if (!is_transmit)
   7431 				ifp->if_flags |= IFF_OACTIVE;
   7432 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7433 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7434 			WM_Q_EVCNT_INCR(txq, txdstall);
   7435 			break;
   7436 		}
   7437 
   7438 		/*
   7439 		 * Check for 82547 Tx FIFO bug. We need to do this
   7440 		 * once we know we can transmit the packet, since we
   7441 		 * do some internal FIFO space accounting here.
   7442 		 */
   7443 		if (sc->sc_type == WM_T_82547 &&
   7444 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7445 			DPRINTF(WM_DEBUG_TX,
   7446 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7447 				device_xname(sc->sc_dev)));
   7448 			if (!is_transmit)
   7449 				ifp->if_flags |= IFF_OACTIVE;
   7450 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7451 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7452 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7453 			break;
   7454 		}
   7455 
   7456 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7457 
   7458 		DPRINTF(WM_DEBUG_TX,
   7459 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7460 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7461 
   7462 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7463 
   7464 		/*
   7465 		 * Store a pointer to the packet so that we can free it
   7466 		 * later.
   7467 		 *
   7468 		 * Initially, we consider the number of descriptors the
   7469 		 * packet uses the number of DMA segments.  This may be
   7470 		 * incremented by 1 if we do checksum offload (a descriptor
   7471 		 * is used to set the checksum context).
   7472 		 */
   7473 		txs->txs_mbuf = m0;
   7474 		txs->txs_firstdesc = txq->txq_next;
   7475 		txs->txs_ndesc = segs_needed;
   7476 
   7477 		/* Set up offload parameters for this packet. */
   7478 		if (m0->m_pkthdr.csum_flags &
   7479 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7480 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7481 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7482 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7483 					  &cksumfields) != 0) {
   7484 				/* Error message already displayed. */
   7485 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7486 				continue;
   7487 			}
   7488 		} else {
   7489 			cksumcmd = 0;
   7490 			cksumfields = 0;
   7491 		}
   7492 
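		/*
		 * Every data descriptor requests FCS insertion (IFCS) and
		 * uses the transmit interrupt delay (IDE).
		 */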
   7493 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7494 
   7495 		/* Sync the DMA map. */
   7496 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7497 		    BUS_DMASYNC_PREWRITE);
   7498 
   7499 		/* Initialize the transmit descriptor. */
   7500 		for (nexttx = txq->txq_next, seg = 0;
   7501 		     seg < dmamap->dm_nsegs; seg++) {
   7502 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7503 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7504 			     seglen != 0;
   7505 			     curaddr += curlen, seglen -= curlen,
   7506 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7507 				curlen = seglen;
   7508 
   7509 				/*
   7510 				 * So says the Linux driver:
   7511 				 * Work around for premature descriptor
   7512 				 * write-backs in TSO mode.  Append a
   7513 				 * 4-byte sentinel descriptor.
   7514 				 */
   7515 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7516 				    curlen > 8)
   7517 					curlen -= 4;
   7518 
   7519 				wm_set_dma_addr(
   7520 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7521 				txq->txq_descs[nexttx].wtx_cmdlen
   7522 				    = htole32(cksumcmd | curlen);
   7523 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7524 				    = 0;
   7525 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7526 				    = cksumfields;
    7527 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7528 				lasttx = nexttx;
   7529 
   7530 				DPRINTF(WM_DEBUG_TX,
   7531 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7532 					"len %#04zx\n",
   7533 					device_xname(sc->sc_dev), nexttx,
   7534 					(uint64_t)curaddr, curlen));
   7535 			}
   7536 		}
   7537 
   7538 		KASSERT(lasttx != -1);
   7539 
   7540 		/*
   7541 		 * Set up the command byte on the last descriptor of
   7542 		 * the packet. If we're in the interrupt delay window,
   7543 		 * delay the interrupt.
   7544 		 */
   7545 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7546 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7547 
   7548 		/*
   7549 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7550 		 * up the descriptor to encapsulate the packet for us.
   7551 		 *
   7552 		 * This is only valid on the last descriptor of the packet.
   7553 		 */
   7554 		if (vlan_has_tag(m0)) {
   7555 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7556 			    htole32(WTX_CMD_VLE);
   7557 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7558 			    = htole16(vlan_get_tag(m0));
   7559 		}
   7560 
   7561 		txs->txs_lastdesc = lasttx;
   7562 
   7563 		DPRINTF(WM_DEBUG_TX,
   7564 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7565 			device_xname(sc->sc_dev),
   7566 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7567 
   7568 		/* Sync the descriptors we're using. */
   7569 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7570 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7571 
   7572 		/* Give the packet to the chip. */
   7573 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7574 
   7575 		DPRINTF(WM_DEBUG_TX,
   7576 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7577 
   7578 		DPRINTF(WM_DEBUG_TX,
   7579 		    ("%s: TX: finished transmitting packet, job %d\n",
   7580 			device_xname(sc->sc_dev), txq->txq_snext));
   7581 
   7582 		/* Advance the tx pointer. */
   7583 		txq->txq_free -= txs->txs_ndesc;
   7584 		txq->txq_next = nexttx;
   7585 
   7586 		txq->txq_sfree--;
   7587 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7588 
   7589 		/* Pass the packet to any BPF listeners. */
   7590 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7591 	}
   7592 
   7593 	if (m0 != NULL) {
   7594 		if (!is_transmit)
   7595 			ifp->if_flags |= IFF_OACTIVE;
   7596 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7597 		WM_Q_EVCNT_INCR(txq, descdrop);
   7598 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7599 			__func__));
   7600 		m_freem(m0);
   7601 	}
   7602 
   7603 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7604 		/* No more slots; notify upper layer. */
   7605 		if (!is_transmit)
   7606 			ifp->if_flags |= IFF_OACTIVE;
   7607 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7608 	}
   7609 
   7610 	if (txq->txq_free != ofree) {
   7611 		/* Set a watchdog timer in case the chip flakes out. */
   7612 		txq->txq_lastsent = time_uptime;
   7613 		txq->txq_sending = true;
   7614 	}
   7615 }
   7616 
   7617 /*
   7618  * wm_nq_tx_offload:
   7619  *
   7620  *	Set up TCP/IP checksumming parameters for the
   7621  *	specified packet, for NEWQUEUE devices
   7622  */
   7623 static int
   7624 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7625     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7626 {
   7627 	struct mbuf *m0 = txs->txs_mbuf;
   7628 	uint32_t vl_len, mssidx, cmdc;
   7629 	struct ether_header *eh;
   7630 	int offset, iphl;
   7631 
   7632 	/*
   7633 	 * XXX It would be nice if the mbuf pkthdr had offset
   7634 	 * fields for the protocol headers.
   7635 	 */
   7636 	*cmdlenp = 0;
   7637 	*fieldsp = 0;
   7638 
   7639 	eh = mtod(m0, struct ether_header *);
   7640 	switch (htons(eh->ether_type)) {
   7641 	case ETHERTYPE_IP:
   7642 	case ETHERTYPE_IPV6:
   7643 		offset = ETHER_HDR_LEN;
   7644 		break;
   7645 
   7646 	case ETHERTYPE_VLAN:
   7647 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7648 		break;
   7649 
   7650 	default:
   7651 		/* Don't support this protocol or encapsulation. */
   7652 		*do_csum = false;
   7653 		return 0;
   7654 	}
   7655 	*do_csum = true;
   7656 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7657 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7658 
   7659 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7660 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7661 
   7662 	if ((m0->m_pkthdr.csum_flags &
   7663 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7664 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7665 	} else {
   7666 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7667 	}
   7668 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7669 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7670 
   7671 	if (vlan_has_tag(m0)) {
   7672 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7673 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7674 		*cmdlenp |= NQTX_CMD_VLE;
   7675 	}
   7676 
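	/*
	 * mssidx carries the MSS and L4 header length for the TSE context
	 * descriptor; it remains zero for non-TSO packets.
	 */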
   7677 	mssidx = 0;
   7678 
   7679 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7680 		int hlen = offset + iphl;
   7681 		int tcp_hlen;
   7682 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7683 
   7684 		if (__predict_false(m0->m_len <
   7685 				    (hlen + sizeof(struct tcphdr)))) {
   7686 			/*
   7687 			 * TCP/IP headers are not in the first mbuf; we need
   7688 			 * to do this the slow and painful way. Let's just
   7689 			 * hope this doesn't happen very often.
   7690 			 */
   7691 			struct tcphdr th;
   7692 
   7693 			WM_Q_EVCNT_INCR(txq, tsopain);
   7694 
   7695 			m_copydata(m0, hlen, sizeof(th), &th);
   7696 			if (v4) {
   7697 				struct ip ip;
   7698 
   7699 				m_copydata(m0, offset, sizeof(ip), &ip);
   7700 				ip.ip_len = 0;
   7701 				m_copyback(m0,
   7702 				    offset + offsetof(struct ip, ip_len),
   7703 				    sizeof(ip.ip_len), &ip.ip_len);
   7704 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7705 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7706 			} else {
   7707 				struct ip6_hdr ip6;
   7708 
   7709 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7710 				ip6.ip6_plen = 0;
   7711 				m_copyback(m0,
   7712 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7713 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7714 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7715 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7716 			}
   7717 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7718 			    sizeof(th.th_sum), &th.th_sum);
   7719 
   7720 			tcp_hlen = th.th_off << 2;
   7721 		} else {
   7722 			/*
   7723 			 * TCP/IP headers are in the first mbuf; we can do
   7724 			 * this the easy way.
   7725 			 */
   7726 			struct tcphdr *th;
   7727 
   7728 			if (v4) {
   7729 				struct ip *ip =
   7730 				    (void *)(mtod(m0, char *) + offset);
   7731 				th = (void *)(mtod(m0, char *) + hlen);
   7732 
   7733 				ip->ip_len = 0;
   7734 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7735 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7736 			} else {
   7737 				struct ip6_hdr *ip6 =
   7738 				    (void *)(mtod(m0, char *) + offset);
   7739 				th = (void *)(mtod(m0, char *) + hlen);
   7740 
   7741 				ip6->ip6_plen = 0;
   7742 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7743 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7744 			}
   7745 			tcp_hlen = th->th_off << 2;
   7746 		}
   7747 		hlen += tcp_hlen;
   7748 		*cmdlenp |= NQTX_CMD_TSE;
   7749 
   7750 		if (v4) {
   7751 			WM_Q_EVCNT_INCR(txq, tso);
   7752 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7753 		} else {
   7754 			WM_Q_EVCNT_INCR(txq, tso6);
   7755 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7756 		}
   7757 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7758 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7759 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7760 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7761 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7762 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7763 	} else {
   7764 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7765 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7766 	}
   7767 
   7768 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7769 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7770 		cmdc |= NQTXC_CMD_IP4;
   7771 	}
   7772 
   7773 	if (m0->m_pkthdr.csum_flags &
   7774 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7775 		WM_Q_EVCNT_INCR(txq, tusum);
   7776 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7777 			cmdc |= NQTXC_CMD_TCP;
   7778 		else
   7779 			cmdc |= NQTXC_CMD_UDP;
   7780 
   7781 		cmdc |= NQTXC_CMD_IP4;
   7782 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7783 	}
   7784 	if (m0->m_pkthdr.csum_flags &
   7785 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7786 		WM_Q_EVCNT_INCR(txq, tusum6);
   7787 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7788 			cmdc |= NQTXC_CMD_TCP;
   7789 		else
   7790 			cmdc |= NQTXC_CMD_UDP;
   7791 
   7792 		cmdc |= NQTXC_CMD_IP6;
   7793 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7794 	}
   7795 
   7796 	/*
    7797 	 * We don't have to write a context descriptor for every packet on
    7798 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7799 	 * I354, I210 and I211; for those it is enough to write one context
    7800 	 * descriptor per Tx queue.
    7801 	 * Writing a context descriptor for every packet adds overhead,
    7802 	 * but it does not cause problems.
   7803 	 */
   7804 	/* Fill in the context descriptor. */
   7805 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7806 	    htole32(vl_len);
   7807 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7808 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7809 	    htole32(cmdc);
   7810 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7811 	    htole32(mssidx);
   7812 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7813 	DPRINTF(WM_DEBUG_TX,
   7814 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7815 		txq->txq_next, 0, vl_len));
   7816 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7817 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7818 	txs->txs_ndesc++;
   7819 	return 0;
   7820 }
   7821 
   7822 /*
   7823  * wm_nq_start:		[ifnet interface function]
   7824  *
   7825  *	Start packet transmission on the interface for NEWQUEUE devices
   7826  */
   7827 static void
   7828 wm_nq_start(struct ifnet *ifp)
   7829 {
   7830 	struct wm_softc *sc = ifp->if_softc;
   7831 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7832 
   7833 #ifdef WM_MPSAFE
   7834 	KASSERT(if_is_mpsafe(ifp));
   7835 #endif
   7836 	/*
   7837 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7838 	 */
   7839 
   7840 	mutex_enter(txq->txq_lock);
   7841 	if (!txq->txq_stopping)
   7842 		wm_nq_start_locked(ifp);
   7843 	mutex_exit(txq->txq_lock);
   7844 }
   7845 
   7846 static void
   7847 wm_nq_start_locked(struct ifnet *ifp)
   7848 {
   7849 	struct wm_softc *sc = ifp->if_softc;
   7850 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7851 
   7852 	wm_nq_send_common_locked(ifp, txq, false);
   7853 }
   7854 
   7855 static int
   7856 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7857 {
   7858 	int qid;
   7859 	struct wm_softc *sc = ifp->if_softc;
   7860 	struct wm_txqueue *txq;
   7861 
   7862 	qid = wm_select_txqueue(ifp, m);
   7863 	txq = &sc->sc_queue[qid].wmq_txq;
   7864 
   7865 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7866 		m_freem(m);
   7867 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7868 		return ENOBUFS;
   7869 	}
   7870 
   7871 	/*
   7872 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7873 	 */
   7874 	ifp->if_obytes += m->m_pkthdr.len;
   7875 	if (m->m_flags & M_MCAST)
   7876 		ifp->if_omcasts++;
   7877 
    7878 	/*
    7879 	 * There are two situations in which this mutex_tryenter() can
    7880 	 * fail at run time:
    7881 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7882 	 *     (2) contention with the deferred if_start softint
    7883 	 *         (wm_handle_queue())
    7884 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7885 	 * dequeued by wm_deferred_start_locked(), so it does not get
    7886 	 * stuck. In case (2), the last packet is likewise dequeued by
    7887 	 * wm_deferred_start_locked(), so it does not get stuck either.
    7888 	 */
   7889 	if (mutex_tryenter(txq->txq_lock)) {
   7890 		if (!txq->txq_stopping)
   7891 			wm_nq_transmit_locked(ifp, txq);
   7892 		mutex_exit(txq->txq_lock);
   7893 	}
   7894 
   7895 	return 0;
   7896 }
   7897 
   7898 static void
   7899 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7900 {
   7901 
   7902 	wm_nq_send_common_locked(ifp, txq, true);
   7903 }
   7904 
   7905 static void
   7906 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7907     bool is_transmit)
   7908 {
   7909 	struct wm_softc *sc = ifp->if_softc;
   7910 	struct mbuf *m0;
   7911 	struct wm_txsoft *txs;
   7912 	bus_dmamap_t dmamap;
   7913 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7914 	bool do_csum, sent;
   7915 	bool remap = true;
   7916 
   7917 	KASSERT(mutex_owned(txq->txq_lock));
   7918 
   7919 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7920 		return;
   7921 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7922 		return;
   7923 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7924 		return;
   7925 
   7926 	sent = false;
   7927 
   7928 	/*
   7929 	 * Loop through the send queue, setting up transmit descriptors
   7930 	 * until we drain the queue, or use up all available transmit
   7931 	 * descriptors.
   7932 	 */
   7933 	for (;;) {
   7934 		m0 = NULL;
   7935 
   7936 		/* Get a work queue entry. */
   7937 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7938 			wm_txeof(txq, UINT_MAX);
   7939 			if (txq->txq_sfree == 0) {
   7940 				DPRINTF(WM_DEBUG_TX,
   7941 				    ("%s: TX: no free job descriptors\n",
   7942 					device_xname(sc->sc_dev)));
   7943 				WM_Q_EVCNT_INCR(txq, txsstall);
   7944 				break;
   7945 			}
   7946 		}
   7947 
   7948 		/* Grab a packet off the queue. */
   7949 		if (is_transmit)
   7950 			m0 = pcq_get(txq->txq_interq);
   7951 		else
   7952 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7953 		if (m0 == NULL)
   7954 			break;
   7955 
   7956 		DPRINTF(WM_DEBUG_TX,
   7957 		    ("%s: TX: have packet to transmit: %p\n",
   7958 		    device_xname(sc->sc_dev), m0));
   7959 
   7960 		txs = &txq->txq_soft[txq->txq_snext];
   7961 		dmamap = txs->txs_dmamap;
   7962 
   7963 		/*
   7964 		 * Load the DMA map.  If this fails, the packet either
   7965 		 * didn't fit in the allotted number of segments, or we
   7966 		 * were short on resources.  For the too-many-segments
   7967 		 * case, we simply report an error and drop the packet,
   7968 		 * since we can't sanely copy a jumbo packet to a single
   7969 		 * buffer.
   7970 		 */
   7971 retry:
   7972 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7973 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7974 		if (__predict_false(error)) {
   7975 			if (error == EFBIG) {
   7976 				if (remap == true) {
   7977 					struct mbuf *m;
   7978 
   7979 					remap = false;
   7980 					m = m_defrag(m0, M_NOWAIT);
   7981 					if (m != NULL) {
   7982 						WM_Q_EVCNT_INCR(txq, defrag);
   7983 						m0 = m;
   7984 						goto retry;
   7985 					}
   7986 				}
   7987 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7988 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7989 				    "DMA segments, dropping...\n",
   7990 				    device_xname(sc->sc_dev));
   7991 				wm_dump_mbuf_chain(sc, m0);
   7992 				m_freem(m0);
   7993 				continue;
   7994 			}
   7995 			/* Short on resources, just stop for now. */
   7996 			DPRINTF(WM_DEBUG_TX,
   7997 			    ("%s: TX: dmamap load failed: %d\n",
   7998 				device_xname(sc->sc_dev), error));
   7999 			break;
   8000 		}
   8001 
   8002 		segs_needed = dmamap->dm_nsegs;
   8003 
   8004 		/*
   8005 		 * Ensure we have enough descriptors free to describe
   8006 		 * the packet. Note, we always reserve one descriptor
   8007 		 * at the end of the ring due to the semantics of the
   8008 		 * TDT register, plus one more in the event we need
   8009 		 * to load offload context.
   8010 		 */
   8011 		if (segs_needed > txq->txq_free - 2) {
   8012 			/*
   8013 			 * Not enough free descriptors to transmit this
   8014 			 * packet.  We haven't committed anything yet,
   8015 			 * so just unload the DMA map, put the packet
    8016 			 * back on the queue, and punt. Notify the upper
   8017 			 * layer that there are no more slots left.
   8018 			 */
   8019 			DPRINTF(WM_DEBUG_TX,
   8020 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8021 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8022 				segs_needed, txq->txq_free - 1));
   8023 			if (!is_transmit)
   8024 				ifp->if_flags |= IFF_OACTIVE;
   8025 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8026 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8027 			WM_Q_EVCNT_INCR(txq, txdstall);
   8028 			break;
   8029 		}
   8030 
   8031 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8032 
   8033 		DPRINTF(WM_DEBUG_TX,
   8034 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8035 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8036 
   8037 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8038 
   8039 		/*
   8040 		 * Store a pointer to the packet so that we can free it
   8041 		 * later.
   8042 		 *
    8043 		 * Initially, we take the number of descriptors the
    8044 		 * packet uses to be the number of DMA segments.  This may
    8045 		 * be incremented by 1 if we do checksum offload (a descriptor
   8046 		 * is used to set the checksum context).
   8047 		 */
   8048 		txs->txs_mbuf = m0;
   8049 		txs->txs_firstdesc = txq->txq_next;
   8050 		txs->txs_ndesc = segs_needed;
   8051 
   8052 		/* Set up offload parameters for this packet. */
   8053 		uint32_t cmdlen, fields, dcmdlen;
   8054 		if (m0->m_pkthdr.csum_flags &
   8055 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8056 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8057 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8058 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8059 			    &do_csum) != 0) {
   8060 				/* Error message already displayed. */
   8061 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8062 				continue;
   8063 			}
   8064 		} else {
   8065 			do_csum = false;
   8066 			cmdlen = 0;
   8067 			fields = 0;
   8068 		}
   8069 
   8070 		/* Sync the DMA map. */
   8071 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8072 		    BUS_DMASYNC_PREWRITE);
   8073 
   8074 		/* Initialize the first transmit descriptor. */
   8075 		nexttx = txq->txq_next;
   8076 		if (!do_csum) {
   8077 			/* setup a legacy descriptor */
   8078 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8079 			    dmamap->dm_segs[0].ds_addr);
   8080 			txq->txq_descs[nexttx].wtx_cmdlen =
   8081 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8082 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8083 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8084 			if (vlan_has_tag(m0)) {
   8085 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8086 				    htole32(WTX_CMD_VLE);
   8087 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8088 				    htole16(vlan_get_tag(m0));
   8089 			} else
   8090 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8091 
   8092 			dcmdlen = 0;
   8093 		} else {
   8094 			/* setup an advanced data descriptor */
   8095 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8096 			    htole64(dmamap->dm_segs[0].ds_addr);
   8097 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8098 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8099 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8100 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8101 			    htole32(fields);
   8102 			DPRINTF(WM_DEBUG_TX,
   8103 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8104 				device_xname(sc->sc_dev), nexttx,
   8105 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8106 			DPRINTF(WM_DEBUG_TX,
   8107 			    ("\t 0x%08x%08x\n", fields,
   8108 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8109 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8110 		}
   8111 
   8112 		lasttx = nexttx;
   8113 		nexttx = WM_NEXTTX(txq, nexttx);
    8114 		/*
    8115 		 * Fill in the remaining descriptors. The legacy and
    8116 		 * advanced formats share the same layout here.
    8117 		 */
   8118 		for (seg = 1; seg < dmamap->dm_nsegs;
   8119 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8120 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8121 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8122 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8123 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8124 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8125 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8126 			lasttx = nexttx;
   8127 
   8128 			DPRINTF(WM_DEBUG_TX,
   8129 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8130 				device_xname(sc->sc_dev), nexttx,
   8131 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8132 				dmamap->dm_segs[seg].ds_len));
   8133 		}
   8134 
   8135 		KASSERT(lasttx != -1);
   8136 
    8137 		/*
    8138 		 * Set up the command byte on the last descriptor of
    8139 		 * the packet: EOP marks the end of the packet and RS
    8140 		 * requests a descriptor done status report.
    8141 		 */
   8142 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8143 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8144 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8145 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8146 
   8147 		txs->txs_lastdesc = lasttx;
   8148 
   8149 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8150 		    device_xname(sc->sc_dev),
   8151 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8152 
   8153 		/* Sync the descriptors we're using. */
   8154 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8155 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8156 
   8157 		/* Give the packet to the chip. */
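         		/*
         		 * Writing the tail register (TDT) publishes all
         		 * descriptors up to, but not including, nexttx to
         		 * the hardware; this is also why one free slot is
         		 * always reserved at the end of the ring.
         		 */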
   8158 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8159 		sent = true;
   8160 
   8161 		DPRINTF(WM_DEBUG_TX,
   8162 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8163 
   8164 		DPRINTF(WM_DEBUG_TX,
   8165 		    ("%s: TX: finished transmitting packet, job %d\n",
   8166 			device_xname(sc->sc_dev), txq->txq_snext));
   8167 
   8168 		/* Advance the tx pointer. */
   8169 		txq->txq_free -= txs->txs_ndesc;
   8170 		txq->txq_next = nexttx;
   8171 
   8172 		txq->txq_sfree--;
   8173 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8174 
   8175 		/* Pass the packet to any BPF listeners. */
   8176 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8177 	}
   8178 
   8179 	if (m0 != NULL) {
   8180 		if (!is_transmit)
   8181 			ifp->if_flags |= IFF_OACTIVE;
   8182 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8183 		WM_Q_EVCNT_INCR(txq, descdrop);
   8184 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8185 			__func__));
   8186 		m_freem(m0);
   8187 	}
   8188 
   8189 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8190 		/* No more slots; notify upper layer. */
   8191 		if (!is_transmit)
   8192 			ifp->if_flags |= IFF_OACTIVE;
   8193 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8194 	}
   8195 
   8196 	if (sent) {
   8197 		/* Set a watchdog timer in case the chip flakes out. */
   8198 		txq->txq_lastsent = time_uptime;
   8199 		txq->txq_sending = true;
   8200 	}
   8201 }
   8202 
   8203 static void
   8204 wm_deferred_start_locked(struct wm_txqueue *txq)
   8205 {
   8206 	struct wm_softc *sc = txq->txq_sc;
   8207 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8208 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8209 	int qid = wmq->wmq_id;
   8210 
   8211 	KASSERT(mutex_owned(txq->txq_lock));
   8212 
    8213 	if (txq->txq_stopping) {
    8214 		/* The caller acquired txq_lock and will release it. */
    8215 		return;
    8216 	}
   8217 
   8218 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8219 		/* XXX needed for ALTQ or single-CPU systems */
   8220 		if (qid == 0)
   8221 			wm_nq_start_locked(ifp);
   8222 		wm_nq_transmit_locked(ifp, txq);
   8223 	} else {
    8224 		/* XXX needed for ALTQ or single-CPU systems */
   8225 		if (qid == 0)
   8226 			wm_start_locked(ifp);
   8227 		wm_transmit_locked(ifp, txq);
   8228 	}
   8229 }
   8230 
   8231 /* Interrupt */
   8232 
   8233 /*
   8234  * wm_txeof:
   8235  *
   8236  *	Helper; handle transmit interrupts.
   8237  */
   8238 static bool
   8239 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8240 {
   8241 	struct wm_softc *sc = txq->txq_sc;
   8242 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8243 	struct wm_txsoft *txs;
   8244 	int count = 0;
   8245 	int i;
   8246 	uint8_t status;
   8247 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8248 	bool more = false;
   8249 
   8250 	KASSERT(mutex_owned(txq->txq_lock));
   8251 
   8252 	if (txq->txq_stopping)
   8253 		return false;
   8254 
   8255 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8256 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8257 	if (wmq->wmq_id == 0)
   8258 		ifp->if_flags &= ~IFF_OACTIVE;
   8259 
   8260 	/*
   8261 	 * Go through the Tx list and free mbufs for those
   8262 	 * frames which have been transmitted.
   8263 	 */
   8264 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8265 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8266 		if (limit-- == 0) {
   8267 			more = true;
   8268 			DPRINTF(WM_DEBUG_TX,
   8269 			    ("%s: TX: loop limited, job %d is not processed\n",
   8270 				device_xname(sc->sc_dev), i));
   8271 			break;
   8272 		}
   8273 
   8274 		txs = &txq->txq_soft[i];
   8275 
   8276 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8277 			device_xname(sc->sc_dev), i));
   8278 
   8279 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8280 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8281 
   8282 		status =
   8283 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8284 		if ((status & WTX_ST_DD) == 0) {
   8285 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8286 			    BUS_DMASYNC_PREREAD);
   8287 			break;
   8288 		}
   8289 
   8290 		count++;
   8291 		DPRINTF(WM_DEBUG_TX,
   8292 		    ("%s: TX: job %d done: descs %d..%d\n",
   8293 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8294 		    txs->txs_lastdesc));
   8295 
   8296 		/*
   8297 		 * XXX We should probably be using the statistics
   8298 		 * XXX registers, but I don't know if they exist
   8299 		 * XXX on chips before the i82544.
   8300 		 */
   8301 
   8302 #ifdef WM_EVENT_COUNTERS
   8303 		if (status & WTX_ST_TU)
   8304 			WM_Q_EVCNT_INCR(txq, underrun);
   8305 #endif /* WM_EVENT_COUNTERS */
   8306 
    8307 		/*
    8308 		 * Per the documentation, on the 82574 and newer the status
    8309 		 * field has neither an EC (Excessive Collision) nor an LC
    8310 		 * (Late Collision) bit (both reserved); see the "PCIe GbE
    8311 		 * Controller Open Source Software Developer's Manual" and
    8312 		 *
    8313 		 * XXX The LC bit was observed set on an I218 even though the
    8314 		 * media was full duplex, so the bit might have some other
    8315 		 * meaning there (no documentation found).
    8316 		 */
   8317 
   8318 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8319 		    && ((sc->sc_type < WM_T_82574)
   8320 			|| (sc->sc_type == WM_T_80003))) {
   8321 			ifp->if_oerrors++;
   8322 			if (status & WTX_ST_LC)
   8323 				log(LOG_WARNING, "%s: late collision\n",
   8324 				    device_xname(sc->sc_dev));
   8325 			else if (status & WTX_ST_EC) {
   8326 				ifp->if_collisions +=
   8327 				    TX_COLLISION_THRESHOLD + 1;
   8328 				log(LOG_WARNING, "%s: excessive collisions\n",
   8329 				    device_xname(sc->sc_dev));
   8330 			}
   8331 		} else
   8332 			ifp->if_opackets++;
   8333 
   8334 		txq->txq_packets++;
   8335 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8336 
   8337 		txq->txq_free += txs->txs_ndesc;
   8338 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8339 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8340 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8341 		m_freem(txs->txs_mbuf);
   8342 		txs->txs_mbuf = NULL;
   8343 	}
   8344 
   8345 	/* Update the dirty transmit buffer pointer. */
   8346 	txq->txq_sdirty = i;
   8347 	DPRINTF(WM_DEBUG_TX,
   8348 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8349 
   8350 	if (count != 0)
   8351 		rnd_add_uint32(&sc->rnd_source, count);
   8352 
   8353 	/*
   8354 	 * If there are no more pending transmissions, cancel the watchdog
   8355 	 * timer.
   8356 	 */
   8357 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8358 		txq->txq_sending = false;
   8359 
   8360 	return more;
   8361 }
   8362 
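         /*
          * Rx descriptor accessors.  The driver copes with three descriptor
          * layouts: the legacy format (wrx_* fields), the 82574 extended
          * format (erx_ctx) and the NEWQUEUE advanced format (nqrx_ctx).
          * The helpers below hide those differences from wm_rxeof().
          */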
   8363 static inline uint32_t
   8364 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8365 {
   8366 	struct wm_softc *sc = rxq->rxq_sc;
   8367 
   8368 	if (sc->sc_type == WM_T_82574)
   8369 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8370 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8371 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8372 	else
   8373 		return rxq->rxq_descs[idx].wrx_status;
   8374 }
   8375 
   8376 static inline uint32_t
   8377 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8378 {
   8379 	struct wm_softc *sc = rxq->rxq_sc;
   8380 
   8381 	if (sc->sc_type == WM_T_82574)
   8382 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8383 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8384 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8385 	else
   8386 		return rxq->rxq_descs[idx].wrx_errors;
   8387 }
   8388 
   8389 static inline uint16_t
   8390 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8391 {
   8392 	struct wm_softc *sc = rxq->rxq_sc;
   8393 
   8394 	if (sc->sc_type == WM_T_82574)
   8395 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8396 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8397 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8398 	else
   8399 		return rxq->rxq_descs[idx].wrx_special;
   8400 }
   8401 
   8402 static inline int
   8403 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8404 {
   8405 	struct wm_softc *sc = rxq->rxq_sc;
   8406 
   8407 	if (sc->sc_type == WM_T_82574)
   8408 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8409 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8410 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8411 	else
   8412 		return rxq->rxq_descs[idx].wrx_len;
   8413 }
   8414 
   8415 #ifdef WM_DEBUG
   8416 static inline uint32_t
   8417 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8418 {
   8419 	struct wm_softc *sc = rxq->rxq_sc;
   8420 
   8421 	if (sc->sc_type == WM_T_82574)
   8422 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8423 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8424 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8425 	else
   8426 		return 0;
   8427 }
   8428 
   8429 static inline uint8_t
   8430 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8431 {
   8432 	struct wm_softc *sc = rxq->rxq_sc;
   8433 
   8434 	if (sc->sc_type == WM_T_82574)
   8435 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8436 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8437 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8438 	else
   8439 		return 0;
   8440 }
   8441 #endif /* WM_DEBUG */
   8442 
   8443 static inline bool
   8444 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8445     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8446 {
   8447 
   8448 	if (sc->sc_type == WM_T_82574)
   8449 		return (status & ext_bit) != 0;
   8450 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8451 		return (status & nq_bit) != 0;
   8452 	else
   8453 		return (status & legacy_bit) != 0;
   8454 }
   8455 
   8456 static inline bool
   8457 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8458     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8459 {
   8460 
   8461 	if (sc->sc_type == WM_T_82574)
   8462 		return (error & ext_bit) != 0;
   8463 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8464 		return (error & nq_bit) != 0;
   8465 	else
   8466 		return (error & legacy_bit) != 0;
   8467 }
   8468 
   8469 static inline bool
   8470 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8471 {
   8472 
   8473 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8474 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8475 		return true;
   8476 	else
   8477 		return false;
   8478 }
   8479 
   8480 static inline bool
   8481 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8482 {
   8483 	struct wm_softc *sc = rxq->rxq_sc;
   8484 
   8485 	/* XXXX missing error bit for newqueue? */
   8486 	if (wm_rxdesc_is_set_error(sc, errors,
   8487 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8488 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8489 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8490 		NQRXC_ERROR_RXE)) {
   8491 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8492 		    EXTRXC_ERROR_SE, 0))
   8493 			log(LOG_WARNING, "%s: symbol error\n",
   8494 			    device_xname(sc->sc_dev));
   8495 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8496 		    EXTRXC_ERROR_SEQ, 0))
   8497 			log(LOG_WARNING, "%s: receive sequence error\n",
   8498 			    device_xname(sc->sc_dev));
   8499 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8500 		    EXTRXC_ERROR_CE, 0))
   8501 			log(LOG_WARNING, "%s: CRC error\n",
   8502 			    device_xname(sc->sc_dev));
   8503 		return true;
   8504 	}
   8505 
   8506 	return false;
   8507 }
   8508 
   8509 static inline bool
   8510 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8511 {
   8512 	struct wm_softc *sc = rxq->rxq_sc;
   8513 
   8514 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8515 		NQRXC_STATUS_DD)) {
   8516 		/* We have processed all of the receive descriptors. */
   8517 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8518 		return false;
   8519 	}
   8520 
   8521 	return true;
   8522 }
   8523 
   8524 static inline bool
   8525 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8526     uint16_t vlantag, struct mbuf *m)
   8527 {
   8528 
   8529 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8530 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8531 		vlan_set_tag(m, le16toh(vlantag));
   8532 	}
   8533 
   8534 	return true;
   8535 }
   8536 
   8537 static inline void
   8538 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8539     uint32_t errors, struct mbuf *m)
   8540 {
   8541 	struct wm_softc *sc = rxq->rxq_sc;
   8542 
   8543 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8544 		if (wm_rxdesc_is_set_status(sc, status,
   8545 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8546 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8547 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8548 			if (wm_rxdesc_is_set_error(sc, errors,
   8549 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8550 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8551 		}
   8552 		if (wm_rxdesc_is_set_status(sc, status,
   8553 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8554 			/*
   8555 			 * Note: we don't know if this was TCP or UDP,
   8556 			 * so we just set both bits, and expect the
   8557 			 * upper layers to deal.
   8558 			 */
   8559 			WM_Q_EVCNT_INCR(rxq, tusum);
   8560 			m->m_pkthdr.csum_flags |=
   8561 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8562 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8563 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8564 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8565 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8566 		}
   8567 	}
   8568 }
   8569 
   8570 /*
   8571  * wm_rxeof:
   8572  *
   8573  *	Helper; handle receive interrupts.
   8574  */
   8575 static bool
   8576 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8577 {
   8578 	struct wm_softc *sc = rxq->rxq_sc;
   8579 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8580 	struct wm_rxsoft *rxs;
   8581 	struct mbuf *m;
   8582 	int i, len;
   8583 	int count = 0;
   8584 	uint32_t status, errors;
   8585 	uint16_t vlantag;
   8586 	bool more = false;
   8587 
   8588 	KASSERT(mutex_owned(rxq->rxq_lock));
   8589 
   8590 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8591 		if (limit-- == 0) {
   8592 			rxq->rxq_ptr = i;
   8593 			more = true;
   8594 			DPRINTF(WM_DEBUG_RX,
   8595 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8596 				device_xname(sc->sc_dev), i));
   8597 			break;
   8598 		}
   8599 
   8600 		rxs = &rxq->rxq_soft[i];
   8601 
   8602 		DPRINTF(WM_DEBUG_RX,
   8603 		    ("%s: RX: checking descriptor %d\n",
   8604 			device_xname(sc->sc_dev), i));
   8605 		wm_cdrxsync(rxq, i,
   8606 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8607 
   8608 		status = wm_rxdesc_get_status(rxq, i);
   8609 		errors = wm_rxdesc_get_errors(rxq, i);
   8610 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8611 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8612 #ifdef WM_DEBUG
   8613 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8614 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8615 #endif
   8616 
   8617 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8618 			/*
    8619 			 * Update the receive pointer while holding rxq_lock,
    8620 			 * keeping it consistent with the counters.
    8621 			 */
   8622 			rxq->rxq_ptr = i;
   8623 			break;
   8624 		}
   8625 
   8626 		count++;
   8627 		if (__predict_false(rxq->rxq_discard)) {
   8628 			DPRINTF(WM_DEBUG_RX,
   8629 			    ("%s: RX: discarding contents of descriptor %d\n",
   8630 				device_xname(sc->sc_dev), i));
   8631 			wm_init_rxdesc(rxq, i);
   8632 			if (wm_rxdesc_is_eop(rxq, status)) {
   8633 				/* Reset our state. */
   8634 				DPRINTF(WM_DEBUG_RX,
   8635 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8636 					device_xname(sc->sc_dev)));
   8637 				rxq->rxq_discard = 0;
   8638 			}
   8639 			continue;
   8640 		}
   8641 
   8642 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8643 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8644 
   8645 		m = rxs->rxs_mbuf;
   8646 
   8647 		/*
   8648 		 * Add a new receive buffer to the ring, unless of
   8649 		 * course the length is zero. Treat the latter as a
   8650 		 * failed mapping.
   8651 		 */
   8652 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8653 			/*
   8654 			 * Failed, throw away what we've done so
   8655 			 * far, and discard the rest of the packet.
   8656 			 */
   8657 			ifp->if_ierrors++;
   8658 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8659 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8660 			wm_init_rxdesc(rxq, i);
   8661 			if (!wm_rxdesc_is_eop(rxq, status))
   8662 				rxq->rxq_discard = 1;
   8663 			if (rxq->rxq_head != NULL)
   8664 				m_freem(rxq->rxq_head);
   8665 			WM_RXCHAIN_RESET(rxq);
   8666 			DPRINTF(WM_DEBUG_RX,
   8667 			    ("%s: RX: Rx buffer allocation failed, "
   8668 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8669 				rxq->rxq_discard ? " (discard)" : ""));
   8670 			continue;
   8671 		}
   8672 
   8673 		m->m_len = len;
   8674 		rxq->rxq_len += len;
   8675 		DPRINTF(WM_DEBUG_RX,
   8676 		    ("%s: RX: buffer at %p len %d\n",
   8677 			device_xname(sc->sc_dev), m->m_data, len));
   8678 
   8679 		/* If this is not the end of the packet, keep looking. */
   8680 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8681 			WM_RXCHAIN_LINK(rxq, m);
   8682 			DPRINTF(WM_DEBUG_RX,
   8683 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8684 				device_xname(sc->sc_dev), rxq->rxq_len));
   8685 			continue;
   8686 		}
   8687 
    8688 		/*
    8689 		 * Okay, we have the entire packet now. The chip is
    8690 		 * configured to include the FCS except on the I350, I354
    8691 		 * and I21[01] (not all chips can be configured to strip
    8692 		 * it), so we need to trim it here. We may also need to
    8693 		 * shorten the previous mbuf in the chain if the current
    8694 		 * mbuf is too short.
    8695 		 * Due to an erratum, the RCTL_SECRC bit in RCTL is always
    8696 		 * set on the I350, so we don't trim the FCS there.
    8697 		 */
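         		/*
         		 * For example, if the final mbuf holds only one byte,
         		 * three of the four FCS bytes sit at the end of the
         		 * previous mbuf: rxq_tail is shortened by three bytes
         		 * and this mbuf's length is zeroed.
         		 */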
   8698 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8699 		    && (sc->sc_type != WM_T_I210)
   8700 		    && (sc->sc_type != WM_T_I211)) {
   8701 			if (m->m_len < ETHER_CRC_LEN) {
   8702 				rxq->rxq_tail->m_len
   8703 				    -= (ETHER_CRC_LEN - m->m_len);
   8704 				m->m_len = 0;
   8705 			} else
   8706 				m->m_len -= ETHER_CRC_LEN;
   8707 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8708 		} else
   8709 			len = rxq->rxq_len;
   8710 
   8711 		WM_RXCHAIN_LINK(rxq, m);
   8712 
   8713 		*rxq->rxq_tailp = NULL;
   8714 		m = rxq->rxq_head;
   8715 
   8716 		WM_RXCHAIN_RESET(rxq);
   8717 
   8718 		DPRINTF(WM_DEBUG_RX,
   8719 		    ("%s: RX: have entire packet, len -> %d\n",
   8720 			device_xname(sc->sc_dev), len));
   8721 
   8722 		/* If an error occurred, update stats and drop the packet. */
   8723 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8724 			m_freem(m);
   8725 			continue;
   8726 		}
   8727 
   8728 		/* No errors.  Receive the packet. */
   8729 		m_set_rcvif(m, ifp);
   8730 		m->m_pkthdr.len = len;
    8731 		/*
    8732 		 * TODO
    8733 		 * The rsshash and rsstype should be saved in this mbuf.
    8734 		 */
   8735 		DPRINTF(WM_DEBUG_RX,
   8736 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8737 			device_xname(sc->sc_dev), rsstype, rsshash));
   8738 
   8739 		/*
   8740 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8741 		 * for us.  Associate the tag with the packet.
   8742 		 */
   8743 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8744 			continue;
   8745 
   8746 		/* Set up checksum info for this packet. */
   8747 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8748 		/*
    8749 		 * Update the receive pointer while holding rxq_lock, keeping
    8750 		 * it consistent with the counters.
    8751 		 */
   8752 		rxq->rxq_ptr = i;
   8753 		rxq->rxq_packets++;
   8754 		rxq->rxq_bytes += len;
   8755 		mutex_exit(rxq->rxq_lock);
   8756 
   8757 		/* Pass it on. */
   8758 		if_percpuq_enqueue(sc->sc_ipq, m);
   8759 
   8760 		mutex_enter(rxq->rxq_lock);
   8761 
   8762 		if (rxq->rxq_stopping)
   8763 			break;
   8764 	}
   8765 
   8766 	if (count != 0)
   8767 		rnd_add_uint32(&sc->rnd_source, count);
   8768 
   8769 	DPRINTF(WM_DEBUG_RX,
   8770 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8771 
   8772 	return more;
   8773 }
   8774 
   8775 /*
   8776  * wm_linkintr_gmii:
   8777  *
   8778  *	Helper; handle link interrupts for GMII.
   8779  */
   8780 static void
   8781 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8782 {
   8783 
   8784 	KASSERT(WM_CORE_LOCKED(sc));
   8785 
   8786 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8787 		__func__));
   8788 
   8789 	if (icr & ICR_LSC) {
   8790 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8791 		uint32_t reg;
   8792 		bool link;
   8793 
   8794 		link = status & STATUS_LU;
   8795 		if (link) {
   8796 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8797 				device_xname(sc->sc_dev),
   8798 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8799 		} else {
   8800 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8801 				device_xname(sc->sc_dev)));
   8802 		}
   8803 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8804 			wm_gig_downshift_workaround_ich8lan(sc);
   8805 
   8806 		if ((sc->sc_type == WM_T_ICH8)
   8807 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8808 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8809 		}
   8810 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8811 			device_xname(sc->sc_dev)));
   8812 		mii_pollstat(&sc->sc_mii);
   8813 		if (sc->sc_type == WM_T_82543) {
   8814 			int miistatus, active;
   8815 
   8816 			/*
   8817 			 * With 82543, we need to force speed and
   8818 			 * duplex on the MAC equal to what the PHY
   8819 			 * speed and duplex configuration is.
   8820 			 */
   8821 			miistatus = sc->sc_mii.mii_media_status;
   8822 
   8823 			if (miistatus & IFM_ACTIVE) {
   8824 				active = sc->sc_mii.mii_media_active;
   8825 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8826 				switch (IFM_SUBTYPE(active)) {
   8827 				case IFM_10_T:
   8828 					sc->sc_ctrl |= CTRL_SPEED_10;
   8829 					break;
   8830 				case IFM_100_TX:
   8831 					sc->sc_ctrl |= CTRL_SPEED_100;
   8832 					break;
   8833 				case IFM_1000_T:
   8834 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8835 					break;
   8836 				default:
   8837 					/*
   8838 					 * fiber?
    8839 					 * Should not enter here.
   8840 					 */
   8841 					printf("unknown media (%x)\n", active);
   8842 					break;
   8843 				}
   8844 				if (active & IFM_FDX)
   8845 					sc->sc_ctrl |= CTRL_FD;
   8846 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8847 			}
   8848 		} else if (sc->sc_type == WM_T_PCH) {
   8849 			wm_k1_gig_workaround_hv(sc,
   8850 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8851 		}
   8852 
   8853 		if ((sc->sc_phytype == WMPHY_82578)
   8854 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8855 			== IFM_1000_T)) {
   8856 
   8857 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8858 				delay(200*1000); /* XXX too big */
   8859 
   8860 				/* Link stall fix for link up */
   8861 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8862 				    HV_MUX_DATA_CTRL,
   8863 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8864 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8865 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8866 				    HV_MUX_DATA_CTRL,
   8867 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8868 			}
   8869 		}
    8870 		/*
    8871 		 * I217 packet loss issue:
    8872 		 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    8873 		 * on power up.
    8874 		 * Set the Beacon Duration for the I217 to 8 usec.
    8875 		 */
   8876 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8877 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8878 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8879 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8880 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8881 		}
   8882 
   8883 		/* Work-around I218 hang issue */
   8884 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8885 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8886 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8887 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8888 			wm_k1_workaround_lpt_lp(sc, link);
   8889 
   8890 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8891 			/*
   8892 			 * Set platform power management values for Latency
   8893 			 * Tolerance Reporting (LTR)
   8894 			 */
   8895 			wm_platform_pm_pch_lpt(sc,
   8896 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8897 		}
   8898 
   8899 		/* FEXTNVM6 K1-off workaround */
   8900 		if (sc->sc_type == WM_T_PCH_SPT) {
   8901 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8902 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8903 			    & FEXTNVM6_K1_OFF_ENABLE)
   8904 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8905 			else
   8906 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8907 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8908 		}
   8909 	} else if (icr & ICR_RXSEQ) {
    8910 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8911 			device_xname(sc->sc_dev)));
   8912 	}
   8913 }
   8914 
   8915 /*
   8916  * wm_linkintr_tbi:
   8917  *
   8918  *	Helper; handle link interrupts for TBI mode.
   8919  */
   8920 static void
   8921 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8922 {
   8923 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8924 	uint32_t status;
   8925 
   8926 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8927 		__func__));
   8928 
   8929 	status = CSR_READ(sc, WMREG_STATUS);
   8930 	if (icr & ICR_LSC) {
   8931 		wm_check_for_link(sc);
   8932 		if (status & STATUS_LU) {
   8933 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8934 				device_xname(sc->sc_dev),
   8935 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8936 			/*
   8937 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8938 			 * so we should update sc->sc_ctrl
   8939 			 */
   8940 
   8941 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8942 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8943 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8944 			if (status & STATUS_FD)
   8945 				sc->sc_tctl |=
   8946 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8947 			else
   8948 				sc->sc_tctl |=
   8949 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8950 			if (sc->sc_ctrl & CTRL_TFCE)
   8951 				sc->sc_fcrtl |= FCRTL_XONE;
   8952 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8953 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8954 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8955 			sc->sc_tbi_linkup = 1;
   8956 			if_link_state_change(ifp, LINK_STATE_UP);
   8957 		} else {
   8958 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8959 				device_xname(sc->sc_dev)));
   8960 			sc->sc_tbi_linkup = 0;
   8961 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8962 		}
   8963 		/* Update LED */
   8964 		wm_tbi_serdes_set_linkled(sc);
   8965 	} else if (icr & ICR_RXSEQ) {
   8966 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8967 			device_xname(sc->sc_dev)));
   8968 	}
   8969 }
   8970 
   8971 /*
   8972  * wm_linkintr_serdes:
   8973  *
    8974  *	Helper; handle link interrupts for SERDES mode.
   8975  */
   8976 static void
   8977 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8978 {
   8979 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8980 	struct mii_data *mii = &sc->sc_mii;
   8981 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8982 	uint32_t pcs_adv, pcs_lpab, reg;
   8983 
   8984 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8985 		__func__));
   8986 
   8987 	if (icr & ICR_LSC) {
   8988 		/* Check PCS */
   8989 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8990 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8991 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8992 				device_xname(sc->sc_dev)));
   8993 			mii->mii_media_status |= IFM_ACTIVE;
   8994 			sc->sc_tbi_linkup = 1;
   8995 			if_link_state_change(ifp, LINK_STATE_UP);
   8996 		} else {
   8997 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8998 				device_xname(sc->sc_dev)));
    8999 			mii->mii_media_active |= IFM_NONE;
   9000 			sc->sc_tbi_linkup = 0;
   9001 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9002 			wm_tbi_serdes_set_linkled(sc);
   9003 			return;
   9004 		}
   9005 		mii->mii_media_active |= IFM_1000_SX;
   9006 		if ((reg & PCS_LSTS_FDX) != 0)
   9007 			mii->mii_media_active |= IFM_FDX;
   9008 		else
   9009 			mii->mii_media_active |= IFM_HDX;
   9010 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9011 			/* Check flow */
   9012 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9013 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9014 				DPRINTF(WM_DEBUG_LINK,
   9015 				    ("XXX LINKOK but not ACOMP\n"));
   9016 				return;
   9017 			}
   9018 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9019 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9020 			DPRINTF(WM_DEBUG_LINK,
   9021 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
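         			/*
         			 * Resolve flow control following the 802.3
         			 * pause resolution rules: symmetric pause on
         			 * both sides enables TX and RX pause, while a
         			 * matching asymmetric advertisement enables
         			 * pause in one direction only.
         			 */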
   9022 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9023 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9024 				mii->mii_media_active |= IFM_FLOW
   9025 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9026 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9027 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9028 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9029 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9030 				mii->mii_media_active |= IFM_FLOW
   9031 				    | IFM_ETH_TXPAUSE;
   9032 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9033 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9034 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9035 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9036 				mii->mii_media_active |= IFM_FLOW
   9037 				    | IFM_ETH_RXPAUSE;
   9038 		}
   9039 		/* Update LED */
   9040 		wm_tbi_serdes_set_linkled(sc);
   9041 	} else {
   9042 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9043 		    device_xname(sc->sc_dev)));
   9044 	}
   9045 }
   9046 
   9047 /*
   9048  * wm_linkintr:
   9049  *
   9050  *	Helper; handle link interrupts.
   9051  */
   9052 static void
   9053 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9054 {
   9055 
   9056 	KASSERT(WM_CORE_LOCKED(sc));
   9057 
   9058 	if (sc->sc_flags & WM_F_HAS_MII)
   9059 		wm_linkintr_gmii(sc, icr);
   9060 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9061 	    && (sc->sc_type >= WM_T_82575))
   9062 		wm_linkintr_serdes(sc, icr);
   9063 	else
   9064 		wm_linkintr_tbi(sc, icr);
   9065 }
   9066 
   9067 /*
   9068  * wm_intr_legacy:
   9069  *
   9070  *	Interrupt service routine for INTx and MSI.
   9071  */
   9072 static int
   9073 wm_intr_legacy(void *arg)
   9074 {
   9075 	struct wm_softc *sc = arg;
   9076 	struct wm_queue *wmq = &sc->sc_queue[0];
   9077 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9078 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9079 	uint32_t icr, rndval = 0;
   9080 	int handled = 0;
   9081 
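         	/*
         	 * Reading ICR acknowledges (clears) the asserted causes on
         	 * these controllers, so keep looping until none of the causes
         	 * we care about remain set.
         	 */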
   9082 	while (1 /* CONSTCOND */) {
   9083 		icr = CSR_READ(sc, WMREG_ICR);
   9084 		if ((icr & sc->sc_icr) == 0)
   9085 			break;
   9086 		if (handled == 0) {
   9087 			DPRINTF(WM_DEBUG_TX,
   9088 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9089 		}
   9090 		if (rndval == 0)
   9091 			rndval = icr;
   9092 
   9093 		mutex_enter(rxq->rxq_lock);
   9094 
   9095 		if (rxq->rxq_stopping) {
   9096 			mutex_exit(rxq->rxq_lock);
   9097 			break;
   9098 		}
   9099 
   9100 		handled = 1;
   9101 
   9102 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9103 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9104 			DPRINTF(WM_DEBUG_RX,
   9105 			    ("%s: RX: got Rx intr 0x%08x\n",
   9106 				device_xname(sc->sc_dev),
   9107 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9108 			WM_Q_EVCNT_INCR(rxq, intr);
   9109 		}
   9110 #endif
    9111 		/*
    9112 		 * wm_rxeof() does *not* call upper layer functions directly:
    9113 		 * if_percpuq_enqueue() just calls softint_schedule(), so it
    9114 		 * is safe to call wm_rxeof() in interrupt context.
    9115 		 */
   9116 		wm_rxeof(rxq, UINT_MAX);
   9117 
   9118 		mutex_exit(rxq->rxq_lock);
   9119 		mutex_enter(txq->txq_lock);
   9120 
   9121 		if (txq->txq_stopping) {
   9122 			mutex_exit(txq->txq_lock);
   9123 			break;
   9124 		}
   9125 
   9126 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9127 		if (icr & ICR_TXDW) {
   9128 			DPRINTF(WM_DEBUG_TX,
   9129 			    ("%s: TX: got TXDW interrupt\n",
   9130 				device_xname(sc->sc_dev)));
   9131 			WM_Q_EVCNT_INCR(txq, txdw);
   9132 		}
   9133 #endif
   9134 		wm_txeof(txq, UINT_MAX);
   9135 
   9136 		mutex_exit(txq->txq_lock);
   9137 		WM_CORE_LOCK(sc);
   9138 
   9139 		if (sc->sc_core_stopping) {
   9140 			WM_CORE_UNLOCK(sc);
   9141 			break;
   9142 		}
   9143 
   9144 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9145 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9146 			wm_linkintr(sc, icr);
   9147 		}
   9148 
   9149 		WM_CORE_UNLOCK(sc);
   9150 
   9151 		if (icr & ICR_RXO) {
   9152 #if defined(WM_DEBUG)
   9153 			log(LOG_WARNING, "%s: Receive overrun\n",
   9154 			    device_xname(sc->sc_dev));
   9155 #endif /* defined(WM_DEBUG) */
   9156 		}
   9157 	}
   9158 
   9159 	rnd_add_uint32(&sc->rnd_source, rndval);
   9160 
   9161 	if (handled) {
   9162 		/* Try to get more packets going. */
   9163 		softint_schedule(wmq->wmq_si);
   9164 	}
   9165 
   9166 	return handled;
   9167 }
   9168 
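         /*
          * Per-queue interrupt masking.  The register layout differs by
          * generation: the 82574 uses ICR_TXQ/ICR_RXQ bits in IMC/IMS, the
          * 82575 uses per-queue EITR bits in EIMC/EIMS, and later devices
          * use one EIMC/EIMS bit per MSI-X vector.
          */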
   9169 static inline void
   9170 wm_txrxintr_disable(struct wm_queue *wmq)
   9171 {
   9172 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9173 
   9174 	if (sc->sc_type == WM_T_82574)
   9175 		CSR_WRITE(sc, WMREG_IMC,
   9176 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9177 	else if (sc->sc_type == WM_T_82575)
   9178 		CSR_WRITE(sc, WMREG_EIMC,
   9179 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9180 	else
   9181 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9182 }
   9183 
   9184 static inline void
   9185 wm_txrxintr_enable(struct wm_queue *wmq)
   9186 {
   9187 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9188 
   9189 	wm_itrs_calculate(sc, wmq);
   9190 
    9191 	/*
    9192 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9193 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
    9194 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9195 	 * while its wm_handle_queue(wmq) is running.
    9196 	 */
   9197 	if (sc->sc_type == WM_T_82574)
   9198 		CSR_WRITE(sc, WMREG_IMS,
   9199 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9200 	else if (sc->sc_type == WM_T_82575)
   9201 		CSR_WRITE(sc, WMREG_EIMS,
   9202 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9203 	else
   9204 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9205 }
   9206 
   9207 static int
   9208 wm_txrxintr_msix(void *arg)
   9209 {
   9210 	struct wm_queue *wmq = arg;
   9211 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9212 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9213 	struct wm_softc *sc = txq->txq_sc;
   9214 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9215 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9216 	bool txmore;
   9217 	bool rxmore;
   9218 
   9219 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9220 
   9221 	DPRINTF(WM_DEBUG_TX,
   9222 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9223 
   9224 	wm_txrxintr_disable(wmq);
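         	/*
         	 * This queue pair's interrupts stay masked from here until
         	 * wm_txrxintr_enable() below or, when the processing limits
         	 * are hit, until the deferred wm_handle_queue() finishes.
         	 */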
   9225 
   9226 	mutex_enter(txq->txq_lock);
   9227 
   9228 	if (txq->txq_stopping) {
   9229 		mutex_exit(txq->txq_lock);
   9230 		return 0;
   9231 	}
   9232 
   9233 	WM_Q_EVCNT_INCR(txq, txdw);
   9234 	txmore = wm_txeof(txq, txlimit);
    9235 	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9236 	mutex_exit(txq->txq_lock);
   9237 
   9238 	DPRINTF(WM_DEBUG_RX,
   9239 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9240 	mutex_enter(rxq->rxq_lock);
   9241 
   9242 	if (rxq->rxq_stopping) {
   9243 		mutex_exit(rxq->rxq_lock);
   9244 		return 0;
   9245 	}
   9246 
   9247 	WM_Q_EVCNT_INCR(rxq, intr);
   9248 	rxmore = wm_rxeof(rxq, rxlimit);
   9249 	mutex_exit(rxq->rxq_lock);
   9250 
   9251 	wm_itrs_writereg(sc, wmq);
   9252 
   9253 	if (txmore || rxmore)
   9254 		softint_schedule(wmq->wmq_si);
   9255 	else
   9256 		wm_txrxintr_enable(wmq);
   9257 
   9258 	return 1;
   9259 }
   9260 
   9261 static void
   9262 wm_handle_queue(void *arg)
   9263 {
   9264 	struct wm_queue *wmq = arg;
   9265 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9266 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9267 	struct wm_softc *sc = txq->txq_sc;
   9268 	u_int txlimit = sc->sc_tx_process_limit;
   9269 	u_int rxlimit = sc->sc_rx_process_limit;
   9270 	bool txmore;
   9271 	bool rxmore;
   9272 
   9273 	mutex_enter(txq->txq_lock);
   9274 	if (txq->txq_stopping) {
   9275 		mutex_exit(txq->txq_lock);
   9276 		return;
   9277 	}
   9278 	txmore = wm_txeof(txq, txlimit);
   9279 	wm_deferred_start_locked(txq);
   9280 	mutex_exit(txq->txq_lock);
   9281 
   9282 	mutex_enter(rxq->rxq_lock);
   9283 	if (rxq->rxq_stopping) {
   9284 		mutex_exit(rxq->rxq_lock);
   9285 		return;
   9286 	}
   9287 	WM_Q_EVCNT_INCR(rxq, defer);
   9288 	rxmore = wm_rxeof(rxq, rxlimit);
   9289 	mutex_exit(rxq->rxq_lock);
   9290 
   9291 	if (txmore || rxmore)
   9292 		softint_schedule(wmq->wmq_si);
   9293 	else
   9294 		wm_txrxintr_enable(wmq);
   9295 }
   9296 
   9297 /*
   9298  * wm_linkintr_msix:
   9299  *
   9300  *	Interrupt service routine for link status change for MSI-X.
   9301  */
   9302 static int
   9303 wm_linkintr_msix(void *arg)
   9304 {
   9305 	struct wm_softc *sc = arg;
   9306 	uint32_t reg;
    9307 	bool has_rxo = false;
   9308 
   9309 	DPRINTF(WM_DEBUG_LINK,
   9310 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9311 
   9312 	reg = CSR_READ(sc, WMREG_ICR);
   9313 	WM_CORE_LOCK(sc);
   9314 	if (sc->sc_core_stopping)
   9315 		goto out;
   9316 
   9317 	if ((reg & ICR_LSC) != 0) {
   9318 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9319 		wm_linkintr(sc, ICR_LSC);
   9320 	}
   9321 
    9322 	/*
    9323 	 * XXX 82574 MSI-X mode workaround
    9324 	 *
    9325 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9326 	 * MSI-X vector and, furthermore, raises neither the ICR_RXQ(0) nor
    9327 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9328 	 * interrupts by writing WMREG_ICS to get receive packets processed.
    9329 	 */
   9330 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9331 #if defined(WM_DEBUG)
   9332 		log(LOG_WARNING, "%s: Receive overrun\n",
   9333 		    device_xname(sc->sc_dev));
   9334 #endif /* defined(WM_DEBUG) */
   9335 
   9336 		has_rxo = true;
    9337 		/*
    9338 		 * The RXO interrupt fires at a very high rate when receive
    9339 		 * traffic is heavy, so we use polling mode for ICR_OTHER just
    9340 		 * as for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
    9341 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    9342 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9343 		 */
   9344 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9345 
   9346 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9347 	}
   9348 
   9349 
   9350 
   9351 out:
   9352 	WM_CORE_UNLOCK(sc);
   9353 
   9354 	if (sc->sc_type == WM_T_82574) {
   9355 		if (!has_rxo)
   9356 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9357 		else
   9358 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9359 	} else if (sc->sc_type == WM_T_82575)
   9360 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9361 	else
   9362 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9363 
   9364 	return 1;
   9365 }
   9366 
   9367 /*
   9368  * Media related.
   9369  * GMII, SGMII, TBI (and SERDES)
   9370  */
   9371 
   9372 /* Common */
   9373 
   9374 /*
   9375  * wm_tbi_serdes_set_linkled:
   9376  *
   9377  *	Update the link LED on TBI and SERDES devices.
   9378  */
   9379 static void
   9380 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9381 {
   9382 
   9383 	if (sc->sc_tbi_linkup)
   9384 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9385 	else
   9386 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9387 
   9388 	/* 82540 or newer devices are active low */
   9389 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9390 
   9391 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9392 }
   9393 
   9394 /* GMII related */
   9395 
   9396 /*
   9397  * wm_gmii_reset:
   9398  *
   9399  *	Reset the PHY.
   9400  */
   9401 static void
   9402 wm_gmii_reset(struct wm_softc *sc)
   9403 {
   9404 	uint32_t reg;
   9405 	int rv;
   9406 
   9407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9408 		device_xname(sc->sc_dev), __func__));
   9409 
   9410 	rv = sc->phy.acquire(sc);
   9411 	if (rv != 0) {
   9412 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9413 		    __func__);
   9414 		return;
   9415 	}
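         	/*
         	 * PHY access goes through the hardware semaphore, which on
         	 * many of these devices is shared with management firmware.
         	 */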
   9416 
   9417 	switch (sc->sc_type) {
   9418 	case WM_T_82542_2_0:
   9419 	case WM_T_82542_2_1:
   9420 		/* null */
   9421 		break;
   9422 	case WM_T_82543:
   9423 		/*
   9424 		 * With 82543, we need to force speed and duplex on the MAC
   9425 		 * equal to what the PHY speed and duplex configuration is.
   9426 		 * In addition, we need to perform a hardware reset on the PHY
   9427 		 * to take it out of reset.
   9428 		 */
   9429 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9430 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9431 
   9432 		/* The PHY reset pin is active-low. */
   9433 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9434 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9435 		    CTRL_EXT_SWDPIN(4));
   9436 		reg |= CTRL_EXT_SWDPIO(4);
   9437 
   9438 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9439 		CSR_WRITE_FLUSH(sc);
   9440 		delay(10*1000);
   9441 
   9442 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9443 		CSR_WRITE_FLUSH(sc);
   9444 		delay(150);
   9445 #if 0
   9446 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9447 #endif
   9448 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9449 		break;
   9450 	case WM_T_82544:	/* reset 10000us */
   9451 	case WM_T_82540:
   9452 	case WM_T_82545:
   9453 	case WM_T_82545_3:
   9454 	case WM_T_82546:
   9455 	case WM_T_82546_3:
   9456 	case WM_T_82541:
   9457 	case WM_T_82541_2:
   9458 	case WM_T_82547:
   9459 	case WM_T_82547_2:
   9460 	case WM_T_82571:	/* reset 100us */
   9461 	case WM_T_82572:
   9462 	case WM_T_82573:
   9463 	case WM_T_82574:
   9464 	case WM_T_82575:
   9465 	case WM_T_82576:
   9466 	case WM_T_82580:
   9467 	case WM_T_I350:
   9468 	case WM_T_I354:
   9469 	case WM_T_I210:
   9470 	case WM_T_I211:
   9471 	case WM_T_82583:
   9472 	case WM_T_80003:
   9473 		/* generic reset */
   9474 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9475 		CSR_WRITE_FLUSH(sc);
   9476 		delay(20000);
   9477 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9478 		CSR_WRITE_FLUSH(sc);
   9479 		delay(20000);
   9480 
   9481 		if ((sc->sc_type == WM_T_82541)
   9482 		    || (sc->sc_type == WM_T_82541_2)
   9483 		    || (sc->sc_type == WM_T_82547)
   9484 		    || (sc->sc_type == WM_T_82547_2)) {
    9485 			/* Workarounds for IGP are done in igp_reset() */
   9486 			/* XXX add code to set LED after phy reset */
   9487 		}
   9488 		break;
   9489 	case WM_T_ICH8:
   9490 	case WM_T_ICH9:
   9491 	case WM_T_ICH10:
   9492 	case WM_T_PCH:
   9493 	case WM_T_PCH2:
   9494 	case WM_T_PCH_LPT:
   9495 	case WM_T_PCH_SPT:
   9496 	case WM_T_PCH_CNP:
   9497 		/* generic reset */
   9498 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9499 		CSR_WRITE_FLUSH(sc);
   9500 		delay(100);
   9501 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9502 		CSR_WRITE_FLUSH(sc);
   9503 		delay(150);
   9504 		break;
   9505 	default:
   9506 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9507 		    __func__);
   9508 		break;
   9509 	}
   9510 
   9511 	sc->phy.release(sc);
   9512 
   9513 	/* get_cfg_done */
   9514 	wm_get_cfg_done(sc);
   9515 
   9516 	/* extra setup */
   9517 	switch (sc->sc_type) {
   9518 	case WM_T_82542_2_0:
   9519 	case WM_T_82542_2_1:
   9520 	case WM_T_82543:
   9521 	case WM_T_82544:
   9522 	case WM_T_82540:
   9523 	case WM_T_82545:
   9524 	case WM_T_82545_3:
   9525 	case WM_T_82546:
   9526 	case WM_T_82546_3:
   9527 	case WM_T_82541_2:
   9528 	case WM_T_82547_2:
   9529 	case WM_T_82571:
   9530 	case WM_T_82572:
   9531 	case WM_T_82573:
   9532 	case WM_T_82574:
   9533 	case WM_T_82583:
   9534 	case WM_T_82575:
   9535 	case WM_T_82576:
   9536 	case WM_T_82580:
   9537 	case WM_T_I350:
   9538 	case WM_T_I354:
   9539 	case WM_T_I210:
   9540 	case WM_T_I211:
   9541 	case WM_T_80003:
   9542 		/* null */
   9543 		break;
   9544 	case WM_T_82541:
   9545 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9547 		break;
   9548 	case WM_T_ICH8:
   9549 	case WM_T_ICH9:
   9550 	case WM_T_ICH10:
   9551 	case WM_T_PCH:
   9552 	case WM_T_PCH2:
   9553 	case WM_T_PCH_LPT:
   9554 	case WM_T_PCH_SPT:
   9555 	case WM_T_PCH_CNP:
   9556 		wm_phy_post_reset(sc);
   9557 		break;
   9558 	default:
   9559 		panic("%s: unknown type\n", __func__);
   9560 		break;
   9561 	}
   9562 }
   9563 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since
 * the PHY registers cannot be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known, so the
 * PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The table of known entries is not exhaustive either, but
 * the result should be better than that of the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9582 static void
   9583 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9584     uint16_t phy_model)
   9585 {
   9586 	device_t dev = sc->sc_dev;
   9587 	struct mii_data *mii = &sc->sc_mii;
   9588 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9589 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9590 	mii_readreg_t new_readreg;
   9591 	mii_writereg_t new_writereg;
   9592 
   9593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9594 		device_xname(sc->sc_dev), __func__));
   9595 
   9596 	if (mii->mii_readreg == NULL) {
   9597 		/*
   9598 		 *  This is the first call of this function. For ICH and PCH
   9599 		 * variants, it's difficult to determine the PHY access method
   9600 		 * by sc_type, so use the PCI product ID for some devices.
   9601 		 */
   9602 
   9603 		switch (sc->sc_pcidevid) {
   9604 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9605 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9606 			/* 82577 */
   9607 			new_phytype = WMPHY_82577;
   9608 			break;
   9609 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9610 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9611 			/* 82578 */
   9612 			new_phytype = WMPHY_82578;
   9613 			break;
   9614 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9615 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9616 			/* 82579 */
   9617 			new_phytype = WMPHY_82579;
   9618 			break;
   9619 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9620 		case PCI_PRODUCT_INTEL_82801I_BM:
   9621 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9622 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9623 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9624 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9625 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9626 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9627 			/* ICH8, 9, 10 with 82567 */
   9628 			new_phytype = WMPHY_BM;
   9629 			break;
   9630 		default:
   9631 			break;
   9632 		}
   9633 	} else {
   9634 		/* It's not the first call. Use PHY OUI and model */
   9635 		switch (phy_oui) {
   9636 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9637 			switch (phy_model) {
   9638 			case 0x0004: /* XXX */
   9639 				new_phytype = WMPHY_82578;
   9640 				break;
   9641 			default:
   9642 				break;
   9643 			}
   9644 			break;
   9645 		case MII_OUI_xxMARVELL:
   9646 			switch (phy_model) {
   9647 			case MII_MODEL_xxMARVELL_I210:
   9648 				new_phytype = WMPHY_I210;
   9649 				break;
   9650 			case MII_MODEL_xxMARVELL_E1011:
   9651 			case MII_MODEL_xxMARVELL_E1000_3:
   9652 			case MII_MODEL_xxMARVELL_E1000_5:
   9653 			case MII_MODEL_xxMARVELL_E1112:
   9654 				new_phytype = WMPHY_M88;
   9655 				break;
   9656 			case MII_MODEL_xxMARVELL_E1149:
   9657 				new_phytype = WMPHY_BM;
   9658 				break;
   9659 			case MII_MODEL_xxMARVELL_E1111:
   9660 			case MII_MODEL_xxMARVELL_I347:
   9661 			case MII_MODEL_xxMARVELL_E1512:
   9662 			case MII_MODEL_xxMARVELL_E1340M:
   9663 			case MII_MODEL_xxMARVELL_E1543:
   9664 				new_phytype = WMPHY_M88;
   9665 				break;
   9666 			case MII_MODEL_xxMARVELL_I82563:
   9667 				new_phytype = WMPHY_GG82563;
   9668 				break;
   9669 			default:
   9670 				break;
   9671 			}
   9672 			break;
   9673 		case MII_OUI_INTEL:
   9674 			switch (phy_model) {
   9675 			case MII_MODEL_INTEL_I82577:
   9676 				new_phytype = WMPHY_82577;
   9677 				break;
   9678 			case MII_MODEL_INTEL_I82579:
   9679 				new_phytype = WMPHY_82579;
   9680 				break;
   9681 			case MII_MODEL_INTEL_I217:
   9682 				new_phytype = WMPHY_I217;
   9683 				break;
   9684 			case MII_MODEL_INTEL_I82580:
   9685 			case MII_MODEL_INTEL_I350:
   9686 				new_phytype = WMPHY_82580;
   9687 				break;
   9688 			default:
   9689 				break;
   9690 			}
   9691 			break;
   9692 		case MII_OUI_yyINTEL:
   9693 			switch (phy_model) {
   9694 			case MII_MODEL_yyINTEL_I82562G:
   9695 			case MII_MODEL_yyINTEL_I82562EM:
   9696 			case MII_MODEL_yyINTEL_I82562ET:
   9697 				new_phytype = WMPHY_IFE;
   9698 				break;
   9699 			case MII_MODEL_yyINTEL_IGP01E1000:
   9700 				new_phytype = WMPHY_IGP;
   9701 				break;
   9702 			case MII_MODEL_yyINTEL_I82566:
   9703 				new_phytype = WMPHY_IGP_3;
   9704 				break;
   9705 			default:
   9706 				break;
   9707 			}
   9708 			break;
   9709 		default:
   9710 			break;
   9711 		}
   9712 		if (new_phytype == WMPHY_UNKNOWN)
   9713 			aprint_verbose_dev(dev,
   9714 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9715 			    __func__, phy_oui, phy_model);
   9716 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev,
			    "Previously assumed PHY type (%u) was incorrect. "
			    "PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   9723 	}
   9724 
   9725 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9726 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9727 		/* SGMII */
   9728 		new_readreg = wm_sgmii_readreg;
   9729 		new_writereg = wm_sgmii_writereg;
   9730 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9731 		/* BM2 (phyaddr == 1) */
   9732 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9733 		    && (new_phytype != WMPHY_BM)
   9734 		    && (new_phytype != WMPHY_UNKNOWN))
   9735 			doubt_phytype = new_phytype;
   9736 		new_phytype = WMPHY_BM;
   9737 		new_readreg = wm_gmii_bm_readreg;
   9738 		new_writereg = wm_gmii_bm_writereg;
   9739 	} else if (sc->sc_type >= WM_T_PCH) {
   9740 		/* All PCH* use _hv_ */
   9741 		new_readreg = wm_gmii_hv_readreg;
   9742 		new_writereg = wm_gmii_hv_writereg;
   9743 	} else if (sc->sc_type >= WM_T_ICH8) {
   9744 		/* non-82567 ICH8, 9 and 10 */
   9745 		new_readreg = wm_gmii_i82544_readreg;
   9746 		new_writereg = wm_gmii_i82544_writereg;
   9747 	} else if (sc->sc_type >= WM_T_80003) {
   9748 		/* 80003 */
   9749 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9750 		    && (new_phytype != WMPHY_GG82563)
   9751 		    && (new_phytype != WMPHY_UNKNOWN))
   9752 			doubt_phytype = new_phytype;
   9753 		new_phytype = WMPHY_GG82563;
   9754 		new_readreg = wm_gmii_i80003_readreg;
   9755 		new_writereg = wm_gmii_i80003_writereg;
   9756 	} else if (sc->sc_type >= WM_T_I210) {
   9757 		/* I210 and I211 */
   9758 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9759 		    && (new_phytype != WMPHY_I210)
   9760 		    && (new_phytype != WMPHY_UNKNOWN))
   9761 			doubt_phytype = new_phytype;
   9762 		new_phytype = WMPHY_I210;
   9763 		new_readreg = wm_gmii_gs40g_readreg;
   9764 		new_writereg = wm_gmii_gs40g_writereg;
   9765 	} else if (sc->sc_type >= WM_T_82580) {
   9766 		/* 82580, I350 and I354 */
   9767 		new_readreg = wm_gmii_82580_readreg;
   9768 		new_writereg = wm_gmii_82580_writereg;
   9769 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9771 		new_readreg = wm_gmii_i82544_readreg;
   9772 		new_writereg = wm_gmii_i82544_writereg;
   9773 	} else {
   9774 		new_readreg = wm_gmii_i82543_readreg;
   9775 		new_writereg = wm_gmii_i82543_writereg;
   9776 	}
   9777 
   9778 	if (new_phytype == WMPHY_BM) {
   9779 		/* All BM use _bm_ */
   9780 		new_readreg = wm_gmii_bm_readreg;
   9781 		new_writereg = wm_gmii_bm_writereg;
   9782 	}
   9783 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9784 		/* All PCH* use _hv_ */
   9785 		new_readreg = wm_gmii_hv_readreg;
   9786 		new_writereg = wm_gmii_hv_writereg;
   9787 	}
   9788 
   9789 	/* Diag output */
   9790 	if (doubt_phytype != WMPHY_UNKNOWN)
   9791 		aprint_error_dev(dev, "Assumed new PHY type was "
   9792 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9793 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev,
		    "Previously assumed PHY type (%u) was incorrect. "
		    "New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   9799 
   9800 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9801 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9802 
   9803 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9804 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9805 		    "function was incorrect.\n");
   9806 
   9807 	/* Update now */
   9808 	sc->sc_phytype = new_phytype;
   9809 	mii->mii_readreg = new_readreg;
   9810 	mii->mii_writereg = new_writereg;
   9811 	if (new_readreg == wm_gmii_hv_readreg) {
   9812 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9813 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9814 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9815 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9816 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9817 	}
   9818 }
   9819 
   9820 /*
   9821  * wm_get_phy_id_82575:
   9822  *
   9823  * Return PHY ID. Return -1 if it failed.
   9824  */
   9825 static int
   9826 wm_get_phy_id_82575(struct wm_softc *sc)
   9827 {
   9828 	uint32_t reg;
   9829 	int phyid = -1;
   9830 
   9831 	/* XXX */
   9832 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9833 		return -1;
   9834 
   9835 	if (wm_sgmii_uses_mdio(sc)) {
   9836 		switch (sc->sc_type) {
   9837 		case WM_T_82575:
   9838 		case WM_T_82576:
   9839 			reg = CSR_READ(sc, WMREG_MDIC);
   9840 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9841 			break;
   9842 		case WM_T_82580:
   9843 		case WM_T_I350:
   9844 		case WM_T_I354:
   9845 		case WM_T_I210:
   9846 		case WM_T_I211:
   9847 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9848 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9849 			break;
   9850 		default:
   9851 			return -1;
   9852 		}
   9853 	}
   9854 
   9855 	return phyid;
   9856 }
   9857 
   9858 
   9859 /*
   9860  * wm_gmii_mediainit:
   9861  *
   9862  *	Initialize media for use on 1000BASE-T devices.
   9863  */
   9864 static void
   9865 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9866 {
   9867 	device_t dev = sc->sc_dev;
   9868 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9869 	struct mii_data *mii = &sc->sc_mii;
   9870 	uint32_t reg;
   9871 
   9872 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9873 		device_xname(sc->sc_dev), __func__));
   9874 
   9875 	/* We have GMII. */
   9876 	sc->sc_flags |= WM_F_HAS_MII;
   9877 
   9878 	if (sc->sc_type == WM_T_80003)
   9879 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9880 	else
   9881 		sc->sc_tipg = TIPG_1000T_DFLT;
   9882 
   9883 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9884 	if ((sc->sc_type == WM_T_82580)
   9885 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9886 	    || (sc->sc_type == WM_T_I211)) {
   9887 		reg = CSR_READ(sc, WMREG_PHPM);
   9888 		reg &= ~PHPM_GO_LINK_D;
   9889 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9890 	}
   9891 
   9892 	/*
   9893 	 * Let the chip set speed/duplex on its own based on
   9894 	 * signals from the PHY.
   9895 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9896 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9897 	 */
   9898 	sc->sc_ctrl |= CTRL_SLU;
   9899 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9900 
   9901 	/* Initialize our media structures and probe the GMII. */
   9902 	mii->mii_ifp = ifp;
   9903 
   9904 	mii->mii_statchg = wm_gmii_statchg;
   9905 
	/* Switch PHY control from SMBus to PCIe */
   9907 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9908 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9909 	    || (sc->sc_type == WM_T_PCH_CNP))
   9910 		wm_smbustopci(sc);
   9911 
   9912 	wm_gmii_reset(sc);
   9913 
   9914 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9915 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9916 	    wm_gmii_mediastatus);
   9917 
   9918 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9919 	    || (sc->sc_type == WM_T_82580)
   9920 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9921 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9922 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9923 			/* Attach only one port */
   9924 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9925 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9926 		} else {
   9927 			int i, id;
   9928 			uint32_t ctrl_ext;
   9929 
   9930 			id = wm_get_phy_id_82575(sc);
   9931 			if (id != -1) {
   9932 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9933 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9934 			}
   9935 			if ((id == -1)
   9936 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9938 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9939 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9940 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9941 				CSR_WRITE_FLUSH(sc);
   9942 				delay(300*1000); /* XXX too long */
   9943 
				/* Try PHY addresses 1 through 7 */
   9945 				for (i = 1; i < 8; i++)
   9946 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9947 					    0xffffffff, i, MII_OFFSET_ANY,
   9948 					    MIIF_DOPAUSE);
   9949 
				/* Restore the previous SFP cage power state */
   9951 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9952 			}
   9953 		}
   9954 	} else
   9955 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9956 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9957 
   9958 	/*
   9959 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9960 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9961 	 */
   9962 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9963 		|| (sc->sc_type == WM_T_PCH_SPT)
   9964 		|| (sc->sc_type == WM_T_PCH_CNP))
   9965 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9966 		wm_set_mdio_slow_mode_hv(sc);
   9967 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9968 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9969 	}
   9970 
   9971 	/*
   9972 	 * (For ICH8 variants)
   9973 	 * If PHY detection failed, use BM's r/w function and retry.
   9974 	 */
   9975 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9976 		/* if failed, retry with *_bm_* */
   9977 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9978 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9979 		    sc->sc_phytype);
   9980 		sc->sc_phytype = WMPHY_BM;
   9981 		mii->mii_readreg = wm_gmii_bm_readreg;
   9982 		mii->mii_writereg = wm_gmii_bm_writereg;
   9983 
   9984 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9985 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9986 	}
   9987 
   9988 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9990 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9991 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9992 		sc->sc_phytype = WMPHY_NONE;
   9993 	} else {
   9994 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9995 
		/*
		 * A PHY was found. Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
		 */
   10000 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10001 		    child->mii_mpd_model);
   10002 
   10003 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10004 	}
   10005 }
   10006 
   10007 /*
   10008  * wm_gmii_mediachange:	[ifmedia interface function]
   10009  *
   10010  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10011  */
   10012 static int
   10013 wm_gmii_mediachange(struct ifnet *ifp)
   10014 {
   10015 	struct wm_softc *sc = ifp->if_softc;
   10016 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10017 	int rc;
   10018 
   10019 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10020 		device_xname(sc->sc_dev), __func__));
   10021 	if ((ifp->if_flags & IFF_UP) == 0)
   10022 		return 0;
   10023 
   10024 	/* Disable D0 LPLU. */
   10025 	wm_lplu_d0_disable(sc);
   10026 
   10027 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10028 	sc->sc_ctrl |= CTRL_SLU;
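	/*
	 * With autonegotiation, or on chips newer than the 82543, let the
	 * MAC follow the PHY; otherwise force the user-selected speed and
	 * duplex below.
	 */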
   10029 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10030 	    || (sc->sc_type > WM_T_82543)) {
   10031 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10032 	} else {
   10033 		sc->sc_ctrl &= ~CTRL_ASDE;
   10034 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10035 		if (ife->ifm_media & IFM_FDX)
   10036 			sc->sc_ctrl |= CTRL_FD;
   10037 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10038 		case IFM_10_T:
   10039 			sc->sc_ctrl |= CTRL_SPEED_10;
   10040 			break;
   10041 		case IFM_100_TX:
   10042 			sc->sc_ctrl |= CTRL_SPEED_100;
   10043 			break;
   10044 		case IFM_1000_T:
   10045 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10046 			break;
   10047 		default:
   10048 			panic("wm_gmii_mediachange: bad media 0x%x",
   10049 			    ife->ifm_media);
   10050 		}
   10051 	}
   10052 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10053 	CSR_WRITE_FLUSH(sc);
   10054 	if (sc->sc_type <= WM_T_82543)
   10055 		wm_gmii_reset(sc);
   10056 
   10057 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10058 		return 0;
   10059 	return rc;
   10060 }
   10061 
   10062 /*
   10063  * wm_gmii_mediastatus:	[ifmedia interface function]
   10064  *
   10065  *	Get the current interface media status on a 1000BASE-T device.
   10066  */
   10067 static void
   10068 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10069 {
   10070 	struct wm_softc *sc = ifp->if_softc;
   10071 
   10072 	ether_mediastatus(ifp, ifmr);
   10073 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10074 	    | sc->sc_flowflags;
   10075 }
   10076 
   10077 #define	MDI_IO		CTRL_SWDPIN(2)
   10078 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10079 #define	MDI_CLK		CTRL_SWDPIN(3)
   10080 
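/*
 * Bit-bang the given bits out on the software-controllable pins, MSB
 * first: for each bit, set MDI_IO accordingly and pulse MDI_CLK with
 * roughly 10us of setup and hold time.
 */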
   10081 static void
   10082 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10083 {
   10084 	uint32_t i, v;
   10085 
   10086 	v = CSR_READ(sc, WMREG_CTRL);
   10087 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10088 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10089 
   10090 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10091 		if (data & i)
   10092 			v |= MDI_IO;
   10093 		else
   10094 			v &= ~MDI_IO;
   10095 		CSR_WRITE(sc, WMREG_CTRL, v);
   10096 		CSR_WRITE_FLUSH(sc);
   10097 		delay(10);
   10098 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10099 		CSR_WRITE_FLUSH(sc);
   10100 		delay(10);
   10101 		CSR_WRITE(sc, WMREG_CTRL, v);
   10102 		CSR_WRITE_FLUSH(sc);
   10103 		delay(10);
   10104 	}
   10105 }
   10106 
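/*
 * Bit-bang a read back from the PHY: clock the turnaround cycle, shift
 * in 16 data bits (MSB first), then clock one trailing idle cycle.
 */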
   10107 static uint32_t
   10108 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10109 {
   10110 	uint32_t v, i, data = 0;
   10111 
   10112 	v = CSR_READ(sc, WMREG_CTRL);
   10113 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10114 	v |= CTRL_SWDPIO(3);
   10115 
   10116 	CSR_WRITE(sc, WMREG_CTRL, v);
   10117 	CSR_WRITE_FLUSH(sc);
   10118 	delay(10);
   10119 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10120 	CSR_WRITE_FLUSH(sc);
   10121 	delay(10);
   10122 	CSR_WRITE(sc, WMREG_CTRL, v);
   10123 	CSR_WRITE_FLUSH(sc);
   10124 	delay(10);
   10125 
   10126 	for (i = 0; i < 16; i++) {
   10127 		data <<= 1;
   10128 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10129 		CSR_WRITE_FLUSH(sc);
   10130 		delay(10);
   10131 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10132 			data |= 1;
   10133 		CSR_WRITE(sc, WMREG_CTRL, v);
   10134 		CSR_WRITE_FLUSH(sc);
   10135 		delay(10);
   10136 	}
   10137 
   10138 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10139 	CSR_WRITE_FLUSH(sc);
   10140 	delay(10);
   10141 	CSR_WRITE(sc, WMREG_CTRL, v);
   10142 	CSR_WRITE_FLUSH(sc);
   10143 	delay(10);
   10144 
   10145 	return data;
   10146 }
   10147 
   10148 #undef MDI_IO
   10149 #undef MDI_DIR
   10150 #undef MDI_CLK
   10151 
   10152 /*
   10153  * wm_gmii_i82543_readreg:	[mii interface function]
   10154  *
   10155  *	Read a PHY register on the GMII (i82543 version).
   10156  */
   10157 static int
   10158 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10159 {
   10160 	struct wm_softc *sc = device_private(dev);
   10161 	int rv;
   10162 
   10163 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10164 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10165 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10166 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10167 
   10168 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10169 		device_xname(dev), phy, reg, rv));
   10170 
   10171 	return rv;
   10172 }
   10173 
   10174 /*
   10175  * wm_gmii_i82543_writereg:	[mii interface function]
   10176  *
   10177  *	Write a PHY register on the GMII (i82543 version).
   10178  */
   10179 static void
   10180 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10181 {
   10182 	struct wm_softc *sc = device_private(dev);
   10183 
   10184 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10185 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10186 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10187 	    (MII_COMMAND_START << 30), 32);
   10188 }
   10189 
   10190 /*
   10191  * wm_gmii_mdic_readreg:	[mii interface function]
   10192  *
   10193  *	Read a PHY register on the GMII.
   10194  */
   10195 static int
   10196 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10197 {
   10198 	struct wm_softc *sc = device_private(dev);
   10199 	uint32_t mdic = 0;
   10200 	int i, rv;
   10201 
   10202 	if (reg > MII_ADDRMASK) {
   10203 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10204 		    __func__, sc->sc_phytype, reg);
   10205 		reg &= MII_ADDRMASK;
   10206 	}
   10207 
   10208 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10209 	    MDIC_REGADD(reg));
   10210 
   10211 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10212 		delay(50);
   10213 		mdic = CSR_READ(sc, WMREG_MDIC);
   10214 		if (mdic & MDIC_READY)
   10215 			break;
   10216 	}
   10217 
   10218 	if ((mdic & MDIC_READY) == 0) {
   10219 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10220 		    device_xname(dev), phy, reg);
   10221 		return 0;
   10222 	} else if (mdic & MDIC_E) {
   10223 #if 0 /* This is normal if no PHY is present. */
   10224 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10225 		    device_xname(dev), phy, reg);
   10226 #endif
   10227 		return 0;
   10228 	} else {
   10229 		rv = MDIC_DATA(mdic);
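		/*
		 * XXX A read of all ones usually means that no PHY
		 * answered, so squash it to 0.
		 */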
   10230 		if (rv == 0xffff)
   10231 			rv = 0;
   10232 	}
   10233 
   10234 	/*
   10235 	 * Allow some time after each MDIC transaction to avoid
   10236 	 * reading duplicate data in the next MDIC transaction.
   10237 	 */
   10238 	if (sc->sc_type == WM_T_PCH2)
   10239 		delay(100);
   10240 
   10241 	return rv;
   10242 }
   10243 
   10244 /*
   10245  * wm_gmii_mdic_writereg:	[mii interface function]
   10246  *
   10247  *	Write a PHY register on the GMII.
   10248  */
   10249 static void
   10250 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10251 {
   10252 	struct wm_softc *sc = device_private(dev);
   10253 	uint32_t mdic = 0;
   10254 	int i;
   10255 
   10256 	if (reg > MII_ADDRMASK) {
   10257 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10258 		    __func__, sc->sc_phytype, reg);
   10259 		reg &= MII_ADDRMASK;
   10260 	}
   10261 
   10262 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10263 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10264 
   10265 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10266 		delay(50);
   10267 		mdic = CSR_READ(sc, WMREG_MDIC);
   10268 		if (mdic & MDIC_READY)
   10269 			break;
   10270 	}
   10271 
   10272 	if ((mdic & MDIC_READY) == 0) {
   10273 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10274 		    device_xname(dev), phy, reg);
   10275 		return;
   10276 	} else if (mdic & MDIC_E) {
   10277 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10278 		    device_xname(dev), phy, reg);
   10279 		return;
   10280 	}
   10281 
   10282 	/*
   10283 	 * Allow some time after each MDIC transaction to avoid
   10284 	 * reading duplicate data in the next MDIC transaction.
   10285 	 */
   10286 	if (sc->sc_type == WM_T_PCH2)
   10287 		delay(100);
   10288 }
   10289 
   10290 /*
   10291  * wm_gmii_i82544_readreg:	[mii interface function]
   10292  *
   10293  *	Read a PHY register on the GMII.
   10294  */
   10295 static int
   10296 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10297 {
   10298 	struct wm_softc *sc = device_private(dev);
   10299 	uint16_t val;
   10300 
   10301 	if (sc->phy.acquire(sc)) {
   10302 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10303 		return 0;
   10304 	}
   10305 
   10306 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10307 
   10308 	sc->phy.release(sc);
   10309 
   10310 	return val;
   10311 }
   10312 
   10313 static int
   10314 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10315 {
   10316 	struct wm_softc *sc = device_private(dev);
   10317 
   10318 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10319 		switch (sc->sc_phytype) {
   10320 		case WMPHY_IGP:
   10321 		case WMPHY_IGP_2:
   10322 		case WMPHY_IGP_3:
   10323 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10324 			    reg);
   10325 			break;
   10326 		default:
   10327 #ifdef WM_DEBUG
   10328 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10329 			    __func__, sc->sc_phytype, reg);
   10330 #endif
   10331 			break;
   10332 		}
   10333 	}
   10334 
   10335 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10336 
   10337 	return 0;
   10338 }
   10339 
   10340 /*
   10341  * wm_gmii_i82544_writereg:	[mii interface function]
   10342  *
   10343  *	Write a PHY register on the GMII.
   10344  */
   10345 static void
   10346 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10347 {
   10348 	struct wm_softc *sc = device_private(dev);
   10349 
   10350 	if (sc->phy.acquire(sc)) {
   10351 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10352 		return;
   10353 	}
   10354 
	wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10356 	sc->phy.release(sc);
   10357 }
   10358 
   10359 static int
   10360 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10361 {
   10362 	struct wm_softc *sc = device_private(dev);
   10363 
   10364 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10365 		switch (sc->sc_phytype) {
   10366 		case WMPHY_IGP:
   10367 		case WMPHY_IGP_2:
   10368 		case WMPHY_IGP_3:
   10369 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10370 			    reg);
   10371 			break;
   10372 		default:
   10373 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10375 			    __func__, sc->sc_phytype, reg);
   10376 #endif
   10377 			break;
   10378 		}
   10379 	}
   10380 
   10381 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10382 
   10383 	return 0;
   10384 }
   10385 
   10386 /*
   10387  * wm_gmii_i80003_readreg:	[mii interface function]
   10388  *
 *	Read a PHY register on the Kumeran bus (80003's GG82563 PHY).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10392  */
   10393 static int
   10394 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10395 {
   10396 	struct wm_softc *sc = device_private(dev);
   10397 	int page_select, temp;
   10398 	int rv;
   10399 
   10400 	if (phy != 1) /* only one PHY on kumeran bus */
   10401 		return 0;
   10402 
   10403 	if (sc->phy.acquire(sc)) {
   10404 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10405 		return 0;
   10406 	}
   10407 
   10408 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10409 		page_select = GG82563_PHY_PAGE_SELECT;
   10410 	else {
   10411 		/*
   10412 		 * Use Alternative Page Select register to access registers
   10413 		 * 30 and 31.
   10414 		 */
   10415 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10416 	}
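	/* The page number is taken from the upper bits of the register */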
   10417 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10418 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10419 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   10424 		delay(200);
   10425 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10426 			device_printf(dev, "%s failed\n", __func__);
   10427 			rv = 0; /* XXX */
   10428 			goto out;
   10429 		}
   10430 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10431 		delay(200);
   10432 	} else
   10433 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10434 
   10435 out:
   10436 	sc->phy.release(sc);
   10437 	return rv;
   10438 }
   10439 
   10440 /*
   10441  * wm_gmii_i80003_writereg:	[mii interface function]
   10442  *
 *	Write a PHY register on the Kumeran bus (80003's GG82563 PHY).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10446  */
   10447 static void
   10448 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10449 {
   10450 	struct wm_softc *sc = device_private(dev);
   10451 	int page_select, temp;
   10452 
   10453 	if (phy != 1) /* only one PHY on kumeran bus */
   10454 		return;
   10455 
   10456 	if (sc->phy.acquire(sc)) {
   10457 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10458 		return;
   10459 	}
   10460 
   10461 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10462 		page_select = GG82563_PHY_PAGE_SELECT;
   10463 	else {
   10464 		/*
   10465 		 * Use Alternative Page Select register to access registers
   10466 		 * 30 and 31.
   10467 		 */
   10468 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10469 	}
   10470 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10471 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10472 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   10477 		delay(200);
   10478 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10479 			device_printf(dev, "%s failed\n", __func__);
   10480 			goto out;
   10481 		}
   10482 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10483 		delay(200);
   10484 	} else
   10485 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10486 
   10487 out:
   10488 	sc->phy.release(sc);
   10489 }
   10490 
   10491 /*
   10492  * wm_gmii_bm_readreg:	[mii interface function]
   10493  *
 *	Read a PHY register on the BM PHYs (82567 on ICH8/9/10, and the
 * BM2 PHY on 82574/82583).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10497  */
   10498 static int
   10499 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10500 {
   10501 	struct wm_softc *sc = device_private(dev);
   10502 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10503 	uint16_t val;
   10504 	int rv;
   10505 
   10506 	if (sc->phy.acquire(sc)) {
   10507 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10508 		return 0;
   10509 	}
   10510 
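	/*
	 * Registers on pages >= 768, register 25 on page 0 and register 31
	 * must be accessed at PHY address 1.
	 */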
   10511 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10512 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10513 		    || (reg == 31)) ? 1 : phy;
   10514 	/* Page 800 works differently than the rest so it has its own func */
   10515 	if (page == BM_WUC_PAGE) {
   10516 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10517 		rv = val;
   10518 		goto release;
   10519 	}
   10520 
   10521 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10522 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10523 		    && (sc->sc_type != WM_T_82583))
   10524 			wm_gmii_mdic_writereg(dev, phy,
   10525 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10526 		else
   10527 			wm_gmii_mdic_writereg(dev, phy,
   10528 			    BME1000_PHY_PAGE_SELECT, page);
   10529 	}
   10530 
   10531 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10532 
   10533 release:
   10534 	sc->phy.release(sc);
   10535 	return rv;
   10536 }
   10537 
   10538 /*
   10539  * wm_gmii_bm_writereg:	[mii interface function]
   10540  *
 *	Write a PHY register on the BM PHYs (82567 on ICH8/9/10, and the
 * BM2 PHY on 82574/82583).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10544  */
   10545 static void
   10546 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10547 {
   10548 	struct wm_softc *sc = device_private(dev);
   10549 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10550 
   10551 	if (sc->phy.acquire(sc)) {
   10552 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10553 		return;
   10554 	}
   10555 
   10556 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10557 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10558 		    || (reg == 31)) ? 1 : phy;
   10559 	/* Page 800 works differently than the rest so it has its own func */
   10560 	if (page == BM_WUC_PAGE) {
   10561 		uint16_t tmp;
   10562 
   10563 		tmp = val;
   10564 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10565 		goto release;
   10566 	}
   10567 
   10568 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10569 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10570 		    && (sc->sc_type != WM_T_82583))
   10571 			wm_gmii_mdic_writereg(dev, phy,
   10572 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10573 		else
   10574 			wm_gmii_mdic_writereg(dev, phy,
   10575 			    BME1000_PHY_PAGE_SELECT, page);
   10576 	}
   10577 
   10578 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10579 
   10580 release:
   10581 	sc->phy.release(sc);
   10582 }
   10583 
   10584 static void
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10586 {
   10587 	struct wm_softc *sc = device_private(dev);
   10588 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10589 	uint16_t wuce, reg;
   10590 
   10591 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10592 		device_xname(dev), __func__));
   10593 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10594 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   10596 	}
   10597 
   10598 	/*
   10599 	 * 1) Enable PHY wakeup register first.
   10600 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10601 	 */
   10602 
   10603 	/* Set page 769 */
   10604 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10605 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10606 
   10607 	/* Read WUCE and save it */
   10608 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10609 
   10610 	reg = wuce | BM_WUC_ENABLE_BIT;
   10611 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10612 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10613 
   10614 	/* Select page 800 */
   10615 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10616 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10617 
   10618 	/*
   10619 	 * 2) Access PHY wakeup register.
   10620 	 * See e1000_access_phy_wakeup_reg_bm.
   10621 	 */
   10622 
   10623 	/* Write page 800 */
   10624 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10625 
   10626 	if (rd)
   10627 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10628 	else
   10629 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10630 
   10631 	/*
   10632 	 * 3) Disable PHY wakeup register.
   10633 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10634 	 */
   10635 	/* Set page 769 */
   10636 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10637 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10638 
   10639 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10640 }
   10641 
   10642 /*
   10643  * wm_gmii_hv_readreg:	[mii interface function]
   10644  *
 *	Read a PHY register on the HV family of PHYs (82577/82578/82579,
 * I217 and later, i.e. all PCH* devices).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10648  */
   10649 static int
   10650 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10651 {
   10652 	struct wm_softc *sc = device_private(dev);
   10653 	uint16_t val;
   10654 
   10655 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10656 		device_xname(dev), __func__));
   10657 	if (sc->phy.acquire(sc)) {
   10658 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10659 		return 0;
   10660 	}
   10661 
   10662 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10663 	sc->phy.release(sc);
   10664 	return val;
   10665 }
   10666 
   10667 static int
   10668 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10669 {
   10670 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10671 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10672 
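	/* Registers on pages >= HV_INTC_FC_PAGE_START live at PHY address 1 */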
   10673 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10674 
   10675 	/* Page 800 works differently than the rest so it has its own func */
   10676 	if (page == BM_WUC_PAGE) {
   10677 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10678 		return 0;
   10679 	}
   10680 
	/*
	 * Pages between 0 and 768 work differently than the rest and
	 * would need their own function; they are not handled here.
	 */
   10685 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10686 		printf("gmii_hv_readreg!!!\n");
   10687 		return 0;
   10688 	}
   10689 
   10690 	/*
   10691 	 * XXX I21[789] documents say that the SMBus Address register is at
   10692 	 * PHY address 01, Page 0 (not 768), Register 26.
   10693 	 */
   10694 	if (page == HV_INTC_FC_PAGE_START)
   10695 		page = 0;
   10696 
   10697 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10698 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10699 		    page << BME1000_PAGE_SHIFT);
   10700 	}
   10701 
   10702 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10703 	return 0;
   10704 }
   10705 
   10706 /*
   10707  * wm_gmii_hv_writereg:	[mii interface function]
   10708  *
 *	Write a PHY register on the HV family of PHYs (82577/82578/82579,
 * I217 and later, i.e. all PCH* devices).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10712  */
   10713 static void
   10714 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10715 {
   10716 	struct wm_softc *sc = device_private(dev);
   10717 
   10718 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10719 		device_xname(dev), __func__));
   10720 
   10721 	if (sc->phy.acquire(sc)) {
   10722 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10723 		return;
   10724 	}
   10725 
   10726 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10727 	sc->phy.release(sc);
   10728 }
   10729 
   10730 static int
   10731 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10732 {
   10733 	struct wm_softc *sc = device_private(dev);
   10734 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10735 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10736 
   10737 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10738 
   10739 	/* Page 800 works differently than the rest so it has its own func */
   10740 	if (page == BM_WUC_PAGE) {
   10741 		uint16_t tmp;
   10742 
   10743 		tmp = val;
   10744 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10745 		return 0;
   10746 	}
   10747 
	/*
	 * Pages between 0 and 768 work differently than the rest and
	 * would need their own function; they are not handled here.
	 */
   10752 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10753 		printf("gmii_hv_writereg!!!\n");
   10754 		return -1;
   10755 	}
   10756 
   10757 	{
   10758 		/*
   10759 		 * XXX I21[789] documents say that the SMBus Address register
   10760 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10761 		 */
   10762 		if (page == HV_INTC_FC_PAGE_START)
   10763 			page = 0;
   10764 
   10765 		/*
   10766 		 * XXX Workaround MDIO accesses being disabled after entering
   10767 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10768 		 * register is set)
   10769 		 */
   10770 		if (sc->sc_phytype == WMPHY_82578) {
   10771 			struct mii_softc *child;
   10772 
   10773 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10774 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10775 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10776 			    && ((val & (1 << 11)) != 0)) {
   10777 				printf("XXX need workaround\n");
   10778 			}
   10779 		}
   10780 
   10781 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10782 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10783 			    page << BME1000_PAGE_SHIFT);
   10784 		}
   10785 	}
   10786 
   10787 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10788 
   10789 	return 0;
   10790 }
   10791 
   10792 /*
   10793  * wm_gmii_82580_readreg:	[mii interface function]
   10794  *
   10795  *	Read a PHY register on the 82580 and I350.
   10796  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10798  */
   10799 static int
   10800 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10801 {
   10802 	struct wm_softc *sc = device_private(dev);
   10803 	int rv;
   10804 
   10805 	if (sc->phy.acquire(sc) != 0) {
   10806 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10807 		return 0;
   10808 	}
   10809 
   10810 #ifdef DIAGNOSTIC
   10811 	if (reg > MII_ADDRMASK) {
   10812 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10813 		    __func__, sc->sc_phytype, reg);
   10814 		reg &= MII_ADDRMASK;
   10815 	}
   10816 #endif
   10817 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10818 
   10819 	sc->phy.release(sc);
   10820 	return rv;
   10821 }
   10822 
   10823 /*
   10824  * wm_gmii_82580_writereg:	[mii interface function]
   10825  *
   10826  *	Write a PHY register on the 82580 and I350.
   10827  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10829  */
   10830 static void
   10831 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10832 {
   10833 	struct wm_softc *sc = device_private(dev);
   10834 
   10835 	if (sc->phy.acquire(sc) != 0) {
   10836 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10837 		return;
   10838 	}
   10839 
   10840 #ifdef DIAGNOSTIC
   10841 	if (reg > MII_ADDRMASK) {
   10842 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10843 		    __func__, sc->sc_phytype, reg);
   10844 		reg &= MII_ADDRMASK;
   10845 	}
   10846 #endif
   10847 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10848 
   10849 	sc->phy.release(sc);
   10850 }
   10851 
   10852 /*
   10853  * wm_gmii_gs40g_readreg:	[mii interface function]
   10854  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10858  */
   10859 static int
   10860 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10861 {
   10862 	struct wm_softc *sc = device_private(dev);
   10863 	int page, offset;
   10864 	int rv;
   10865 
   10866 	/* Acquire semaphore */
   10867 	if (sc->phy.acquire(sc)) {
   10868 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10869 		return 0;
   10870 	}
   10871 
   10872 	/* Page select */
   10873 	page = reg >> GS40G_PAGE_SHIFT;
   10874 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10875 
   10876 	/* Read reg */
   10877 	offset = reg & GS40G_OFFSET_MASK;
   10878 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10879 
   10880 	sc->phy.release(sc);
   10881 	return rv;
   10882 }
   10883 
   10884 /*
   10885  * wm_gmii_gs40g_writereg:	[mii interface function]
   10886  *
   10887  *	Write a PHY register on the I210 and I211.
   10888  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10890  */
   10891 static void
   10892 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10893 {
   10894 	struct wm_softc *sc = device_private(dev);
   10895 	int page, offset;
   10896 
   10897 	/* Acquire semaphore */
   10898 	if (sc->phy.acquire(sc)) {
   10899 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10900 		return;
   10901 	}
   10902 
   10903 	/* Page select */
   10904 	page = reg >> GS40G_PAGE_SHIFT;
   10905 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10906 
   10907 	/* Write reg */
   10908 	offset = reg & GS40G_OFFSET_MASK;
   10909 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10910 
   10911 	/* Release semaphore */
   10912 	sc->phy.release(sc);
   10913 }
   10914 
   10915 /*
   10916  * wm_gmii_statchg:	[mii interface function]
   10917  *
   10918  *	Callback from MII layer when media changes.
   10919  */
   10920 static void
   10921 wm_gmii_statchg(struct ifnet *ifp)
   10922 {
   10923 	struct wm_softc *sc = ifp->if_softc;
   10924 	struct mii_data *mii = &sc->sc_mii;
   10925 
   10926 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10927 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10928 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10929 
   10930 	/*
   10931 	 * Get flow control negotiation result.
   10932 	 */
   10933 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10934 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10935 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10936 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10937 	}
   10938 
   10939 	if (sc->sc_flowflags & IFM_FLOW) {
   10940 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10941 			sc->sc_ctrl |= CTRL_TFCE;
   10942 			sc->sc_fcrtl |= FCRTL_XONE;
   10943 		}
   10944 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10945 			sc->sc_ctrl |= CTRL_RFCE;
   10946 	}
   10947 
   10948 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10949 		DPRINTF(WM_DEBUG_LINK,
   10950 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10951 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10952 	} else {
   10953 		DPRINTF(WM_DEBUG_LINK,
   10954 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10955 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10956 	}
   10957 
   10958 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10959 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10960 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10961 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10962 	if (sc->sc_type == WM_T_80003) {
   10963 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10964 		case IFM_1000_T:
   10965 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10966 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10967 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10968 			break;
   10969 		default:
   10970 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10971 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10972 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10973 			break;
   10974 		}
   10975 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10976 	}
   10977 }
   10978 
   10979 /* kumeran related (80003, ICH* and PCH*) */
   10980 
   10981 /*
   10982  * wm_kmrn_readreg:
   10983  *
   10984  *	Read a kumeran register
   10985  */
   10986 static int
   10987 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10988 {
   10989 	int rv;
   10990 
   10991 	if (sc->sc_type == WM_T_80003)
   10992 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10993 	else
   10994 		rv = sc->phy.acquire(sc);
   10995 	if (rv != 0) {
   10996 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10997 		    __func__);
   10998 		return rv;
   10999 	}
   11000 
   11001 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11002 
   11003 	if (sc->sc_type == WM_T_80003)
   11004 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11005 	else
   11006 		sc->phy.release(sc);
   11007 
   11008 	return rv;
   11009 }
   11010 
   11011 static int
   11012 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11013 {
   11014 
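	/*
	 * Writing the register offset with REN set triggers a Kumeran
	 * read; the result appears in the low bits shortly afterwards.
	 */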
   11015 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11016 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11017 	    KUMCTRLSTA_REN);
   11018 	CSR_WRITE_FLUSH(sc);
   11019 	delay(2);
   11020 
   11021 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11022 
   11023 	return 0;
   11024 }
   11025 
   11026 /*
   11027  * wm_kmrn_writereg:
   11028  *
   11029  *	Write a kumeran register
   11030  */
   11031 static int
   11032 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11033 {
   11034 	int rv;
   11035 
   11036 	if (sc->sc_type == WM_T_80003)
   11037 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11038 	else
   11039 		rv = sc->phy.acquire(sc);
   11040 	if (rv != 0) {
   11041 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11042 		    __func__);
   11043 		return rv;
   11044 	}
   11045 
   11046 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11047 
   11048 	if (sc->sc_type == WM_T_80003)
   11049 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11050 	else
   11051 		sc->phy.release(sc);
   11052 
   11053 	return rv;
   11054 }
   11055 
   11056 static int
   11057 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11058 {
   11059 
   11060 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11061 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11062 
   11063 	return 0;
   11064 }
   11065 
   11066 /* SGMII related */
   11067 
   11068 /*
   11069  * wm_sgmii_uses_mdio
   11070  *
   11071  * Check whether the transaction is to the internal PHY or the external
   11072  * MDIO interface. Return true if it's MDIO.
   11073  */
   11074 static bool
   11075 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11076 {
   11077 	uint32_t reg;
   11078 	bool ismdio = false;
   11079 
   11080 	switch (sc->sc_type) {
   11081 	case WM_T_82575:
   11082 	case WM_T_82576:
   11083 		reg = CSR_READ(sc, WMREG_MDIC);
   11084 		ismdio = ((reg & MDIC_DEST) != 0);
   11085 		break;
   11086 	case WM_T_82580:
   11087 	case WM_T_I350:
   11088 	case WM_T_I354:
   11089 	case WM_T_I210:
   11090 	case WM_T_I211:
   11091 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11092 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11093 		break;
   11094 	default:
   11095 		break;
   11096 	}
   11097 
   11098 	return ismdio;
   11099 }
   11100 
   11101 /*
   11102  * wm_sgmii_readreg:	[mii interface function]
   11103  *
   11104  *	Read a PHY register on the SGMII
   11105  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11107  */
   11108 static int
   11109 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11110 {
   11111 	struct wm_softc *sc = device_private(dev);
   11112 	uint32_t i2ccmd;
   11113 	int i, rv;
   11114 
   11115 	if (sc->phy.acquire(sc)) {
   11116 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11117 		return 0;
   11118 	}
   11119 
   11120 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11121 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11122 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11123 
   11124 	/* Poll the ready bit */
   11125 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11126 		delay(50);
   11127 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11128 		if (i2ccmd & I2CCMD_READY)
   11129 			break;
   11130 	}
   11131 	if ((i2ccmd & I2CCMD_READY) == 0)
   11132 		device_printf(dev, "I2CCMD Read did not complete\n");
   11133 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11134 		device_printf(dev, "I2CCMD Error bit set\n");
   11135 
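	/* Swap the data bytes coming back from the I2C interface */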
   11136 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11137 
   11138 	sc->phy.release(sc);
   11139 	return rv;
   11140 }
   11141 
   11142 /*
   11143  * wm_sgmii_writereg:	[mii interface function]
   11144  *
   11145  *	Write a PHY register on the SGMII.
   11146  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11148  */
   11149 static void
   11150 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11151 {
   11152 	struct wm_softc *sc = device_private(dev);
   11153 	uint32_t i2ccmd;
   11154 	int i;
   11155 	int swapdata;
   11156 
   11157 	if (sc->phy.acquire(sc) != 0) {
   11158 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11159 		return;
   11160 	}
   11161 	/* Swap the data bytes for the I2C interface */
   11162 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11163 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11164 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11165 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11166 
   11167 	/* Poll the ready bit */
   11168 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11169 		delay(50);
   11170 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11171 		if (i2ccmd & I2CCMD_READY)
   11172 			break;
   11173 	}
   11174 	if ((i2ccmd & I2CCMD_READY) == 0)
   11175 		device_printf(dev, "I2CCMD Write did not complete\n");
   11176 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11177 		device_printf(dev, "I2CCMD Error bit set\n");
   11178 
   11179 	sc->phy.release(sc);
   11180 }
   11181 
   11182 /* TBI related */
   11183 
   11184 static bool
   11185 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11186 {
   11187 	bool sig;
   11188 
   11189 	sig = ctrl & CTRL_SWDPIN(1);
   11190 
   11191 	/*
   11192 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11193 	 * detect a signal, 1 if they don't.
   11194 	 */
   11195 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11196 		sig = !sig;
   11197 
   11198 	return sig;
   11199 }
   11200 
   11201 /*
   11202  * wm_tbi_mediainit:
   11203  *
   11204  *	Initialize media for use on 1000BASE-X devices.
   11205  */
   11206 static void
   11207 wm_tbi_mediainit(struct wm_softc *sc)
   11208 {
   11209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11210 	const char *sep = "";
   11211 
   11212 	if (sc->sc_type < WM_T_82543)
   11213 		sc->sc_tipg = TIPG_WM_DFLT;
   11214 	else
   11215 		sc->sc_tipg = TIPG_LG_DFLT;
   11216 
   11217 	sc->sc_tbi_serdes_anegticks = 5;
   11218 
   11219 	/* Initialize our media structures */
   11220 	sc->sc_mii.mii_ifp = ifp;
   11221 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11222 
   11223 	if ((sc->sc_type >= WM_T_82575)
   11224 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11225 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11226 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11227 	else
   11228 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11229 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11230 
   11231 	/*
   11232 	 * SWD Pins:
   11233 	 *
   11234 	 *	0 = Link LED (output)
   11235 	 *	1 = Loss Of Signal (input)
   11236 	 */
   11237 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11238 
   11239 	/* XXX Perhaps this is only for TBI */
   11240 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11241 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11242 
   11243 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11244 		sc->sc_ctrl &= ~CTRL_LRST;
   11245 
   11246 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11247 
   11248 #define	ADD(ss, mm, dd)							\
   11249 do {									\
   11250 	aprint_normal("%s%s", sep, ss);					\
   11251 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11252 	sep = ", ";							\
   11253 } while (/*CONSTCOND*/0)
   11254 
   11255 	aprint_normal_dev(sc->sc_dev, "");
   11256 
   11257 	if (sc->sc_type == WM_T_I354) {
   11258 		uint32_t status;
   11259 
   11260 		status = CSR_READ(sc, WMREG_STATUS);
   11261 		if (((status & STATUS_2P5_SKU) != 0)
   11262 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11263 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11264 		} else
    11265 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11266 	} else if (sc->sc_type == WM_T_82545) {
   11267 		/* Only 82545 is LX (XXX except SFP) */
   11268 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11269 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11270 	} else {
   11271 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11272 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11273 	}
   11274 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11275 	aprint_normal("\n");
   11276 
   11277 #undef ADD
   11278 
   11279 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11280 }
   11281 
   11282 /*
   11283  * wm_tbi_mediachange:	[ifmedia interface function]
   11284  *
   11285  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11286  */
   11287 static int
   11288 wm_tbi_mediachange(struct ifnet *ifp)
   11289 {
   11290 	struct wm_softc *sc = ifp->if_softc;
   11291 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11292 	uint32_t status, ctrl;
   11293 	bool signal;
   11294 	int i;
   11295 
   11296 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11297 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11298 		/* XXX need some work for >= 82571 and < 82575 */
   11299 		if (sc->sc_type < WM_T_82575)
   11300 			return 0;
   11301 	}
   11302 
   11303 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11304 	    || (sc->sc_type >= WM_T_82575))
   11305 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11306 
   11307 	sc->sc_ctrl &= ~CTRL_LRST;
   11308 	sc->sc_txcw = TXCW_ANE;
   11309 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11310 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11311 	else if (ife->ifm_media & IFM_FDX)
   11312 		sc->sc_txcw |= TXCW_FD;
   11313 	else
   11314 		sc->sc_txcw |= TXCW_HD;
   11315 
   11316 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11317 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11318 
    11319 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11320 		device_xname(sc->sc_dev), sc->sc_txcw));
   11321 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11322 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11323 	CSR_WRITE_FLUSH(sc);
   11324 	delay(1000);
   11325 
    11326 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11327 	signal = wm_tbi_havesignal(sc, ctrl);
   11328 
   11329 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11330 		signal));
   11331 
   11332 	if (signal) {
   11333 		/* Have signal; wait for the link to come up. */
   11334 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11335 			delay(10000);
   11336 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11337 				break;
   11338 		}
   11339 
    11340 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11341 			device_xname(sc->sc_dev), i));
   11342 
   11343 		status = CSR_READ(sc, WMREG_STATUS);
   11344 		DPRINTF(WM_DEBUG_LINK,
   11345 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11346 			device_xname(sc->sc_dev), status, STATUS_LU));
   11347 		if (status & STATUS_LU) {
   11348 			/* Link is up. */
   11349 			DPRINTF(WM_DEBUG_LINK,
   11350 			    ("%s: LINK: set media -> link up %s\n",
   11351 				device_xname(sc->sc_dev),
   11352 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11353 
    11354 			/*
    11355 			 * NOTE: the MAC updates the TFCE and RFCE bits in
    11356 			 * CTRL automatically, so refresh sc->sc_ctrl.
    11357 			 */
   11358 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11359 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11360 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11361 			if (status & STATUS_FD)
   11362 				sc->sc_tctl |=
   11363 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11364 			else
   11365 				sc->sc_tctl |=
   11366 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11367 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11368 				sc->sc_fcrtl |= FCRTL_XONE;
   11369 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11370 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11371 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11372 			sc->sc_tbi_linkup = 1;
   11373 		} else {
   11374 			if (i == WM_LINKUP_TIMEOUT)
   11375 				wm_check_for_link(sc);
   11376 			/* Link is down. */
   11377 			DPRINTF(WM_DEBUG_LINK,
   11378 			    ("%s: LINK: set media -> link down\n",
   11379 				device_xname(sc->sc_dev)));
   11380 			sc->sc_tbi_linkup = 0;
   11381 		}
   11382 	} else {
   11383 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11384 			device_xname(sc->sc_dev)));
   11385 		sc->sc_tbi_linkup = 0;
   11386 	}
   11387 
   11388 	wm_tbi_serdes_set_linkled(sc);
   11389 
   11390 	return 0;
   11391 }
   11392 
   11393 /*
   11394  * wm_tbi_mediastatus:	[ifmedia interface function]
   11395  *
   11396  *	Get the current interface media status on a 1000BASE-X device.
   11397  */
   11398 static void
   11399 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11400 {
   11401 	struct wm_softc *sc = ifp->if_softc;
   11402 	uint32_t ctrl, status;
   11403 
   11404 	ifmr->ifm_status = IFM_AVALID;
   11405 	ifmr->ifm_active = IFM_ETHER;
   11406 
   11407 	status = CSR_READ(sc, WMREG_STATUS);
   11408 	if ((status & STATUS_LU) == 0) {
   11409 		ifmr->ifm_active |= IFM_NONE;
   11410 		return;
   11411 	}
   11412 
   11413 	ifmr->ifm_status |= IFM_ACTIVE;
   11414 	/* Only 82545 is LX */
   11415 	if (sc->sc_type == WM_T_82545)
   11416 		ifmr->ifm_active |= IFM_1000_LX;
   11417 	else
   11418 		ifmr->ifm_active |= IFM_1000_SX;
   11419 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11420 		ifmr->ifm_active |= IFM_FDX;
   11421 	else
   11422 		ifmr->ifm_active |= IFM_HDX;
   11423 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11424 	if (ctrl & CTRL_RFCE)
   11425 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11426 	if (ctrl & CTRL_TFCE)
   11427 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11428 }
   11429 
   11430 /* XXX TBI only */
   11431 static int
   11432 wm_check_for_link(struct wm_softc *sc)
   11433 {
   11434 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11435 	uint32_t rxcw;
   11436 	uint32_t ctrl;
   11437 	uint32_t status;
   11438 	bool signal;
   11439 
   11440 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11441 		device_xname(sc->sc_dev), __func__));
   11442 
   11443 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11444 		/* XXX need some work for >= 82571 */
   11445 		if (sc->sc_type >= WM_T_82571) {
   11446 			sc->sc_tbi_linkup = 1;
   11447 			return 0;
   11448 		}
   11449 	}
   11450 
   11451 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11452 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11453 	status = CSR_READ(sc, WMREG_STATUS);
   11454 	signal = wm_tbi_havesignal(sc, ctrl);
   11455 
   11456 	DPRINTF(WM_DEBUG_LINK,
   11457 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11458 		device_xname(sc->sc_dev), __func__, signal,
   11459 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11460 
   11461 	/*
   11462 	 * SWDPIN   LU RXCW
   11463 	 *	0    0	  0
   11464 	 *	0    0	  1	(should not happen)
   11465 	 *	0    1	  0	(should not happen)
   11466 	 *	0    1	  1	(should not happen)
    11467 	 *	1    0	  0	Disable autonegotiation and force link up
    11468 	 *	1    0	  1	Got /C/ but no link yet
    11469 	 *	1    1	  0	(link up)
    11470 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   11471 	 *
   11472 	 */
   11473 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11474 		DPRINTF(WM_DEBUG_LINK,
   11475 		    ("%s: %s: force linkup and fullduplex\n",
   11476 			device_xname(sc->sc_dev), __func__));
   11477 		sc->sc_tbi_linkup = 0;
   11478 		/* Disable auto-negotiation in the TXCW register */
   11479 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11480 
   11481 		/*
   11482 		 * Force link-up and also force full-duplex.
   11483 		 *
    11484 		 * NOTE: the MAC has updated the TFCE and RFCE bits in
    11485 		 * CTRL automatically, so we should update sc->sc_ctrl.
   11486 		 */
   11487 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11488 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11489 	} else if (((status & STATUS_LU) != 0)
   11490 	    && ((rxcw & RXCW_C) != 0)
   11491 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11492 		sc->sc_tbi_linkup = 1;
   11493 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11494 			device_xname(sc->sc_dev),
   11495 			__func__));
   11496 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11497 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11498 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11499 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11500 			device_xname(sc->sc_dev), __func__));
   11501 	} else {
   11502 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11503 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11504 			status));
   11505 	}
   11506 
   11507 	return 0;
   11508 }
   11509 
   11510 /*
   11511  * wm_tbi_tick:
   11512  *
   11513  *	Check the link on TBI devices.
   11514  *	This function acts as mii_tick().
   11515  */
   11516 static void
   11517 wm_tbi_tick(struct wm_softc *sc)
   11518 {
   11519 	struct mii_data *mii = &sc->sc_mii;
   11520 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11521 	uint32_t status;
   11522 
   11523 	KASSERT(WM_CORE_LOCKED(sc));
   11524 
   11525 	status = CSR_READ(sc, WMREG_STATUS);
   11526 
   11527 	/* XXX is this needed? */
   11528 	(void)CSR_READ(sc, WMREG_RXCW);
   11529 	(void)CSR_READ(sc, WMREG_CTRL);
   11530 
   11531 	/* set link status */
   11532 	if ((status & STATUS_LU) == 0) {
   11533 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11534 			device_xname(sc->sc_dev)));
   11535 		sc->sc_tbi_linkup = 0;
   11536 	} else if (sc->sc_tbi_linkup == 0) {
   11537 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11538 			device_xname(sc->sc_dev),
   11539 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11540 		sc->sc_tbi_linkup = 1;
   11541 		sc->sc_tbi_serdes_ticks = 0;
   11542 	}
   11543 
   11544 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11545 		goto setled;
   11546 
   11547 	if ((status & STATUS_LU) == 0) {
   11548 		sc->sc_tbi_linkup = 0;
   11549 		/* If the timer expired, retry autonegotiation */
   11550 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11551 		    && (++sc->sc_tbi_serdes_ticks
   11552 			>= sc->sc_tbi_serdes_anegticks)) {
   11553 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11554 			sc->sc_tbi_serdes_ticks = 0;
   11555 			/*
   11556 			 * Reset the link, and let autonegotiation do
   11557 			 * its thing
   11558 			 */
   11559 			sc->sc_ctrl |= CTRL_LRST;
   11560 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11561 			CSR_WRITE_FLUSH(sc);
   11562 			delay(1000);
   11563 			sc->sc_ctrl &= ~CTRL_LRST;
   11564 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11565 			CSR_WRITE_FLUSH(sc);
   11566 			delay(1000);
   11567 			CSR_WRITE(sc, WMREG_TXCW,
   11568 			    sc->sc_txcw & ~TXCW_ANE);
   11569 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11570 		}
   11571 	}
   11572 
   11573 setled:
   11574 	wm_tbi_serdes_set_linkled(sc);
   11575 }
   11576 
   11577 /* SERDES related */
   11578 static void
   11579 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11580 {
   11581 	uint32_t reg;
   11582 
   11583 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11584 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11585 		return;
   11586 
   11587 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11588 	reg |= PCS_CFG_PCS_EN;
   11589 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11590 
   11591 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11592 	reg &= ~CTRL_EXT_SWDPIN(3);
   11593 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11594 	CSR_WRITE_FLUSH(sc);
   11595 }
   11596 
   11597 static int
   11598 wm_serdes_mediachange(struct ifnet *ifp)
   11599 {
   11600 	struct wm_softc *sc = ifp->if_softc;
   11601 	bool pcs_autoneg = true; /* XXX */
   11602 	uint32_t ctrl_ext, pcs_lctl, reg;
   11603 
   11604 	/* XXX Currently, this function is not called on 8257[12] */
   11605 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11606 	    || (sc->sc_type >= WM_T_82575))
   11607 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11608 
   11609 	wm_serdes_power_up_link_82575(sc);
   11610 
   11611 	sc->sc_ctrl |= CTRL_SLU;
   11612 
   11613 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11614 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11615 
   11616 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11617 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11618 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11619 	case CTRL_EXT_LINK_MODE_SGMII:
   11620 		pcs_autoneg = true;
   11621 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11622 		break;
   11623 	case CTRL_EXT_LINK_MODE_1000KX:
   11624 		pcs_autoneg = false;
   11625 		/* FALLTHROUGH */
   11626 	default:
   11627 		if ((sc->sc_type == WM_T_82575)
   11628 		    || (sc->sc_type == WM_T_82576)) {
   11629 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11630 				pcs_autoneg = false;
   11631 		}
   11632 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11633 		    | CTRL_FRCFDX;
   11634 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11635 	}
   11636 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11637 
   11638 	if (pcs_autoneg) {
   11639 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11640 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11641 
   11642 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11643 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11644 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11645 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11646 	} else
   11647 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11648 
   11649 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11650 
   11652 	return 0;
   11653 }
   11654 
   11655 static void
   11656 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11657 {
   11658 	struct wm_softc *sc = ifp->if_softc;
   11659 	struct mii_data *mii = &sc->sc_mii;
   11660 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11661 	uint32_t pcs_adv, pcs_lpab, reg;
   11662 
   11663 	ifmr->ifm_status = IFM_AVALID;
   11664 	ifmr->ifm_active = IFM_ETHER;
   11665 
   11666 	/* Check PCS */
   11667 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11668 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11669 		ifmr->ifm_active |= IFM_NONE;
   11670 		sc->sc_tbi_linkup = 0;
   11671 		goto setled;
   11672 	}
   11673 
   11674 	sc->sc_tbi_linkup = 1;
   11675 	ifmr->ifm_status |= IFM_ACTIVE;
   11676 	if (sc->sc_type == WM_T_I354) {
   11677 		uint32_t status;
   11678 
   11679 		status = CSR_READ(sc, WMREG_STATUS);
   11680 		if (((status & STATUS_2P5_SKU) != 0)
   11681 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11682 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11683 		} else
   11684 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11685 	} else {
   11686 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11687 		case PCS_LSTS_SPEED_10:
   11688 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11689 			break;
   11690 		case PCS_LSTS_SPEED_100:
   11691 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11692 			break;
   11693 		case PCS_LSTS_SPEED_1000:
   11694 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11695 			break;
   11696 		default:
   11697 			device_printf(sc->sc_dev, "Unknown speed\n");
   11698 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11699 			break;
   11700 		}
   11701 	}
   11702 	if ((reg & PCS_LSTS_FDX) != 0)
   11703 		ifmr->ifm_active |= IFM_FDX;
   11704 	else
   11705 		ifmr->ifm_active |= IFM_HDX;
   11706 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11707 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11708 		/* Check flow */
   11709 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11710 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11711 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11712 			goto setled;
   11713 		}
   11714 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11715 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11716 		DPRINTF(WM_DEBUG_LINK,
   11717 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11718 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11719 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11720 			mii->mii_media_active |= IFM_FLOW
   11721 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11722 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11723 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11724 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11725 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11726 			mii->mii_media_active |= IFM_FLOW
   11727 			    | IFM_ETH_TXPAUSE;
   11728 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11729 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11730 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11731 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11732 			mii->mii_media_active |= IFM_FLOW
   11733 			    | IFM_ETH_RXPAUSE;
   11734 		}
   11735 	}
   11736 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11737 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11738 setled:
   11739 	wm_tbi_serdes_set_linkled(sc);
   11740 }
   11741 
   11742 /*
   11743  * wm_serdes_tick:
   11744  *
   11745  *	Check the link on serdes devices.
   11746  */
   11747 static void
   11748 wm_serdes_tick(struct wm_softc *sc)
   11749 {
   11750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11751 	struct mii_data *mii = &sc->sc_mii;
   11752 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11753 	uint32_t reg;
   11754 
   11755 	KASSERT(WM_CORE_LOCKED(sc));
   11756 
   11757 	mii->mii_media_status = IFM_AVALID;
   11758 	mii->mii_media_active = IFM_ETHER;
   11759 
   11760 	/* Check PCS */
   11761 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11762 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11763 		mii->mii_media_status |= IFM_ACTIVE;
   11764 		sc->sc_tbi_linkup = 1;
   11765 		sc->sc_tbi_serdes_ticks = 0;
   11766 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11767 		if ((reg & PCS_LSTS_FDX) != 0)
   11768 			mii->mii_media_active |= IFM_FDX;
   11769 		else
   11770 			mii->mii_media_active |= IFM_HDX;
   11771 	} else {
    11772 		mii->mii_media_active |= IFM_NONE;
   11773 		sc->sc_tbi_linkup = 0;
   11774 		/* If the timer expired, retry autonegotiation */
   11775 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11776 		    && (++sc->sc_tbi_serdes_ticks
   11777 			>= sc->sc_tbi_serdes_anegticks)) {
   11778 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11779 			sc->sc_tbi_serdes_ticks = 0;
   11780 			/* XXX */
   11781 			wm_serdes_mediachange(ifp);
   11782 		}
   11783 	}
   11784 
   11785 	wm_tbi_serdes_set_linkled(sc);
   11786 }
   11787 
   11788 /* SFP related */
   11789 
   11790 static int
   11791 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11792 {
   11793 	uint32_t i2ccmd;
   11794 	int i;
   11795 
   11796 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11797 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11798 
   11799 	/* Poll the ready bit */
   11800 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11801 		delay(50);
   11802 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11803 		if (i2ccmd & I2CCMD_READY)
   11804 			break;
   11805 	}
   11806 	if ((i2ccmd & I2CCMD_READY) == 0)
   11807 		return -1;
   11808 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11809 		return -1;
   11810 
   11811 	*data = i2ccmd & 0x00ff;
   11812 
   11813 	return 0;
   11814 }
   11815 
   11816 static uint32_t
   11817 wm_sfp_get_media_type(struct wm_softc *sc)
   11818 {
   11819 	uint32_t ctrl_ext;
   11820 	uint8_t val = 0;
   11821 	int timeout = 3;
   11822 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11823 	int rv = -1;
   11824 
   11825 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11826 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11827 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11828 	CSR_WRITE_FLUSH(sc);
   11829 
   11830 	/* Read SFP module data */
   11831 	while (timeout) {
   11832 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11833 		if (rv == 0)
   11834 			break;
   11835 		delay(100*1000); /* XXX too big */
   11836 		timeout--;
   11837 	}
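          	/* I.e. at most three reads, roughly 100 ms apart (~300 ms). */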
   11838 	if (rv != 0)
   11839 		goto out;
   11840 	switch (val) {
   11841 	case SFF_SFP_ID_SFF:
   11842 		aprint_normal_dev(sc->sc_dev,
   11843 		    "Module/Connector soldered to board\n");
   11844 		break;
   11845 	case SFF_SFP_ID_SFP:
   11846 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11847 		break;
   11848 	case SFF_SFP_ID_UNKNOWN:
   11849 		goto out;
   11850 	default:
   11851 		break;
   11852 	}
   11853 
   11854 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11855 	if (rv != 0) {
   11856 		goto out;
   11857 	}
   11858 
   11859 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11860 		mediatype = WM_MEDIATYPE_SERDES;
   11861 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11862 		sc->sc_flags |= WM_F_SGMII;
   11863 		mediatype = WM_MEDIATYPE_COPPER;
   11864 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11865 		sc->sc_flags |= WM_F_SGMII;
   11866 		mediatype = WM_MEDIATYPE_SERDES;
   11867 	}
   11868 
   11869 out:
   11870 	/* Restore I2C interface setting */
   11871 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11872 
   11873 	return mediatype;
   11874 }
   11875 
   11876 /*
   11877  * NVM related.
   11878  * Microwire, SPI (w/wo EERD) and Flash.
   11879  */
   11880 
   11881 /* Both spi and uwire */
   11882 
   11883 /*
   11884  * wm_eeprom_sendbits:
   11885  *
   11886  *	Send a series of bits to the EEPROM.
   11887  */
   11888 static void
   11889 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11890 {
   11891 	uint32_t reg;
   11892 	int x;
   11893 
   11894 	reg = CSR_READ(sc, WMREG_EECD);
   11895 
   11896 	for (x = nbits; x > 0; x--) {
   11897 		if (bits & (1U << (x - 1)))
   11898 			reg |= EECD_DI;
   11899 		else
   11900 			reg &= ~EECD_DI;
   11901 		CSR_WRITE(sc, WMREG_EECD, reg);
   11902 		CSR_WRITE_FLUSH(sc);
   11903 		delay(2);
   11904 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11905 		CSR_WRITE_FLUSH(sc);
   11906 		delay(2);
   11907 		CSR_WRITE(sc, WMREG_EECD, reg);
   11908 		CSR_WRITE_FLUSH(sc);
   11909 		delay(2);
   11910 	}
   11911 }
   11912 
   11913 /*
   11914  * wm_eeprom_recvbits:
   11915  *
   11916  *	Receive a series of bits from the EEPROM.
   11917  */
   11918 static void
   11919 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11920 {
   11921 	uint32_t reg, val;
   11922 	int x;
   11923 
   11924 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11925 
   11926 	val = 0;
   11927 	for (x = nbits; x > 0; x--) {
   11928 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11929 		CSR_WRITE_FLUSH(sc);
   11930 		delay(2);
   11931 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11932 			val |= (1U << (x - 1));
   11933 		CSR_WRITE(sc, WMREG_EECD, reg);
   11934 		CSR_WRITE_FLUSH(sc);
   11935 		delay(2);
   11936 	}
   11937 	*valp = val;
   11938 }
   11939 
   11940 /* Microwire */
   11941 
   11942 /*
   11943  * wm_nvm_read_uwire:
   11944  *
   11945  *	Read a word from the EEPROM using the MicroWire protocol.
   11946  */
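          /*
           * A single Microwire READ transaction, as bit-banged below (a
           * sketch; this assumes the command set of the common 93Cxx parts):
           *
           *	raise CS
           *	shift out the 3-bit READ opcode (UWIRE_OPC_READ), MSB first
           *	shift out sc_nvm_addrbits address bits, MSB first
           *	shift in 16 data bits, MSB first
           *	drop CS
           */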
   11947 static int
   11948 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11949 {
   11950 	uint32_t reg, val;
   11951 	int i;
   11952 
   11953 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11954 		device_xname(sc->sc_dev), __func__));
   11955 
   11956 	if (sc->nvm.acquire(sc) != 0)
   11957 		return -1;
   11958 
   11959 	for (i = 0; i < wordcnt; i++) {
   11960 		/* Clear SK and DI. */
   11961 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11962 		CSR_WRITE(sc, WMREG_EECD, reg);
   11963 
   11964 		/*
   11965 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11966 		 * and Xen.
   11967 		 *
   11968 		 * We use this workaround only for 82540 because qemu's
    11969 		 * e1000 acts as an 82540.
   11970 		 */
   11971 		if (sc->sc_type == WM_T_82540) {
   11972 			reg |= EECD_SK;
   11973 			CSR_WRITE(sc, WMREG_EECD, reg);
   11974 			reg &= ~EECD_SK;
   11975 			CSR_WRITE(sc, WMREG_EECD, reg);
   11976 			CSR_WRITE_FLUSH(sc);
   11977 			delay(2);
   11978 		}
   11979 		/* XXX: end of workaround */
   11980 
   11981 		/* Set CHIP SELECT. */
   11982 		reg |= EECD_CS;
   11983 		CSR_WRITE(sc, WMREG_EECD, reg);
   11984 		CSR_WRITE_FLUSH(sc);
   11985 		delay(2);
   11986 
   11987 		/* Shift in the READ command. */
   11988 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11989 
   11990 		/* Shift in address. */
   11991 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11992 
   11993 		/* Shift out the data. */
   11994 		wm_eeprom_recvbits(sc, &val, 16);
   11995 		data[i] = val & 0xffff;
   11996 
   11997 		/* Clear CHIP SELECT. */
   11998 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11999 		CSR_WRITE(sc, WMREG_EECD, reg);
   12000 		CSR_WRITE_FLUSH(sc);
   12001 		delay(2);
   12002 	}
   12003 
   12004 	sc->nvm.release(sc);
   12005 	return 0;
   12006 }
   12007 
   12008 /* SPI */
   12009 
   12010 /*
   12011  * Set SPI and FLASH related information from the EECD register.
   12012  * For 82541 and 82547, the word size is taken from EEPROM.
   12013  */
   12014 static int
   12015 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12016 {
   12017 	int size;
   12018 	uint32_t reg;
   12019 	uint16_t data;
   12020 
   12021 	reg = CSR_READ(sc, WMREG_EECD);
   12022 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12023 
   12024 	/* Read the size of NVM from EECD by default */
   12025 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12026 	switch (sc->sc_type) {
   12027 	case WM_T_82541:
   12028 	case WM_T_82541_2:
   12029 	case WM_T_82547:
   12030 	case WM_T_82547_2:
   12031 		/* Set dummy value to access EEPROM */
   12032 		sc->sc_nvm_wordsize = 64;
   12033 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12034 			aprint_error_dev(sc->sc_dev,
   12035 			    "%s: failed to read EEPROM size\n", __func__);
   12036 		}
   12037 		reg = data;
   12038 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12039 		if (size == 0)
   12040 			size = 6; /* 64 word size */
   12041 		else
   12042 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12043 		break;
   12044 	case WM_T_80003:
   12045 	case WM_T_82571:
   12046 	case WM_T_82572:
   12047 	case WM_T_82573: /* SPI case */
   12048 	case WM_T_82574: /* SPI case */
   12049 	case WM_T_82583: /* SPI case */
   12050 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12051 		if (size > 14)
   12052 			size = 14;
   12053 		break;
   12054 	case WM_T_82575:
   12055 	case WM_T_82576:
   12056 	case WM_T_82580:
   12057 	case WM_T_I350:
   12058 	case WM_T_I354:
   12059 	case WM_T_I210:
   12060 	case WM_T_I211:
   12061 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12062 		if (size > 15)
   12063 			size = 15;
   12064 		break;
   12065 	default:
   12066 		aprint_error_dev(sc->sc_dev,
   12067 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12068 		return -1;
   12070 	}
   12071 
   12072 	sc->sc_nvm_wordsize = 1 << size;
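          	/*
          	 * E.g. the 64-word fallback above is encoded as size = 6,
          	 * giving 1 << 6 == 64 words; the clamps of 14 and 15 cap the
          	 * NVM at 16K and 32K words respectively.
          	 */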
   12073 
   12074 	return 0;
   12075 }
   12076 
   12077 /*
   12078  * wm_nvm_ready_spi:
   12079  *
   12080  *	Wait for a SPI EEPROM to be ready for commands.
   12081  */
   12082 static int
   12083 wm_nvm_ready_spi(struct wm_softc *sc)
   12084 {
   12085 	uint32_t val;
   12086 	int usec;
   12087 
   12088 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12089 		device_xname(sc->sc_dev), __func__));
   12090 
   12091 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12092 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12093 		wm_eeprom_recvbits(sc, &val, 8);
   12094 		if ((val & SPI_SR_RDY) == 0)
   12095 			break;
   12096 	}
   12097 	if (usec >= SPI_MAX_RETRIES) {
    12098 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12099 		return -1;
   12100 	}
   12101 	return 0;
   12102 }
   12103 
   12104 /*
   12105  * wm_nvm_read_spi:
   12106  *
    12107  *	Read a word from the EEPROM using the SPI protocol.
   12108  */
   12109 static int
   12110 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12111 {
   12112 	uint32_t reg, val;
   12113 	int i;
   12114 	uint8_t opc;
   12115 	int rv = 0;
   12116 
   12117 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12118 		device_xname(sc->sc_dev), __func__));
   12119 
   12120 	if (sc->nvm.acquire(sc) != 0)
   12121 		return -1;
   12122 
   12123 	/* Clear SK and CS. */
   12124 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12125 	CSR_WRITE(sc, WMREG_EECD, reg);
   12126 	CSR_WRITE_FLUSH(sc);
   12127 	delay(2);
   12128 
   12129 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12130 		goto out;
   12131 
   12132 	/* Toggle CS to flush commands. */
   12133 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12134 	CSR_WRITE_FLUSH(sc);
   12135 	delay(2);
   12136 	CSR_WRITE(sc, WMREG_EECD, reg);
   12137 	CSR_WRITE_FLUSH(sc);
   12138 	delay(2);
   12139 
   12140 	opc = SPI_OPC_READ;
   12141 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12142 		opc |= SPI_OPC_A8;
   12143 
   12144 	wm_eeprom_sendbits(sc, opc, 8);
   12145 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
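          	/*
          	 * Two addressing details above (a sketch of typical SPI
          	 * EEPROM behaviour): parts with 8 address bits carry a ninth
          	 * address bit in the opcode (SPI_OPC_A8) once the word offset
          	 * reaches 128, and the part addresses bytes, hence the
          	 * word-to-byte shift of the offset.
          	 */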
   12146 
   12147 	for (i = 0; i < wordcnt; i++) {
   12148 		wm_eeprom_recvbits(sc, &val, 16);
   12149 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12150 	}
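          	/*
          	 * Byte-order example (illustrative): the byte that arrives
          	 * first lands in the high half of val, so the swap above
          	 * turns a raw val of 0xB2A1 into data[i] = 0xA1B2.
          	 */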
   12151 
   12152 	/* Raise CS and clear SK. */
   12153 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12154 	CSR_WRITE(sc, WMREG_EECD, reg);
   12155 	CSR_WRITE_FLUSH(sc);
   12156 	delay(2);
   12157 
   12158 out:
   12159 	sc->nvm.release(sc);
   12160 	return rv;
   12161 }
   12162 
   12163 /* Using with EERD */
   12164 
   12165 static int
   12166 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12167 {
   12168 	uint32_t attempts = 100000;
   12169 	uint32_t i, reg = 0;
   12170 	int32_t done = -1;
   12171 
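          	/* Poll budget: 100000 iterations x 5 us = ~0.5 s worst case. */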
   12172 	for (i = 0; i < attempts; i++) {
   12173 		reg = CSR_READ(sc, rw);
   12174 
   12175 		if (reg & EERD_DONE) {
   12176 			done = 0;
   12177 			break;
   12178 		}
   12179 		delay(5);
   12180 	}
   12181 
   12182 	return done;
   12183 }
   12184 
   12185 static int
   12186 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12187 {
   12188 	int i, eerd = 0;
   12189 	int rv = 0;
   12190 
   12191 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12192 		device_xname(sc->sc_dev), __func__));
   12193 
   12194 	if (sc->nvm.acquire(sc) != 0)
   12195 		return -1;
   12196 
   12197 	for (i = 0; i < wordcnt; i++) {
   12198 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12199 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12200 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12201 		if (rv != 0) {
    12202 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12203 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12204 			break;
   12205 		}
   12206 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12207 	}
   12208 
   12209 	sc->nvm.release(sc);
   12210 	return rv;
   12211 }
   12212 
   12213 /* Flash */
   12214 
   12215 static int
   12216 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12217 {
   12218 	uint32_t eecd;
   12219 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12220 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12221 	uint32_t nvm_dword = 0;
   12222 	uint8_t sig_byte = 0;
   12223 	int rv;
   12224 
   12225 	switch (sc->sc_type) {
   12226 	case WM_T_PCH_SPT:
   12227 	case WM_T_PCH_CNP:
   12228 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12229 		act_offset = ICH_NVM_SIG_WORD * 2;
   12230 
   12231 		/* set bank to 0 in case flash read fails. */
   12232 		*bank = 0;
   12233 
   12234 		/* Check bank 0 */
   12235 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12236 		if (rv != 0)
   12237 			return rv;
   12238 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12239 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12240 			*bank = 0;
   12241 			return 0;
   12242 		}
   12243 
   12244 		/* Check bank 1 */
    12245 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12246 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12247 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12248 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12249 			*bank = 1;
   12250 			return 0;
   12251 		}
   12252 		aprint_error_dev(sc->sc_dev,
   12253 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12254 		return -1;
   12255 	case WM_T_ICH8:
   12256 	case WM_T_ICH9:
   12257 		eecd = CSR_READ(sc, WMREG_EECD);
   12258 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12259 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12260 			return 0;
   12261 		}
   12262 		/* FALLTHROUGH */
   12263 	default:
   12264 		/* Default to 0 */
   12265 		*bank = 0;
   12266 
   12267 		/* Check bank 0 */
   12268 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12269 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12270 			*bank = 0;
   12271 			return 0;
   12272 		}
   12273 
   12274 		/* Check bank 1 */
   12275 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12276 		    &sig_byte);
   12277 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12278 			*bank = 1;
   12279 			return 0;
   12280 		}
   12281 	}
   12282 
   12283 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12284 		device_xname(sc->sc_dev)));
   12285 	return -1;
   12286 }
   12287 
   12288 /******************************************************************************
   12289  * This function does initial flash setup so that a new read/write/erase cycle
   12290  * can be started.
   12291  *
   12292  * sc - The pointer to the hw structure
   12293  ****************************************************************************/
   12294 static int32_t
   12295 wm_ich8_cycle_init(struct wm_softc *sc)
   12296 {
   12297 	uint16_t hsfsts;
   12298 	int32_t error = 1;
   12299 	int32_t i     = 0;
   12300 
   12301 	if (sc->sc_type >= WM_T_PCH_SPT)
   12302 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12303 	else
   12304 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12305 
    12306 	/* Check the Flash Descriptor Valid bit in the Hw status register */
   12307 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12308 		return error;
   12309 
    12310 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   12312 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12313 
   12314 	if (sc->sc_type >= WM_T_PCH_SPT)
   12315 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12316 	else
   12317 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12318 
    12319 	/*
    12320 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12321 	 * check against in order to start a new cycle, or the FDONE bit
    12322 	 * should be changed in the hardware so that it is 1 after a
    12323 	 * hardware reset, which can then indicate whether a cycle is in
    12324 	 * progress or has completed.  We should also have some software
    12325 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    12326 	 * so that accesses by two threads can be serialized, or some way
    12327 	 * to keep two threads from starting the cycle at the same time.
    12328 	 */
   12329 
   12330 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12331 		/*
   12332 		 * There is no cycle running at present, so we can start a
   12333 		 * cycle
   12334 		 */
   12335 
   12336 		/* Begin by setting Flash Cycle Done. */
   12337 		hsfsts |= HSFSTS_DONE;
   12338 		if (sc->sc_type >= WM_T_PCH_SPT)
   12339 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12340 			    hsfsts & 0xffffUL);
   12341 		else
   12342 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12343 		error = 0;
   12344 	} else {
   12345 		/*
    12346 		 * Otherwise poll for some time so the current cycle has a
   12347 		 * chance to end before giving up.
   12348 		 */
   12349 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12350 			if (sc->sc_type >= WM_T_PCH_SPT)
   12351 				hsfsts = ICH8_FLASH_READ32(sc,
   12352 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12353 			else
   12354 				hsfsts = ICH8_FLASH_READ16(sc,
   12355 				    ICH_FLASH_HSFSTS);
   12356 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12357 				error = 0;
   12358 				break;
   12359 			}
   12360 			delay(1);
   12361 		}
   12362 		if (error == 0) {
   12363 			/*
    12364 			 * The previous cycle ended within the timeout;
    12365 			 * now set the Flash Cycle Done bit.
   12366 			 */
   12367 			hsfsts |= HSFSTS_DONE;
   12368 			if (sc->sc_type >= WM_T_PCH_SPT)
   12369 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12370 				    hsfsts & 0xffffUL);
   12371 			else
   12372 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12373 				    hsfsts);
   12374 		}
   12375 	}
   12376 	return error;
   12377 }
   12378 
   12379 /******************************************************************************
   12380  * This function starts a flash cycle and waits for its completion
   12381  *
   12382  * sc - The pointer to the hw structure
   12383  ****************************************************************************/
   12384 static int32_t
   12385 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12386 {
   12387 	uint16_t hsflctl;
   12388 	uint16_t hsfsts;
   12389 	int32_t error = 1;
   12390 	uint32_t i = 0;
   12391 
   12392 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12393 	if (sc->sc_type >= WM_T_PCH_SPT)
   12394 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12395 	else
   12396 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12397 	hsflctl |= HSFCTL_GO;
   12398 	if (sc->sc_type >= WM_T_PCH_SPT)
   12399 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12400 		    (uint32_t)hsflctl << 16);
   12401 	else
   12402 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12403 
   12404 	/* Wait till FDONE bit is set to 1 */
   12405 	do {
   12406 		if (sc->sc_type >= WM_T_PCH_SPT)
   12407 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12408 			    & 0xffffUL;
   12409 		else
   12410 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12411 		if (hsfsts & HSFSTS_DONE)
   12412 			break;
   12413 		delay(1);
   12414 		i++;
   12415 	} while (i < timeout);
    12416 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12417 		error = 0;
   12418 
   12419 	return error;
   12420 }
   12421 
   12422 /******************************************************************************
   12423  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12424  *
   12425  * sc - The pointer to the hw structure
   12426  * index - The index of the byte or word to read.
   12427  * size - Size of data to read, 1=byte 2=word, 4=dword
   12428  * data - Pointer to the word to store the value read.
   12429  *****************************************************************************/
   12430 static int32_t
   12431 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12432     uint32_t size, uint32_t *data)
   12433 {
   12434 	uint16_t hsfsts;
   12435 	uint16_t hsflctl;
   12436 	uint32_t flash_linear_address;
   12437 	uint32_t flash_data = 0;
   12438 	int32_t error = 1;
   12439 	int32_t count = 0;
   12440 
    12441 	if (size < 1 || size > 4 || data == NULL ||
   12442 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12443 		return error;
   12444 
   12445 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12446 	    sc->sc_ich8_flash_base;
   12447 
   12448 	do {
   12449 		delay(1);
   12450 		/* Steps */
   12451 		error = wm_ich8_cycle_init(sc);
   12452 		if (error)
   12453 			break;
   12454 
   12455 		if (sc->sc_type >= WM_T_PCH_SPT)
   12456 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12457 			    >> 16;
   12458 		else
   12459 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12460 		/*
          		 * The byte count field holds size - 1:
          		 * 0b = 1 byte, 1b = 2 bytes, 11b = 4 bytes.
          		 */
    12461 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
    12462 		    & HSFCTL_BCOUNT_MASK;
   12463 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12464 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12465 			/*
    12466 			 * In SPT, this register is in LAN memory space, not
    12467 			 * flash.  Therefore, only 32 bit access is supported.
   12468 			 */
   12469 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12470 			    (uint32_t)hsflctl << 16);
   12471 		} else
   12472 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12473 
   12474 		/*
   12475 		 * Write the last 24 bits of index into Flash Linear address
   12476 		 * field in Flash Address
   12477 		 */
    12478 		/* TODO: check the index against the size of the flash */
   12479 
   12480 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12481 
   12482 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12483 
   12484 		/*
    12485 		 * Check if FCERR is set to 1.  If it is, clear it and try
    12486 		 * the whole sequence a few more times; otherwise read in
    12487 		 * (shift in) the Flash Data0 register, which returns the
    12488 		 * data least significant byte first.
   12489 		 */
   12490 		if (error == 0) {
   12491 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12492 			if (size == 1)
   12493 				*data = (uint8_t)(flash_data & 0x000000FF);
   12494 			else if (size == 2)
   12495 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12496 			else if (size == 4)
   12497 				*data = (uint32_t)flash_data;
   12498 			break;
   12499 		} else {
   12500 			/*
   12501 			 * If we've gotten here, then things are probably
   12502 			 * completely hosed, but if the error condition is
   12503 			 * detected, it won't hurt to give it another try...
   12504 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12505 			 */
   12506 			if (sc->sc_type >= WM_T_PCH_SPT)
   12507 				hsfsts = ICH8_FLASH_READ32(sc,
   12508 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12509 			else
   12510 				hsfsts = ICH8_FLASH_READ16(sc,
   12511 				    ICH_FLASH_HSFSTS);
   12512 
   12513 			if (hsfsts & HSFSTS_ERR) {
   12514 				/* Repeat for some time before giving up. */
   12515 				continue;
   12516 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12517 				break;
   12518 		}
   12519 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12520 
   12521 	return error;
   12522 }
   12523 
   12524 /******************************************************************************
   12525  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12526  *
   12527  * sc - pointer to wm_hw structure
   12528  * index - The index of the byte to read.
   12529  * data - Pointer to a byte to store the value read.
   12530  *****************************************************************************/
   12531 static int32_t
    12532 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
   12533 {
   12534 	int32_t status;
   12535 	uint32_t word = 0;
   12536 
   12537 	status = wm_read_ich8_data(sc, index, 1, &word);
   12538 	if (status == 0)
   12539 		*data = (uint8_t)word;
   12540 	else
   12541 		*data = 0;
   12542 
   12543 	return status;
   12544 }
   12545 
   12546 /******************************************************************************
   12547  * Reads a word from the NVM using the ICH8 flash access registers.
   12548  *
   12549  * sc - pointer to wm_hw structure
   12550  * index - The starting byte index of the word to read.
   12551  * data - Pointer to a word to store the value read.
   12552  *****************************************************************************/
   12553 static int32_t
   12554 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12555 {
   12556 	int32_t status;
   12557 	uint32_t word = 0;
   12558 
   12559 	status = wm_read_ich8_data(sc, index, 2, &word);
   12560 	if (status == 0)
   12561 		*data = (uint16_t)word;
   12562 	else
   12563 		*data = 0;
   12564 
   12565 	return status;
   12566 }
   12567 
   12568 /******************************************************************************
   12569  * Reads a dword from the NVM using the ICH8 flash access registers.
   12570  *
   12571  * sc - pointer to wm_hw structure
   12572  * index - The starting byte index of the word to read.
   12573  * data - Pointer to a word to store the value read.
   12574  *****************************************************************************/
   12575 static int32_t
   12576 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12577 {
   12578 	int32_t status;
   12579 
   12580 	status = wm_read_ich8_data(sc, index, 4, data);
   12581 	return status;
   12582 }
   12583 
   12584 /******************************************************************************
   12585  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12586  * register.
   12587  *
   12588  * sc - Struct containing variables accessed by shared code
   12589  * offset - offset of word in the EEPROM to read
   12590  * data - word read from the EEPROM
   12591  * words - number of words to read
   12592  *****************************************************************************/
   12593 static int
   12594 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12595 {
   12596 	int32_t	 rv = 0;
   12597 	uint32_t flash_bank = 0;
   12598 	uint32_t act_offset = 0;
   12599 	uint32_t bank_offset = 0;
   12600 	uint16_t word = 0;
   12601 	uint16_t i = 0;
   12602 
   12603 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12604 		device_xname(sc->sc_dev), __func__));
   12605 
   12606 	if (sc->nvm.acquire(sc) != 0)
   12607 		return -1;
   12608 
   12609 	/*
   12610 	 * We need to know which is the valid flash bank.  In the event
   12611 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12612 	 * managing flash_bank. So it cannot be trusted and needs
   12613 	 * to be updated with each read.
   12614 	 */
   12615 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12616 	if (rv) {
   12617 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12618 			device_xname(sc->sc_dev)));
   12619 		flash_bank = 0;
   12620 	}
   12621 
   12622 	/*
   12623 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12624 	 * size
   12625 	 */
   12626 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
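          	/*
          	 * E.g. with a (hypothetical) 2048-word bank, bank 1 starts
          	 * at byte offset 2048 * 2 = 4096, since the flash stores two
          	 * bytes per NVM word.
          	 */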
   12627 
   12628 	for (i = 0; i < words; i++) {
   12629 		/* The NVM part needs a byte offset, hence * 2 */
   12630 		act_offset = bank_offset + ((offset + i) * 2);
   12631 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12632 		if (rv) {
   12633 			aprint_error_dev(sc->sc_dev,
   12634 			    "%s: failed to read NVM\n", __func__);
   12635 			break;
   12636 		}
   12637 		data[i] = word;
   12638 	}
   12639 
   12640 	sc->nvm.release(sc);
   12641 	return rv;
   12642 }
   12643 
   12644 /******************************************************************************
   12645  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12646  * register.
   12647  *
   12648  * sc - Struct containing variables accessed by shared code
   12649  * offset - offset of word in the EEPROM to read
   12650  * data - word read from the EEPROM
   12651  * words - number of words to read
   12652  *****************************************************************************/
   12653 static int
   12654 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12655 {
   12656 	int32_t	 rv = 0;
   12657 	uint32_t flash_bank = 0;
   12658 	uint32_t act_offset = 0;
   12659 	uint32_t bank_offset = 0;
   12660 	uint32_t dword = 0;
   12661 	uint16_t i = 0;
   12662 
   12663 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12664 		device_xname(sc->sc_dev), __func__));
   12665 
   12666 	if (sc->nvm.acquire(sc) != 0)
   12667 		return -1;
   12668 
   12669 	/*
   12670 	 * We need to know which is the valid flash bank.  In the event
   12671 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12672 	 * managing flash_bank. So it cannot be trusted and needs
   12673 	 * to be updated with each read.
   12674 	 */
   12675 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12676 	if (rv) {
   12677 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12678 			device_xname(sc->sc_dev)));
   12679 		flash_bank = 0;
   12680 	}
   12681 
   12682 	/*
   12683 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12684 	 * size
   12685 	 */
   12686 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12687 
   12688 	for (i = 0; i < words; i++) {
   12689 		/* The NVM part needs a byte offset, hence * 2 */
   12690 		act_offset = bank_offset + ((offset + i) * 2);
   12691 		/* but we must read dword aligned, so mask ... */
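          		/*
          		 * Worked example (illustrative): for act_offset 0x102
          		 * the masked read below fetches the dword at 0x100; bit
          		 * 1 of the offset then selects the high word (31:16).
          		 */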
   12692 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12693 		if (rv) {
   12694 			aprint_error_dev(sc->sc_dev,
   12695 			    "%s: failed to read NVM\n", __func__);
   12696 			break;
   12697 		}
   12698 		/* ... and pick out low or high word */
   12699 		if ((act_offset & 0x2) == 0)
   12700 			data[i] = (uint16_t)(dword & 0xFFFF);
   12701 		else
   12702 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12703 	}
   12704 
   12705 	sc->nvm.release(sc);
   12706 	return rv;
   12707 }
   12708 
   12709 /* iNVM */
   12710 
   12711 static int
   12712 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12713 {
    12714 	int32_t	 rv = -1;	/* Default to "word not found" */
   12715 	uint32_t invm_dword;
   12716 	uint16_t i;
   12717 	uint8_t record_type, word_address;
   12718 
   12719 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12720 		device_xname(sc->sc_dev), __func__));
   12721 
   12722 	for (i = 0; i < INVM_SIZE; i++) {
   12723 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12724 		/* Get record type */
   12725 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12726 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12727 			break;
   12728 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12729 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12730 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12731 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12732 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12733 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12734 			if (word_address == address) {
   12735 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12736 				rv = 0;
   12737 				break;
   12738 			}
   12739 		}
   12740 	}
   12741 
   12742 	return rv;
   12743 }
   12744 
   12745 static int
   12746 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12747 {
   12748 	int rv = 0;
   12749 	int i;
   12750 
   12751 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12752 		device_xname(sc->sc_dev), __func__));
   12753 
   12754 	if (sc->nvm.acquire(sc) != 0)
   12755 		return -1;
   12756 
   12757 	for (i = 0; i < words; i++) {
   12758 		switch (offset + i) {
   12759 		case NVM_OFF_MACADDR:
   12760 		case NVM_OFF_MACADDR1:
   12761 		case NVM_OFF_MACADDR2:
   12762 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12763 			if (rv != 0) {
   12764 				data[i] = 0xffff;
   12765 				rv = -1;
   12766 			}
   12767 			break;
   12768 		case NVM_OFF_CFG2:
   12769 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12770 			if (rv != 0) {
   12771 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12772 				rv = 0;
   12773 			}
   12774 			break;
   12775 		case NVM_OFF_CFG4:
   12776 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12777 			if (rv != 0) {
   12778 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12779 				rv = 0;
   12780 			}
   12781 			break;
   12782 		case NVM_OFF_LED_1_CFG:
   12783 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12784 			if (rv != 0) {
   12785 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12786 				rv = 0;
   12787 			}
   12788 			break;
   12789 		case NVM_OFF_LED_0_2_CFG:
   12790 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12791 			if (rv != 0) {
   12792 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12793 				rv = 0;
   12794 			}
   12795 			break;
   12796 		case NVM_OFF_ID_LED_SETTINGS:
   12797 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12798 			if (rv != 0) {
   12799 				*data = ID_LED_RESERVED_FFFF;
   12800 				rv = 0;
   12801 			}
   12802 			break;
   12803 		default:
   12804 			DPRINTF(WM_DEBUG_NVM,
   12805 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12806 			*data = NVM_RESERVED_WORD;
   12807 			break;
   12808 		}
   12809 	}
   12810 
   12811 	sc->nvm.release(sc);
   12812 	return rv;
   12813 }
   12814 
   12815 /* Lock, detecting NVM type, validate checksum, version and read */
   12816 
   12817 static int
   12818 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12819 {
   12820 	uint32_t eecd = 0;
   12821 
   12822 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12823 	    || sc->sc_type == WM_T_82583) {
   12824 		eecd = CSR_READ(sc, WMREG_EECD);
   12825 
   12826 		/* Isolate bits 15 & 16 */
   12827 		eecd = ((eecd >> 15) & 0x03);
   12828 
   12829 		/* If both bits are set, device is Flash type */
   12830 		if (eecd == 0x03)
   12831 			return 0;
   12832 	}
   12833 	return 1;
   12834 }
   12835 
   12836 static int
   12837 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12838 {
   12839 	uint32_t eec;
   12840 
   12841 	eec = CSR_READ(sc, WMREG_EEC);
   12842 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12843 		return 1;
   12844 
   12845 	return 0;
   12846 }
   12847 
   12848 /*
   12849  * wm_nvm_validate_checksum
   12850  *
   12851  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12852  */
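          /*
           * I.e. an image is considered valid when
           *
           *	(word[0] + word[1] + ... + word[63]) & 0xffff == NVM_CHECKSUM
           *
           * (0xBABA); the vendor picks the checksum word so the sum comes
           * out right.
           */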
   12853 static int
   12854 wm_nvm_validate_checksum(struct wm_softc *sc)
   12855 {
   12856 	uint16_t checksum;
   12857 	uint16_t eeprom_data;
   12858 #ifdef WM_DEBUG
   12859 	uint16_t csum_wordaddr, valid_checksum;
   12860 #endif
   12861 	int i;
   12862 
   12863 	checksum = 0;
   12864 
   12865 	/* Don't check for I211 */
   12866 	if (sc->sc_type == WM_T_I211)
   12867 		return 0;
   12868 
   12869 #ifdef WM_DEBUG
   12870 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12871 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12872 		csum_wordaddr = NVM_OFF_COMPAT;
   12873 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12874 	} else {
   12875 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12876 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12877 	}
   12878 
   12879 	/* Dump EEPROM image for debug */
   12880 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12881 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12882 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12883 		/* XXX PCH_SPT? */
   12884 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12885 		if ((eeprom_data & valid_checksum) == 0) {
   12886 			DPRINTF(WM_DEBUG_NVM,
   12887 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12888 				device_xname(sc->sc_dev), eeprom_data,
   12889 				    valid_checksum));
   12890 		}
   12891 	}
   12892 
   12893 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12894 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12895 		for (i = 0; i < NVM_SIZE; i++) {
   12896 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12897 				printf("XXXX ");
   12898 			else
   12899 				printf("%04hx ", eeprom_data);
   12900 			if (i % 8 == 7)
   12901 				printf("\n");
   12902 		}
   12903 	}
   12904 
   12905 #endif /* WM_DEBUG */
   12906 
   12907 	for (i = 0; i < NVM_SIZE; i++) {
   12908 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12909 			return 1;
   12910 		checksum += eeprom_data;
   12911 	}
   12912 
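	/*
	 * A mismatch is only reported under WM_DEBUG; we deliberately don't
	 * fail here, presumably because some images in the field ship with
	 * a bad checksum.
	 */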
   12913 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12914 #ifdef WM_DEBUG
   12915 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12916 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12917 #endif
   12918 	}
   12919 
   12920 	return 0;
   12921 }
   12922 
   12923 static void
   12924 wm_nvm_version_invm(struct wm_softc *sc)
   12925 {
   12926 	uint32_t dword;
   12927 
   12928 	/*
    12929 	 * Linux's code to decode the version is very strange, so we don't
    12930 	 * follow that algorithm and just use word 61 as documented.
    12931 	 * It may not be perfect, though...
   12932 	 *
   12933 	 * Example:
   12934 	 *
    12935 	 *   Word61: 00800030 -> Version 0.6 (the I211 spec update mentions 0.6)
   12936 	 */
   12937 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12938 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12939 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12940 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12941 }
   12942 
   12943 static void
   12944 wm_nvm_version(struct wm_softc *sc)
   12945 {
   12946 	uint16_t major, minor, build, patch;
   12947 	uint16_t uid0, uid1;
   12948 	uint16_t nvm_data;
   12949 	uint16_t off;
   12950 	bool check_version = false;
   12951 	bool check_optionrom = false;
   12952 	bool have_build = false;
   12953 	bool have_uid = true;
   12954 
   12955 	/*
   12956 	 * Version format:
   12957 	 *
   12958 	 * XYYZ
   12959 	 * X0YZ
   12960 	 * X0YY
   12961 	 *
   12962 	 * Example:
   12963 	 *
    12964 	 *	82571	0x50a2	5.10.2?	(the spec update mentions 5.6-5.10)
    12965 	 *	82571	0x50a6	5.10.6?
    12966 	 *	82572	0x506a	5.6.10?
    12967 	 *	82572EI	0x5069	5.6.9?
    12968 	 *	82574L	0x1080	1.8.0?	(the spec update mentions 2.1.4)
    12969 	 *		0x2013	2.1.3?
    12970 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12971 	 */
   12972 
   12973 	/*
   12974 	 * XXX
    12975 	 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12976 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12977 	 */
   12978 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12979 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12980 		have_uid = false;
   12981 
   12982 	switch (sc->sc_type) {
   12983 	case WM_T_82571:
   12984 	case WM_T_82572:
   12985 	case WM_T_82574:
   12986 	case WM_T_82583:
   12987 		check_version = true;
   12988 		check_optionrom = true;
   12989 		have_build = true;
   12990 		break;
   12991 	case WM_T_82575:
   12992 	case WM_T_82576:
   12993 	case WM_T_82580:
   12994 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12995 			check_version = true;
   12996 		break;
   12997 	case WM_T_I211:
   12998 		wm_nvm_version_invm(sc);
   12999 		have_uid = false;
   13000 		goto printver;
   13001 	case WM_T_I210:
   13002 		if (!wm_nvm_flash_presence_i210(sc)) {
   13003 			wm_nvm_version_invm(sc);
   13004 			have_uid = false;
   13005 			goto printver;
   13006 		}
   13007 		/* FALLTHROUGH */
   13008 	case WM_T_I350:
   13009 	case WM_T_I354:
   13010 		check_version = true;
   13011 		check_optionrom = true;
   13012 		break;
   13013 	default:
   13014 		return;
   13015 	}
   13016 	if (check_version
   13017 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13018 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13019 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13020 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13021 			build = nvm_data & NVM_BUILD_MASK;
   13022 			have_build = true;
   13023 		} else
   13024 			minor = nvm_data & 0x00ff;
   13025 
    13026 		/* Convert the BCD-encoded minor to decimal */
   13027 		minor = (minor / 16) * 10 + (minor % 16);
   13028 		sc->sc_nvm_ver_major = major;
   13029 		sc->sc_nvm_ver_minor = minor;
   13030 
   13031 printver:
   13032 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13033 		    sc->sc_nvm_ver_minor);
   13034 		if (have_build) {
   13035 			sc->sc_nvm_ver_build = build;
   13036 			aprint_verbose(".%d", build);
   13037 		}
   13038 	}
   13039 
    13040 	/* Assume the Option ROM area is above NVM_SIZE */
   13041 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13042 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13043 		/* Option ROM Version */
   13044 		if ((off != 0x0000) && (off != 0xffff)) {
   13045 			int rv;
   13046 
   13047 			off += NVM_COMBO_VER_OFF;
   13048 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13049 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13050 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13051 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13052 				/* 16bits */
   13053 				major = uid0 >> 8;
   13054 				build = (uid0 << 8) | (uid1 >> 8);
   13055 				patch = uid1 & 0x00ff;
   13056 				aprint_verbose(", option ROM Version %d.%d.%d",
   13057 				    major, build, patch);
   13058 			}
   13059 		}
   13060 	}
   13061 
   13062 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13063 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13064 }
   13065 
   13066 /*
   13067  * wm_nvm_read:
   13068  *
   13069  *	Read data from the serial EEPROM.
   13070  */
   13071 static int
   13072 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13073 {
   13074 	int rv;
   13075 
   13076 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13077 		device_xname(sc->sc_dev), __func__));
   13078 
   13079 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13080 		return -1;
   13081 
   13082 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13083 
   13084 	return rv;
   13085 }
   13086 
   13087 /*
   13088  * Hardware semaphores.
    13089  * Very complex...
   13090  */
   13091 
   13092 static int
   13093 wm_get_null(struct wm_softc *sc)
   13094 {
   13095 
   13096 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13097 		device_xname(sc->sc_dev), __func__));
   13098 	return 0;
   13099 }
   13100 
   13101 static void
   13102 wm_put_null(struct wm_softc *sc)
   13103 {
   13104 
   13105 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13106 		device_xname(sc->sc_dev), __func__));
   13107 	return;
   13108 }
   13109 
   13110 static int
   13111 wm_get_eecd(struct wm_softc *sc)
   13112 {
   13113 	uint32_t reg;
   13114 	int x;
   13115 
   13116 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13117 		device_xname(sc->sc_dev), __func__));
   13118 
   13119 	reg = CSR_READ(sc, WMREG_EECD);
   13120 
   13121 	/* Request EEPROM access. */
   13122 	reg |= EECD_EE_REQ;
   13123 	CSR_WRITE(sc, WMREG_EECD, reg);
   13124 
    13125 	/* ...and wait for it to be granted. */
   13126 	for (x = 0; x < 1000; x++) {
   13127 		reg = CSR_READ(sc, WMREG_EECD);
   13128 		if (reg & EECD_EE_GNT)
   13129 			break;
   13130 		delay(5);
   13131 	}
   13132 	if ((reg & EECD_EE_GNT) == 0) {
   13133 		aprint_error_dev(sc->sc_dev,
   13134 		    "could not acquire EEPROM GNT\n");
   13135 		reg &= ~EECD_EE_REQ;
   13136 		CSR_WRITE(sc, WMREG_EECD, reg);
   13137 		return -1;
   13138 	}
   13139 
   13140 	return 0;
   13141 }
   13142 
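/*
 * Bit-bang helpers for the EEPROM serial clock (EECD_SK).  The post-edge
 * delay depends on the device type: SPI parts need only ~1us, while the
 * slower Microwire parts need ~50us.
 */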
   13143 static void
   13144 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13145 {
   13146 
   13147 	*eecd |= EECD_SK;
   13148 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13149 	CSR_WRITE_FLUSH(sc);
   13150 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13151 		delay(1);
   13152 	else
   13153 		delay(50);
   13154 }
   13155 
   13156 static void
   13157 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13158 {
   13159 
   13160 	*eecd &= ~EECD_SK;
   13161 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13162 	CSR_WRITE_FLUSH(sc);
   13163 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13164 		delay(1);
   13165 	else
   13166 		delay(50);
   13167 }
   13168 
   13169 static void
   13170 wm_put_eecd(struct wm_softc *sc)
   13171 {
   13172 	uint32_t reg;
   13173 
   13174 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13175 		device_xname(sc->sc_dev), __func__));
   13176 
    13177 	/* Stop the NVM */
   13178 	reg = CSR_READ(sc, WMREG_EECD);
   13179 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13180 		/* Pull CS high */
   13181 		reg |= EECD_CS;
   13182 		wm_nvm_eec_clock_lower(sc, &reg);
   13183 	} else {
   13184 		/* CS on Microwire is active-high */
   13185 		reg &= ~(EECD_CS | EECD_DI);
   13186 		CSR_WRITE(sc, WMREG_EECD, reg);
   13187 		wm_nvm_eec_clock_raise(sc, &reg);
   13188 		wm_nvm_eec_clock_lower(sc, &reg);
   13189 	}
   13190 
   13191 	reg = CSR_READ(sc, WMREG_EECD);
   13192 	reg &= ~EECD_EE_REQ;
   13193 	CSR_WRITE(sc, WMREG_EECD, reg);
   13194 
   13195 	return;
   13196 }
   13197 
   13198 /*
   13199  * Get hardware semaphore.
   13200  * Same as e1000_get_hw_semaphore_generic()
   13201  */
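/*
 * The semaphore is taken in two steps: first wait for the firmware to
 * release SWSM_SMBI, then claim SWSM_SWESMBI by writing it and reading it
 * back.  A minimal caller sketch (error handling elided):
 *
 *	if (wm_get_swsm_semaphore(sc) == 0) {
 *		... access the shared resource ...
 *		wm_put_swsm_semaphore(sc);
 *	}
 */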
   13202 static int
   13203 wm_get_swsm_semaphore(struct wm_softc *sc)
   13204 {
   13205 	int32_t timeout;
   13206 	uint32_t swsm;
   13207 
   13208 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13209 		device_xname(sc->sc_dev), __func__));
   13210 	KASSERT(sc->sc_nvm_wordsize > 0);
   13211 
   13212 retry:
   13213 	/* Get the SW semaphore. */
   13214 	timeout = sc->sc_nvm_wordsize + 1;
   13215 	while (timeout) {
   13216 		swsm = CSR_READ(sc, WMREG_SWSM);
   13217 
   13218 		if ((swsm & SWSM_SMBI) == 0)
   13219 			break;
   13220 
   13221 		delay(50);
   13222 		timeout--;
   13223 	}
   13224 
   13225 	if (timeout == 0) {
   13226 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13227 			/*
   13228 			 * In rare circumstances, the SW semaphore may already
   13229 			 * be held unintentionally. Clear the semaphore once
   13230 			 * before giving up.
   13231 			 */
   13232 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13233 			wm_put_swsm_semaphore(sc);
   13234 			goto retry;
   13235 		}
   13236 		aprint_error_dev(sc->sc_dev,
   13237 		    "could not acquire SWSM SMBI\n");
   13238 		return 1;
   13239 	}
   13240 
   13241 	/* Get the FW semaphore. */
   13242 	timeout = sc->sc_nvm_wordsize + 1;
   13243 	while (timeout) {
   13244 		swsm = CSR_READ(sc, WMREG_SWSM);
   13245 		swsm |= SWSM_SWESMBI;
   13246 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13247 		/* If we managed to set the bit we got the semaphore. */
   13248 		swsm = CSR_READ(sc, WMREG_SWSM);
   13249 		if (swsm & SWSM_SWESMBI)
   13250 			break;
   13251 
   13252 		delay(50);
   13253 		timeout--;
   13254 	}
   13255 
   13256 	if (timeout == 0) {
   13257 		aprint_error_dev(sc->sc_dev,
   13258 		    "could not acquire SWSM SWESMBI\n");
   13259 		/* Release semaphores */
   13260 		wm_put_swsm_semaphore(sc);
   13261 		return 1;
   13262 	}
   13263 	return 0;
   13264 }
   13265 
   13266 /*
   13267  * Put hardware semaphore.
   13268  * Same as e1000_put_hw_semaphore_generic()
   13269  */
   13270 static void
   13271 wm_put_swsm_semaphore(struct wm_softc *sc)
   13272 {
   13273 	uint32_t swsm;
   13274 
   13275 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13276 		device_xname(sc->sc_dev), __func__));
   13277 
   13278 	swsm = CSR_READ(sc, WMREG_SWSM);
   13279 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13280 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13281 }
   13282 
   13283 /*
   13284  * Get SW/FW semaphore.
   13285  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13286  */
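/*
 * Each resource has a software bit and a firmware bit in SW_FW_SYNC:
 * "mask << SWFW_SOFT_SHIFT" is the software claim and
 * "mask << SWFW_FIRM_SHIFT" the firmware claim.  The SWSM semaphore above
 * guards the read-modify-write of SW_FW_SYNC itself.
 */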
   13287 static int
   13288 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13289 {
   13290 	uint32_t swfw_sync;
   13291 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13292 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13293 	int timeout;
   13294 
   13295 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13296 		device_xname(sc->sc_dev), __func__));
   13297 
   13298 	if (sc->sc_type == WM_T_80003)
   13299 		timeout = 50;
   13300 	else
   13301 		timeout = 200;
   13302 
   13303 	while (timeout) {
   13304 		if (wm_get_swsm_semaphore(sc)) {
   13305 			aprint_error_dev(sc->sc_dev,
   13306 			    "%s: failed to get semaphore\n",
   13307 			    __func__);
   13308 			return 1;
   13309 		}
   13310 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13311 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13312 			swfw_sync |= swmask;
   13313 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13314 			wm_put_swsm_semaphore(sc);
   13315 			return 0;
   13316 		}
   13317 		wm_put_swsm_semaphore(sc);
   13318 		delay(5000);
   13319 		timeout--;
   13320 	}
   13321 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13322 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13323 	return 1;
   13324 }
   13325 
   13326 static void
   13327 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13328 {
   13329 	uint32_t swfw_sync;
   13330 
   13331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13332 		device_xname(sc->sc_dev), __func__));
   13333 
   13334 	while (wm_get_swsm_semaphore(sc) != 0)
   13335 		continue;
   13336 
   13337 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13338 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13339 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13340 
   13341 	wm_put_swsm_semaphore(sc);
   13342 }
   13343 
   13344 static int
   13345 wm_get_nvm_80003(struct wm_softc *sc)
   13346 {
   13347 	int rv;
   13348 
   13349 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13350 		device_xname(sc->sc_dev), __func__));
   13351 
   13352 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13353 		aprint_error_dev(sc->sc_dev,
   13354 		    "%s: failed to get semaphore(SWFW)\n",
   13355 		    __func__);
   13356 		return rv;
   13357 	}
   13358 
   13359 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13360 	    && (rv = wm_get_eecd(sc)) != 0) {
   13361 		aprint_error_dev(sc->sc_dev,
   13362 		    "%s: failed to get semaphore(EECD)\n",
   13363 		    __func__);
   13364 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13365 		return rv;
   13366 	}
   13367 
   13368 	return 0;
   13369 }
   13370 
   13371 static void
   13372 wm_put_nvm_80003(struct wm_softc *sc)
   13373 {
   13374 
   13375 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13376 		device_xname(sc->sc_dev), __func__));
   13377 
   13378 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13379 		wm_put_eecd(sc);
   13380 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13381 }
   13382 
   13383 static int
   13384 wm_get_nvm_82571(struct wm_softc *sc)
   13385 {
   13386 	int rv;
   13387 
   13388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13389 		device_xname(sc->sc_dev), __func__));
   13390 
   13391 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13392 		return rv;
   13393 
   13394 	switch (sc->sc_type) {
   13395 	case WM_T_82573:
   13396 		break;
   13397 	default:
   13398 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13399 			rv = wm_get_eecd(sc);
   13400 		break;
   13401 	}
   13402 
   13403 	if (rv != 0) {
   13404 		aprint_error_dev(sc->sc_dev,
   13405 		    "%s: failed to get semaphore\n",
   13406 		    __func__);
   13407 		wm_put_swsm_semaphore(sc);
   13408 	}
   13409 
   13410 	return rv;
   13411 }
   13412 
   13413 static void
   13414 wm_put_nvm_82571(struct wm_softc *sc)
   13415 {
   13416 
   13417 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13418 		device_xname(sc->sc_dev), __func__));
   13419 
   13420 	switch (sc->sc_type) {
   13421 	case WM_T_82573:
   13422 		break;
   13423 	default:
   13424 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13425 			wm_put_eecd(sc);
   13426 		break;
   13427 	}
   13428 
   13429 	wm_put_swsm_semaphore(sc);
   13430 }
   13431 
   13432 static int
   13433 wm_get_phy_82575(struct wm_softc *sc)
   13434 {
   13435 
   13436 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13437 		device_xname(sc->sc_dev), __func__));
   13438 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13439 }
   13440 
   13441 static void
   13442 wm_put_phy_82575(struct wm_softc *sc)
   13443 {
   13444 
   13445 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13446 		device_xname(sc->sc_dev), __func__));
   13447 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13448 }
   13449 
   13450 static int
   13451 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13452 {
   13453 	uint32_t ext_ctrl;
    13454 	int timeout;
   13455 
   13456 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13457 		device_xname(sc->sc_dev), __func__));
   13458 
   13459 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13460 	for (timeout = 0; timeout < 200; timeout++) {
   13461 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13462 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13463 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13464 
   13465 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13466 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13467 			return 0;
   13468 		delay(5000);
   13469 	}
   13470 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13471 	    device_xname(sc->sc_dev), ext_ctrl);
   13472 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13473 	return 1;
   13474 }
   13475 
   13476 static void
   13477 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13478 {
   13479 	uint32_t ext_ctrl;
   13480 
   13481 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13482 		device_xname(sc->sc_dev), __func__));
   13483 
   13484 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13485 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13486 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13487 
   13488 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13489 }
   13490 
   13491 static int
   13492 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13493 {
   13494 	uint32_t ext_ctrl;
   13495 	int timeout;
   13496 
   13497 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13498 		device_xname(sc->sc_dev), __func__));
   13499 	mutex_enter(sc->sc_ich_phymtx);
   13500 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13501 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13502 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13503 			break;
   13504 		delay(1000);
   13505 	}
   13506 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13507 		printf("%s: SW has already locked the resource\n",
   13508 		    device_xname(sc->sc_dev));
   13509 		goto out;
   13510 	}
   13511 
   13512 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13513 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13514 	for (timeout = 0; timeout < 1000; timeout++) {
   13515 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13516 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13517 			break;
   13518 		delay(1000);
   13519 	}
   13520 	if (timeout >= 1000) {
   13521 		printf("%s: failed to acquire semaphore\n",
   13522 		    device_xname(sc->sc_dev));
   13523 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13524 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13525 		goto out;
   13526 	}
   13527 	return 0;
   13528 
   13529 out:
   13530 	mutex_exit(sc->sc_ich_phymtx);
   13531 	return 1;
   13532 }
   13533 
   13534 static void
   13535 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13536 {
   13537 	uint32_t ext_ctrl;
   13538 
   13539 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13540 		device_xname(sc->sc_dev), __func__));
   13541 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13542 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13543 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13544 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13545 	} else {
   13546 		printf("%s: Semaphore unexpectedly released\n",
   13547 		    device_xname(sc->sc_dev));
   13548 	}
   13549 
   13550 	mutex_exit(sc->sc_ich_phymtx);
   13551 }
   13552 
   13553 static int
   13554 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13555 {
   13556 
   13557 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13558 		device_xname(sc->sc_dev), __func__));
   13559 	mutex_enter(sc->sc_ich_nvmmtx);
   13560 
   13561 	return 0;
   13562 }
   13563 
   13564 static void
   13565 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13566 {
   13567 
   13568 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13569 		device_xname(sc->sc_dev), __func__));
   13570 	mutex_exit(sc->sc_ich_nvmmtx);
   13571 }
   13572 
   13573 static int
   13574 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13575 {
   13576 	int i = 0;
   13577 	uint32_t reg;
   13578 
   13579 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13580 		device_xname(sc->sc_dev), __func__));
   13581 
   13582 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13583 	do {
   13584 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13585 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13586 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13587 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13588 			break;
   13589 		delay(2*1000);
   13590 		i++;
   13591 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13592 
   13593 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13594 		wm_put_hw_semaphore_82573(sc);
   13595 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13596 		    device_xname(sc->sc_dev));
   13597 		return -1;
   13598 	}
   13599 
   13600 	return 0;
   13601 }
   13602 
   13603 static void
   13604 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13605 {
   13606 	uint32_t reg;
   13607 
   13608 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13609 		device_xname(sc->sc_dev), __func__));
   13610 
   13611 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13612 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13613 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13614 }
   13615 
   13616 /*
   13617  * Management mode and power management related subroutines.
   13618  * BMC, AMT, suspend/resume and EEE.
   13619  */
   13620 
   13621 #ifdef WM_WOL
   13622 static int
   13623 wm_check_mng_mode(struct wm_softc *sc)
   13624 {
   13625 	int rv;
   13626 
   13627 	switch (sc->sc_type) {
   13628 	case WM_T_ICH8:
   13629 	case WM_T_ICH9:
   13630 	case WM_T_ICH10:
   13631 	case WM_T_PCH:
   13632 	case WM_T_PCH2:
   13633 	case WM_T_PCH_LPT:
   13634 	case WM_T_PCH_SPT:
   13635 	case WM_T_PCH_CNP:
   13636 		rv = wm_check_mng_mode_ich8lan(sc);
   13637 		break;
   13638 	case WM_T_82574:
   13639 	case WM_T_82583:
   13640 		rv = wm_check_mng_mode_82574(sc);
   13641 		break;
   13642 	case WM_T_82571:
   13643 	case WM_T_82572:
   13644 	case WM_T_82573:
   13645 	case WM_T_80003:
   13646 		rv = wm_check_mng_mode_generic(sc);
   13647 		break;
   13648 	default:
    13649 		/* nothing to do */
   13650 		rv = 0;
   13651 		break;
   13652 	}
   13653 
   13654 	return rv;
   13655 }
   13656 
   13657 static int
   13658 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13659 {
   13660 	uint32_t fwsm;
   13661 
   13662 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13663 
   13664 	if (((fwsm & FWSM_FW_VALID) != 0)
   13665 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13666 		return 1;
   13667 
   13668 	return 0;
   13669 }
   13670 
   13671 static int
   13672 wm_check_mng_mode_82574(struct wm_softc *sc)
   13673 {
   13674 	uint16_t data;
   13675 
   13676 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13677 
   13678 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13679 		return 1;
   13680 
   13681 	return 0;
   13682 }
   13683 
   13684 static int
   13685 wm_check_mng_mode_generic(struct wm_softc *sc)
   13686 {
   13687 	uint32_t fwsm;
   13688 
   13689 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13690 
   13691 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13692 		return 1;
   13693 
   13694 	return 0;
   13695 }
   13696 #endif /* WM_WOL */
   13697 
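/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through should stay enabled, i.e.
 *	management firmware is present, MANC_RECV_TCO_EN is set, and the
 *	device is in a pass-through (not ASF-only) management mode.
 */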
   13698 static int
   13699 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13700 {
   13701 	uint32_t manc, fwsm, factps;
   13702 
   13703 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13704 		return 0;
   13705 
   13706 	manc = CSR_READ(sc, WMREG_MANC);
   13707 
   13708 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13709 		device_xname(sc->sc_dev), manc));
   13710 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13711 		return 0;
   13712 
   13713 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13714 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13715 		factps = CSR_READ(sc, WMREG_FACTPS);
   13716 		if (((factps & FACTPS_MNGCG) == 0)
   13717 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13718 			return 1;
   13719 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13720 		uint16_t data;
   13721 
   13722 		factps = CSR_READ(sc, WMREG_FACTPS);
   13723 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13724 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13725 			device_xname(sc->sc_dev), factps, data));
   13726 		if (((factps & FACTPS_MNGCG) == 0)
   13727 		    && ((data & NVM_CFG2_MNGM_MASK)
   13728 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13729 			return 1;
   13730 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13731 	    && ((manc & MANC_ASF_EN) == 0))
   13732 		return 1;
   13733 
   13734 	return 0;
   13735 }
   13736 
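/*
 * Check whether the firmware currently blocks PHY resets: on ICH/PCH parts
 * poll FWSM_RSPCIPHY for up to ~300ms (30 x 10ms), on 8257x/80003 parts
 * check MANC_BLK_PHY_RST_ON_IDE.
 */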
   13737 static bool
   13738 wm_phy_resetisblocked(struct wm_softc *sc)
   13739 {
   13740 	bool blocked = false;
   13741 	uint32_t reg;
   13742 	int i = 0;
   13743 
   13744 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13745 		device_xname(sc->sc_dev), __func__));
   13746 
   13747 	switch (sc->sc_type) {
   13748 	case WM_T_ICH8:
   13749 	case WM_T_ICH9:
   13750 	case WM_T_ICH10:
   13751 	case WM_T_PCH:
   13752 	case WM_T_PCH2:
   13753 	case WM_T_PCH_LPT:
   13754 	case WM_T_PCH_SPT:
   13755 	case WM_T_PCH_CNP:
   13756 		do {
   13757 			reg = CSR_READ(sc, WMREG_FWSM);
   13758 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13759 				blocked = true;
   13760 				delay(10*1000);
   13761 				continue;
   13762 			}
   13763 			blocked = false;
   13764 		} while (blocked && (i++ < 30));
   13765 		return blocked;
   13767 	case WM_T_82571:
   13768 	case WM_T_82572:
   13769 	case WM_T_82573:
   13770 	case WM_T_82574:
   13771 	case WM_T_82583:
   13772 	case WM_T_80003:
   13773 		reg = CSR_READ(sc, WMREG_MANC);
   13774 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13775 			return true;
   13776 		else
   13777 			return false;
   13779 	default:
   13780 		/* no problem */
   13781 		break;
   13782 	}
   13783 
   13784 	return false;
   13785 }
   13786 
   13787 static void
   13788 wm_get_hw_control(struct wm_softc *sc)
   13789 {
   13790 	uint32_t reg;
   13791 
   13792 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13793 		device_xname(sc->sc_dev), __func__));
   13794 
   13795 	if (sc->sc_type == WM_T_82573) {
   13796 		reg = CSR_READ(sc, WMREG_SWSM);
   13797 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13798 	} else if (sc->sc_type >= WM_T_82571) {
   13799 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13800 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13801 	}
   13802 }
   13803 
   13804 static void
   13805 wm_release_hw_control(struct wm_softc *sc)
   13806 {
   13807 	uint32_t reg;
   13808 
   13809 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13810 		device_xname(sc->sc_dev), __func__));
   13811 
   13812 	if (sc->sc_type == WM_T_82573) {
   13813 		reg = CSR_READ(sc, WMREG_SWSM);
   13814 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13815 	} else if (sc->sc_type >= WM_T_82571) {
   13816 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13818 	}
   13819 }
   13820 
   13821 static void
   13822 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13823 {
   13824 	uint32_t reg;
   13825 
   13826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13827 		device_xname(sc->sc_dev), __func__));
   13828 
   13829 	if (sc->sc_type < WM_T_PCH2)
   13830 		return;
   13831 
   13832 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13833 
   13834 	if (gate)
   13835 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13836 	else
   13837 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13838 
   13839 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13840 }
   13841 
   13842 static void
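/*
 * Switch PHY access back from SMBus to PCIe (MDIO).  Roughly: if the PHY
 * is not accessible, force SMBus mode and/or toggle LANPHYPC to power
 * cycle the PHY, then undo the forcing once access works again and reset
 * the PHY unless the firmware blocks the reset.
 */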
   13843 wm_smbustopci(struct wm_softc *sc)
   13844 {
   13845 	uint32_t fwsm, reg;
   13846 	int rv = 0;
   13847 
   13848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13849 		device_xname(sc->sc_dev), __func__));
   13850 
   13851 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13852 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13853 
   13854 	/* Disable ULP */
   13855 	wm_ulp_disable(sc);
   13856 
   13857 	/* Acquire PHY semaphore */
   13858 	sc->phy.acquire(sc);
   13859 
   13860 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13861 	switch (sc->sc_type) {
   13862 	case WM_T_PCH_LPT:
   13863 	case WM_T_PCH_SPT:
   13864 	case WM_T_PCH_CNP:
   13865 		if (wm_phy_is_accessible_pchlan(sc))
   13866 			break;
   13867 
   13868 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13869 		reg |= CTRL_EXT_FORCE_SMBUS;
   13870 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13871 #if 0
   13872 		/* XXX Isn't this required??? */
   13873 		CSR_WRITE_FLUSH(sc);
   13874 #endif
   13875 		delay(50 * 1000);
   13876 		/* FALLTHROUGH */
   13877 	case WM_T_PCH2:
   13878 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13879 			break;
   13880 		/* FALLTHROUGH */
   13881 	case WM_T_PCH:
   13882 		if (sc->sc_type == WM_T_PCH)
   13883 			if ((fwsm & FWSM_FW_VALID) != 0)
   13884 				break;
   13885 
   13886 		if (wm_phy_resetisblocked(sc) == true) {
   13887 			printf("XXX reset is blocked(3)\n");
   13888 			break;
   13889 		}
   13890 
   13891 		wm_toggle_lanphypc_pch_lpt(sc);
   13892 
   13893 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13894 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13895 				break;
   13896 
   13897 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13898 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13899 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13900 
   13901 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13902 				break;
   13903 			rv = -1;
   13904 		}
   13905 		break;
   13906 	default:
   13907 		break;
   13908 	}
   13909 
   13910 	/* Release semaphore */
   13911 	sc->phy.release(sc);
   13912 
   13913 	if (rv == 0) {
   13914 		if (wm_phy_resetisblocked(sc)) {
   13915 			printf("XXX reset is blocked(4)\n");
   13916 			goto out;
   13917 		}
   13918 		wm_reset_phy(sc);
   13919 		if (wm_phy_resetisblocked(sc))
   13920 			printf("XXX reset is blocked(4)\n");
   13921 	}
   13922 
   13923 out:
   13924 	/*
   13925 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13926 	 */
   13927 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13928 		delay(10*1000);
   13929 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13930 	}
   13931 }
   13932 
   13933 static void
   13934 wm_init_manageability(struct wm_softc *sc)
   13935 {
   13936 
   13937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13938 		device_xname(sc->sc_dev), __func__));
   13939 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13940 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13941 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13942 
   13943 		/* Disable hardware interception of ARP */
   13944 		manc &= ~MANC_ARP_EN;
   13945 
   13946 		/* Enable receiving management packets to the host */
   13947 		if (sc->sc_type >= WM_T_82571) {
   13948 			manc |= MANC_EN_MNG2HOST;
   13949 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13950 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13951 		}
   13952 
   13953 		CSR_WRITE(sc, WMREG_MANC, manc);
   13954 	}
   13955 }
   13956 
   13957 static void
   13958 wm_release_manageability(struct wm_softc *sc)
   13959 {
   13960 
   13961 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13962 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13963 
   13964 		manc |= MANC_ARP_EN;
   13965 		if (sc->sc_type >= WM_T_82571)
   13966 			manc &= ~MANC_EN_MNG2HOST;
   13967 
   13968 		CSR_WRITE(sc, WMREG_MANC, manc);
   13969 	}
   13970 }
   13971 
   13972 static void
   13973 wm_get_wakeup(struct wm_softc *sc)
   13974 {
   13975 
   13976 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13977 	switch (sc->sc_type) {
   13978 	case WM_T_82573:
   13979 	case WM_T_82583:
   13980 		sc->sc_flags |= WM_F_HAS_AMT;
   13981 		/* FALLTHROUGH */
   13982 	case WM_T_80003:
   13983 	case WM_T_82575:
   13984 	case WM_T_82576:
   13985 	case WM_T_82580:
   13986 	case WM_T_I350:
   13987 	case WM_T_I354:
   13988 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13989 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13990 		/* FALLTHROUGH */
   13991 	case WM_T_82541:
   13992 	case WM_T_82541_2:
   13993 	case WM_T_82547:
   13994 	case WM_T_82547_2:
   13995 	case WM_T_82571:
   13996 	case WM_T_82572:
   13997 	case WM_T_82574:
   13998 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13999 		break;
   14000 	case WM_T_ICH8:
   14001 	case WM_T_ICH9:
   14002 	case WM_T_ICH10:
   14003 	case WM_T_PCH:
   14004 	case WM_T_PCH2:
   14005 	case WM_T_PCH_LPT:
   14006 	case WM_T_PCH_SPT:
   14007 	case WM_T_PCH_CNP:
   14008 		sc->sc_flags |= WM_F_HAS_AMT;
   14009 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14010 		break;
   14011 	default:
   14012 		break;
   14013 	}
   14014 
   14015 	/* 1: HAS_MANAGE */
   14016 	if (wm_enable_mng_pass_thru(sc) != 0)
   14017 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14018 
   14019 	/*
    14020 	 * Note that the WOL flags are set after the EEPROM-related
    14021 	 * initialization is done.
   14022 	 */
   14023 }
   14024 
   14025 /*
   14026  * Unconfigure Ultra Low Power mode.
   14027  * Only for I217 and newer (see below).
   14028  */
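/*
 * Two exit paths: if valid ME firmware is present, ask it to leave ULP via
 * the H2ME register and poll until FWSM_ULP_CFG_DONE clears; otherwise do
 * the manual sequence ourselves (toggle LANPHYPC, unforce SMBus mode, and
 * clear the I218_ULP_CONFIG1 bits).
 */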
   14029 static int
   14030 wm_ulp_disable(struct wm_softc *sc)
   14031 {
   14032 	uint32_t reg;
   14033 	uint16_t phyreg;
   14034 	int i = 0, rv = 0;
   14035 
   14036 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14037 		device_xname(sc->sc_dev), __func__));
   14038 	/* Exclude old devices */
   14039 	if ((sc->sc_type < WM_T_PCH_LPT)
   14040 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14041 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14042 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14043 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14044 		return 0;
   14045 
   14046 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14047 		/* Request ME un-configure ULP mode in the PHY */
   14048 		reg = CSR_READ(sc, WMREG_H2ME);
   14049 		reg &= ~H2ME_ULP;
   14050 		reg |= H2ME_ENFORCE_SETTINGS;
   14051 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14052 
   14053 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14054 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14055 			if (i++ == 30) {
   14056 				printf("%s timed out\n", __func__);
   14057 				return -1;
   14058 			}
   14059 			delay(10 * 1000);
   14060 		}
   14061 		reg = CSR_READ(sc, WMREG_H2ME);
   14062 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14063 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14064 
   14065 		return 0;
   14066 	}
   14067 
   14068 	/* Acquire semaphore */
   14069 	sc->phy.acquire(sc);
   14070 
   14071 	/* Toggle LANPHYPC */
   14072 	wm_toggle_lanphypc_pch_lpt(sc);
   14073 
   14074 	/* Unforce SMBus mode in PHY */
   14075 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14076 	if (rv != 0) {
   14077 		uint32_t reg2;
   14078 
   14079 		printf("%s: Force SMBus first.\n", __func__);
   14080 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14081 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14083 		delay(50 * 1000);
   14084 
   14085 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14086 		    &phyreg);
   14087 		if (rv != 0)
   14088 			goto release;
   14089 	}
   14090 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14091 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14092 
   14093 	/* Unforce SMBus mode in MAC */
   14094 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14095 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14096 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14097 
   14098 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14099 	if (rv != 0)
   14100 		goto release;
   14101 	phyreg |= HV_PM_CTRL_K1_ENA;
   14102 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14103 
   14104 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14105 		&phyreg);
   14106 	if (rv != 0)
   14107 		goto release;
   14108 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14109 	    | I218_ULP_CONFIG1_STICKY_ULP
   14110 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14111 	    | I218_ULP_CONFIG1_WOL_HOST
   14112 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14113 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14114 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14115 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14116 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14117 	phyreg |= I218_ULP_CONFIG1_START;
   14118 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14119 
   14120 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14121 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14122 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14123 
   14124 release:
   14125 	/* Release semaphore */
   14126 	sc->phy.release(sc);
   14127 	wm_gmii_reset(sc);
   14128 	delay(50 * 1000);
   14129 
   14130 	return rv;
   14131 }
   14132 
   14133 /* WOL in the newer chipset interfaces (pchlan) */
   14134 static void
   14135 wm_enable_phy_wakeup(struct wm_softc *sc)
   14136 {
   14137 #if 0
   14138 	uint16_t preg;
   14139 
   14140 	/* Copy MAC RARs to PHY RARs */
   14141 
   14142 	/* Copy MAC MTA to PHY MTA */
   14143 
   14144 	/* Configure PHY Rx Control register */
   14145 
   14146 	/* Enable PHY wakeup in MAC register */
   14147 
   14148 	/* Configure and enable PHY wakeup in PHY registers */
   14149 
   14150 	/* Activate PHY wakeup */
   14151 
   14152 	/* XXX */
   14153 #endif
   14154 }
   14155 
   14156 /* Power down workaround on D3 */
   14157 static void
   14158 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14159 {
   14160 	uint32_t reg;
   14161 	int i;
   14162 
   14163 	for (i = 0; i < 2; i++) {
   14164 		/* Disable link */
   14165 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14166 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14167 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14168 
   14169 		/*
   14170 		 * Call gig speed drop workaround on Gig disable before
   14171 		 * accessing any PHY registers
   14172 		 */
   14173 		if (sc->sc_type == WM_T_ICH8)
   14174 			wm_gig_downshift_workaround_ich8lan(sc);
   14175 
   14176 		/* Write VR power-down enable */
   14177 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14178 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14179 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14180 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14181 
   14182 		/* Read it back and test */
   14183 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14184 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14185 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14186 			break;
   14187 
   14188 		/* Issue PHY reset and repeat at most one more time */
   14189 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14190 	}
   14191 }
   14192 
   14193 /*
   14194  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14195  *  @sc: pointer to the HW structure
   14196  *
   14197  *  During S0 to Sx transition, it is possible the link remains at gig
   14198  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14199  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14200  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14201  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14202  *  needs to be written.
   14203  *  Parts that support (and are linked to a partner which support) EEE in
   14204  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14205  *  than 10Mbps w/o EEE.
   14206  */
   14207 static void
   14208 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14209 {
   14210 	uint32_t phy_ctrl;
   14211 
   14212 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14213 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14214 
   14215 	if (sc->sc_phytype == WMPHY_I217) {
   14216 		uint16_t devid = sc->sc_pcidevid;
   14217 
   14218 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14219 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14220 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14221 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14222 		    (sc->sc_type >= WM_T_PCH_SPT))
   14223 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14224 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14225 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14226 
   14227 #if 0 /* notyet */
   14228 		if (sc->phy.acquire(sc) != 0)
   14229 			goto out;
   14230 
   14231 		/* XXX Do workaround for EEE */
   14232 
   14233 		/*
   14234 		 * For i217 Intel Rapid Start Technology support,
   14235 		 * when the system is going into Sx and no manageability engine
   14236 		 * is present, the driver must configure proxy to reset only on
   14237 		 * power good.	LPI (Low Power Idle) state must also reset only
   14238 		 * on power good, as well as the MTA (Multicast table array).
   14239 		 * The SMBus release must also be disabled on LCD reset.
   14240 		 */
   14241 
   14242 		/*
   14243 		 * Enable MTA to reset for Intel Rapid Start Technology
   14244 		 * Support
   14245 		 */
   14246 
   14247 		sc->phy.release(sc);
   14248 #endif
   14249 	}
   14250 #if 0
   14251 out:
   14252 #endif
   14253 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14254 
   14255 	if (sc->sc_type == WM_T_ICH8)
   14256 		wm_gig_downshift_workaround_ich8lan(sc);
   14257 
   14258 	if (sc->sc_type >= WM_T_PCH) {
   14259 		wm_oem_bits_config_ich8lan(sc, false);
   14260 
   14261 		/* Reset PHY to activate OEM bits on 82577/8 */
   14262 		if (sc->sc_type == WM_T_PCH)
   14263 			wm_reset_phy(sc);
   14264 
   14265 		if (sc->phy.acquire(sc) != 0)
   14266 			return;
   14267 		wm_write_smbus_addr(sc);
   14268 		sc->phy.release(sc);
   14269 	}
   14270 }
   14271 
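/*
 * Arm the device for wake-on-LAN: advertise the wakeup capability, enable
 * magic packet reception (WUFC_MAG), apply the suspend workarounds where
 * needed, and finally set PME_EN in the PCI power management registers.
 */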
   14272 static void
   14273 wm_enable_wakeup(struct wm_softc *sc)
   14274 {
   14275 	uint32_t reg, pmreg;
   14276 	pcireg_t pmode;
   14277 
   14278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14279 		device_xname(sc->sc_dev), __func__));
   14280 
   14281 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14282 		&pmreg, NULL) == 0)
   14283 		return;
   14284 
   14285 	/* Advertise the wakeup capability */
   14286 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14287 	    | CTRL_SWDPIN(3));
   14288 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14289 
   14290 	/* Keep the laser running on fiber adapters */
   14291 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14292 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14293 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14294 		reg |= CTRL_EXT_SWDPIN(3);
   14295 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14296 	}
   14297 
   14298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14299 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
   14300 		wm_suspend_workarounds_ich8lan(sc);
   14301 
   14302 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14303 #if 0	/* for the multicast packet */
   14304 	reg |= WUFC_MC;
   14305 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14306 #endif
   14307 
   14308 	if (sc->sc_type >= WM_T_PCH)
   14309 		wm_enable_phy_wakeup(sc);
   14310 	else {
   14311 		/* Enable wakeup by the MAC */
   14312 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14313 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14314 	}
   14315 
   14316 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14317 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14318 		|| (sc->sc_type == WM_T_PCH2))
   14319 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14320 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14321 
   14322 	/* Request PME */
   14323 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14324 #if 0
   14325 	/* Disable WOL */
   14326 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14327 #else
   14328 	/* For WOL */
   14329 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14330 #endif
   14331 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14332 }
   14333 
   14334 /* Disable ASPM L0s and/or L1 for workaround */
   14335 static void
   14336 wm_disable_aspm(struct wm_softc *sc)
   14337 {
   14338 	pcireg_t reg, mask = 0;
    14339 	const char *str = "";
   14340 
   14341 	/*
    14342 	 * Only for PCIe devices which have the PCIe capability in the
    14343 	 * PCI config space.
   14344 	 */
   14345 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14346 		return;
   14347 
   14348 	switch (sc->sc_type) {
   14349 	case WM_T_82571:
   14350 	case WM_T_82572:
   14351 		/*
   14352 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14353 		 * State Power management L1 State (ASPM L1).
   14354 		 */
   14355 		mask = PCIE_LCSR_ASPM_L1;
   14356 		str = "L1 is";
   14357 		break;
   14358 	case WM_T_82573:
   14359 	case WM_T_82574:
   14360 	case WM_T_82583:
   14361 		/*
   14362 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14363 		 *
    14364 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14365 		 * some chipsets.  The documents for the 82574 and 82583 say
    14366 		 * that disabling L0s on those specific chipsets is
    14367 		 * sufficient, but we follow what the Intel em driver does.
   14368 		 *
   14369 		 * References:
   14370 		 * Errata 8 of the Specification Update of i82573.
   14371 		 * Errata 20 of the Specification Update of i82574.
   14372 		 * Errata 9 of the Specification Update of i82583.
   14373 		 */
   14374 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14375 		str = "L0s and L1 are";
   14376 		break;
   14377 	default:
   14378 		return;
   14379 	}
   14380 
   14381 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14382 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14383 	reg &= ~mask;
   14384 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14385 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14386 
   14387 	/* Print only in wm_attach() */
   14388 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14389 		aprint_verbose_dev(sc->sc_dev,
   14390 		    "ASPM %s disabled to workaround the errata.\n", str);
   14391 }
   14392 
   14393 /* LPLU */
   14394 
   14395 static void
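/*
 * LPLU (Low Power Link Up) trades link speed for power.  The routine below
 * clears the D0 LPLU enable so full-speed negotiation is used while the
 * device is active; the register holding the bit differs by device family,
 * hence the switch.
 */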
   14396 wm_lplu_d0_disable(struct wm_softc *sc)
   14397 {
   14398 	struct mii_data *mii = &sc->sc_mii;
   14399 	uint32_t reg;
   14400 
   14401 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14402 		device_xname(sc->sc_dev), __func__));
   14403 
   14404 	if (sc->sc_phytype == WMPHY_IFE)
   14405 		return;
   14406 
   14407 	switch (sc->sc_type) {
   14408 	case WM_T_82571:
   14409 	case WM_T_82572:
   14410 	case WM_T_82573:
   14411 	case WM_T_82575:
   14412 	case WM_T_82576:
   14413 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14414 		reg &= ~PMR_D0_LPLU;
   14415 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14416 		break;
   14417 	case WM_T_82580:
   14418 	case WM_T_I350:
   14419 	case WM_T_I210:
   14420 	case WM_T_I211:
   14421 		reg = CSR_READ(sc, WMREG_PHPM);
   14422 		reg &= ~PHPM_D0A_LPLU;
   14423 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14424 		break;
   14425 	case WM_T_82574:
   14426 	case WM_T_82583:
   14427 	case WM_T_ICH8:
   14428 	case WM_T_ICH9:
   14429 	case WM_T_ICH10:
   14430 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14431 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14432 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14433 		CSR_WRITE_FLUSH(sc);
   14434 		break;
   14435 	case WM_T_PCH:
   14436 	case WM_T_PCH2:
   14437 	case WM_T_PCH_LPT:
   14438 	case WM_T_PCH_SPT:
   14439 	case WM_T_PCH_CNP:
   14440 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14441 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14442 		if (wm_phy_resetisblocked(sc) == false)
   14443 			reg |= HV_OEM_BITS_ANEGNOW;
   14444 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14445 		break;
   14446 	default:
   14447 		break;
   14448 	}
   14449 }
   14450 
   14451 /* EEE */
   14452 
   14453 static void
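/*
 * When WM_F_EEE is set, advertise EEE for 1000/100 autonegotiation in
 * IPCNFG and enable LPI (Low Power Idle) entry and LPI flow control in
 * EEER; otherwise clear those bits.
 */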
   14454 wm_set_eee_i350(struct wm_softc *sc)
   14455 {
   14456 	uint32_t ipcnfg, eeer;
   14457 
   14458 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14459 	eeer = CSR_READ(sc, WMREG_EEER);
   14460 
   14461 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14462 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14463 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14464 		    | EEER_LPI_FC);
   14465 	} else {
   14466 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14467 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14468 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14469 		    | EEER_LPI_FC);
   14470 	}
   14471 
   14472 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14473 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14474 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14475 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14476 }
   14477 
   14478 /*
   14479  * Workarounds (mainly PHY related).
   14480  * Basically, PHY's workarounds are in the PHY drivers.
   14481  */
   14482 
   14483 /* Work-around for 82566 Kumeran PCS lock loss */
   14484 static void
   14485 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14486 {
   14487 	struct mii_data *mii = &sc->sc_mii;
   14488 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14489 	int i;
   14490 	int reg;
   14491 
   14492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14493 		device_xname(sc->sc_dev), __func__));
   14494 
   14495 	/* If the link is not up, do nothing */
   14496 	if ((status & STATUS_LU) == 0)
   14497 		return;
   14498 
    14499 	/* Nothing to do if the link speed is other than 1Gbps */
   14500 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14501 		return;
   14502 
   14503 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14504 	for (i = 0; i < 10; i++) {
   14505 		/* read twice */
   14506 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14507 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14508 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14509 			goto out;	/* GOOD! */
   14510 
   14511 		/* Reset the PHY */
   14512 		wm_reset_phy(sc);
   14513 		delay(5*1000);
   14514 	}
   14515 
   14516 	/* Disable GigE link negotiation */
   14517 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14518 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14519 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14520 
   14521 	/*
   14522 	 * Call gig speed drop workaround on Gig disable before accessing
   14523 	 * any PHY registers.
   14524 	 */
   14525 	wm_gig_downshift_workaround_ich8lan(sc);
   14526 
   14527 out:
   14528 	return;
   14529 }
   14530 
   14531 /* WOL from S5 stops working */
   14532 static void
   14533 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14534 {
   14535 	uint16_t kmreg;
   14536 
   14537 	/* Only for igp3 */
   14538 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14539 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14540 			return;
   14541 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14542 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14543 			return;
   14544 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14545 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14546 	}
   14547 }
   14548 
   14549 /*
   14550  * Workaround for pch's PHYs
   14551  * XXX should be moved to new PHY driver?
   14552  */
   14553 static void
   14554 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14555 {
   14556 
   14557 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14558 		device_xname(sc->sc_dev), __func__));
   14559 	KASSERT(sc->sc_type == WM_T_PCH);
   14560 
   14561 	if (sc->sc_phytype == WMPHY_82577)
   14562 		wm_set_mdio_slow_mode_hv(sc);
   14563 
   14564 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14565 
   14566 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14567 
   14568 	/* 82578 */
   14569 	if (sc->sc_phytype == WMPHY_82578) {
   14570 		struct mii_softc *child;
   14571 
   14572 		/*
   14573 		 * Return registers to default by doing a soft reset then
   14574 		 * writing 0x3140 to the control register
   14575 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14576 		 */
   14577 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14578 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14579 			PHY_RESET(child);
   14580 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14581 			    0x3140);
   14582 		}
   14583 	}
   14584 
   14585 	/* Select page 0 */
   14586 	sc->phy.acquire(sc);
   14587 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14588 	sc->phy.release(sc);
   14589 
   14590 	/*
   14591 	 * Configure the K1 Si workaround during phy reset assuming there is
   14592 	 * link so that it disables K1 if link is in 1Gbps.
   14593 	 */
   14594 	wm_k1_gig_workaround_hv(sc, 1);
   14595 }
   14596 
   14597 static void
   14598 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14599 {
   14600 
   14601 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14602 		device_xname(sc->sc_dev), __func__));
   14603 	KASSERT(sc->sc_type == WM_T_PCH2);
   14604 
   14605 	wm_set_mdio_slow_mode_hv(sc);
   14606 }
   14607 
   14608 /**
   14609  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14610  *  @link: link up bool flag
   14611  *
   14612  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14613  *  preventing further DMA write requests.  Workaround the issue by disabling
    14614  *  the de-assertion of the clock request when in 1Gbps mode.
   14615  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14616  *  speeds in order to avoid Tx hangs.
   14617  **/
   14618 static int
   14619 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14620 {
   14621 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14622 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14623 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14624 	uint16_t phyreg;
   14625 
   14626 	if (link && (speed == STATUS_SPEED_1000)) {
   14627 		sc->phy.acquire(sc);
   14628 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14629 		    &phyreg);
   14630 		if (rv != 0)
   14631 			goto release;
   14632 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14633 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14634 		if (rv != 0)
   14635 			goto release;
   14636 		delay(20);
   14637 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14638 
   14639 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14640 		    &phyreg);
   14641 release:
   14642 		sc->phy.release(sc);
   14643 		return rv;
   14644 	}
   14645 
   14646 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14647 
   14648 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14649 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14650 	    || !link
   14651 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14652 		goto update_fextnvm6;
   14653 
   14654 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14655 
   14656 	/* Clear link status transmit timeout */
   14657 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14658 	if (speed == STATUS_SPEED_100) {
   14659 		/* Set inband Tx timeout to 5x10us for 100Half */
   14660 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14661 
   14662 		/* Do not extend the K1 entry latency for 100Half */
   14663 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14664 	} else {
   14665 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14666 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14667 
   14668 		/* Extend the K1 entry latency for 10 Mbps */
   14669 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14670 	}
   14671 
   14672 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14673 
   14674 update_fextnvm6:
   14675 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14676 	return 0;
   14677 }
   14678 
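/*
 * K1 is a KMRN power-save state.  Disable K1 while the link is up
 * (restoring the NVM default when it is down) and apply the link stall
 * fix via the IGP3 KMRN diagnostic register.
 */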
   14679 static int
   14680 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14681 {
   14682 	int k1_enable = sc->sc_nvm_k1_enabled;
   14683 
   14684 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14685 		device_xname(sc->sc_dev), __func__));
   14686 
   14687 	if (sc->phy.acquire(sc) != 0)
   14688 		return -1;
   14689 
   14690 	if (link) {
   14691 		k1_enable = 0;
   14692 
   14693 		/* Link stall fix for link up */
   14694 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14695 		    0x0100);
   14696 	} else {
   14697 		/* Link stall fix for link down */
   14698 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14699 		    0x4100);
   14700 	}
   14701 
   14702 	wm_configure_k1_ich8lan(sc, k1_enable);
   14703 	sc->phy.release(sc);
   14704 
   14705 	return 0;
   14706 }
   14707 
   14708 static void
   14709 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14710 {
   14711 	uint32_t reg;
   14712 
   14713 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14714 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14715 	    reg | HV_KMRN_MDIO_SLOW);
   14716 }
   14717 
   14718 static void
   14719 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14720 {
   14721 	uint32_t ctrl, ctrl_ext, tmp;
   14722 	uint16_t kmreg;
   14723 	int rv;
   14724 
   14725 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14726 
   14727 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14728 	if (rv != 0)
   14729 		return;
   14730 
   14731 	if (k1_enable)
   14732 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14733 	else
   14734 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14735 
   14736 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14737 	if (rv != 0)
   14738 		return;
   14739 
   14740 	delay(20);
   14741 
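	/*
	 * Briefly force the MAC speed setting with speed bypass enabled,
	 * then restore it; this appears to be required for the new K1
	 * configuration to take effect.
	 */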
   14742 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14743 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14744 
   14745 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14746 	tmp |= CTRL_FRCSPD;
   14747 
   14748 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14749 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14750 	CSR_WRITE_FLUSH(sc);
   14751 	delay(20);
   14752 
   14753 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14754 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14755 	CSR_WRITE_FLUSH(sc);
   14756 	delay(20);
   14759 }
   14760 
/* Special case: the 82575 needs manual init after reset ... */
   14762 static void
   14763 wm_reset_init_script_82575(struct wm_softc *sc)
   14764 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   14769 
   14770 	/* SerDes configuration via SERDESCTRL */
   14771 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14772 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14773 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14774 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14775 
   14776 	/* CCM configuration via CCMCTL register */
   14777 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14778 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14779 
   14780 	/* PCIe lanes configuration */
   14781 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14782 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14785 
   14786 	/* PCIe PLL Configuration */
   14787 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14788 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14789 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14790 }
   14791 
   14792 static void
   14793 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14794 {
   14795 	uint32_t reg;
   14796 	uint16_t nvmword;
   14797 	int rv;
   14798 
   14799 	if (sc->sc_type != WM_T_82580)
   14800 		return;
   14801 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14802 		return;
   14803 
   14804 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14805 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14806 	if (rv != 0) {
   14807 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14808 		    __func__);
   14809 		return;
   14810 	}
   14811 
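	/* Mirror the NVM's per-port MDIO settings into MDICNFG. */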
   14812 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14813 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14814 		reg |= MDICNFG_DEST;
   14815 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14816 		reg |= MDICNFG_COM_MDIO;
   14817 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14818 }
   14819 
   14820 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14821 
   14822 static bool
   14823 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14824 {
   14825 	uint32_t reg;
   14826 	uint16_t id1, id2;
   14827 	int i, rv;
   14828 
   14829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14830 		device_xname(sc->sc_dev), __func__));
   14831 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14832 
   14833 	id1 = id2 = 0xffff;
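	/* The PHY ID may read back invalid at first; retry once. */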
   14834 	for (i = 0; i < 2; i++) {
   14835 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   14836 		    &id1);
   14837 		if ((rv != 0) || MII_INVALIDID(id1))
   14838 			continue;
   14839 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   14840 		    &id2);
   14841 		if ((rv != 0) || MII_INVALIDID(id2))
   14842 			continue;
   14843 		break;
   14844 	}
   14845 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   14846 		goto out;
   14847 
   14848 	/*
   14849 	 * In case the PHY needs to be in mdio slow mode,
   14850 	 * set slow mode and try to get the PHY id again.
   14851 	 */
   14852 	if (sc->sc_type < WM_T_PCH_LPT) {
   14853 		sc->phy.release(sc);
   14854 		wm_set_mdio_slow_mode_hv(sc);
   14855 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14856 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14857 		sc->phy.acquire(sc);
   14858 	}
   14859 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14860 		printf("XXX return with false\n");
   14861 		return false;
   14862 	}
   14863 out:
   14864 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14865 		/* Only unforce SMBus if ME is not active */
   14866 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14867 			uint16_t phyreg;
   14868 
   14869 			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			if (rv == 0) {
				phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
				    CV_SMB_CTRL, phyreg);
			}
   14875 
   14876 			/* Unforce SMBus mode in MAC */
   14877 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14878 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14879 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14880 		}
   14881 	}
   14882 	return true;
   14883 }
   14884 
   14885 static void
   14886 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14887 {
   14888 	uint32_t reg;
   14889 	int i;
   14890 
   14891 	/* Set PHY Config Counter to 50msec */
   14892 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14893 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14894 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14895 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14896 
   14897 	/* Toggle LANPHYPC */
   14898 	reg = CSR_READ(sc, WMREG_CTRL);
   14899 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14900 	reg &= ~CTRL_LANPHYPC_VALUE;
   14901 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14902 	CSR_WRITE_FLUSH(sc);
   14903 	delay(1000);
   14904 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14905 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14906 	CSR_WRITE_FLUSH(sc);
   14907 
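	/*
	 * Wait for the PHY to come back up: pre-LPT parts simply wait
	 * 50ms; LPT and newer poll CTRL_EXT for the LPCD (PHY power
	 * cycle done) indication, up to ~100ms, then settle for 30ms.
	 */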
   14908 	if (sc->sc_type < WM_T_PCH_LPT)
   14909 		delay(50 * 1000);
   14910 	else {
   14911 		i = 20;
   14912 
   14913 		do {
   14914 			delay(5 * 1000);
   14915 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14916 		    && i--);
   14917 
   14918 		delay(30 * 1000);
   14919 	}
   14920 }
   14921 
   14922 static int
   14923 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14924 {
   14925 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14926 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14927 	uint32_t rxa;
   14928 	uint16_t scale = 0, lat_enc = 0;
   14929 	int32_t obff_hwm = 0;
   14930 	int64_t lat_ns, value;
   14931 
   14932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14933 		device_xname(sc->sc_dev), __func__));
   14934 
   14935 	if (link) {
   14936 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14937 		uint32_t status;
   14938 		uint16_t speed;
   14939 		pcireg_t preg;
   14940 
   14941 		status = CSR_READ(sc, WMREG_STATUS);
   14942 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14943 		case STATUS_SPEED_10:
   14944 			speed = 10;
   14945 			break;
   14946 		case STATUS_SPEED_100:
   14947 			speed = 100;
   14948 			break;
   14949 		case STATUS_SPEED_1000:
   14950 			speed = 1000;
   14951 			break;
   14952 		default:
   14953 			device_printf(sc->sc_dev, "Unknown speed "
   14954 			    "(status = %08x)\n", status);
   14955 			return -1;
   14956 		}
   14957 
   14958 		/* Rx Packet Buffer Allocation size (KB) */
   14959 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14960 
   14961 		/*
   14962 		 * Determine the maximum latency tolerated by the device.
   14963 		 *
   14964 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14965 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14966 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14967 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14968 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14969 		 */
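		/*
		 * Time to drain the Rx buffer, less two max-sized frames,
		 * at the current link speed: bytes * 8 * 1000 / Mb/s
		 * yields nanoseconds.
		 */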
   14970 		lat_ns = ((int64_t)rxa * 1024 -
   14971 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14972 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14973 		if (lat_ns < 0)
   14974 			lat_ns = 0;
   14975 		else
   14976 			lat_ns /= speed;
   14977 		value = lat_ns;
   14978 
   14979 		while (value > LTRV_VALUE) {
			scale++;
   14981 			value = howmany(value, __BIT(5));
   14982 		}
   14983 		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
   14986 			return -1;
   14987 		}
   14988 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14989 
   14990 		/* Determine the maximum latency tolerated by the platform */
   14991 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14992 		    WM_PCI_LTR_CAP_LPT);
   14993 		max_snoop = preg & 0xffff;
   14994 		max_nosnoop = preg >> 16;
   14995 
   14996 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14997 
   14998 		if (lat_enc > max_ltr_enc) {
   14999 			lat_enc = max_ltr_enc;
   15000 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15001 			    * PCI_LTR_SCALETONS(
   15002 				    __SHIFTOUT(lat_enc,
   15003 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15004 		}
   15005 
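		/*
		 * Convert the tolerated latency into KB that would arrive
		 * at line rate in that time; the OBFF high water mark is
		 * the Rx buffer allocation minus that amount.
		 */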
   15006 		if (lat_ns) {
   15007 			lat_ns *= speed * 1000;
   15008 			lat_ns /= 8;
   15009 			lat_ns /= 1000000000;
   15010 			obff_hwm = (int32_t)(rxa - lat_ns);
   15011 		}
   15012 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   15013 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   15014 			    "(rxa = %d, lat_ns = %d)\n",
   15015 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15016 			return -1;
   15017 		}
   15018 	}
   15019 	/* Snoop and No-Snoop latencies the same */
   15020 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15021 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15022 
   15023 	/* Set OBFF high water mark */
   15024 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15025 	reg |= obff_hwm;
   15026 	CSR_WRITE(sc, WMREG_SVT, reg);
   15027 
   15028 	/* Enable OBFF */
   15029 	reg = CSR_READ(sc, WMREG_SVCR);
   15030 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15031 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15032 
   15033 	return 0;
   15034 }
   15035 
   15036 /*
   15037  * I210 Errata 25 and I211 Errata 10
   15038  * Slow System Clock.
   15039  */
   15040 static void
   15041 wm_pll_workaround_i210(struct wm_softc *sc)
   15042 {
   15043 	uint32_t mdicnfg, wuc;
   15044 	uint32_t reg;
   15045 	pcireg_t pcireg;
   15046 	uint32_t pmreg;
   15047 	uint16_t nvmword, tmp_nvmword;
   15048 	int phyval;
   15049 	bool wa_done = false;
   15050 	int i;
   15051 
   15052 	/* Save WUC and MDICNFG registers */
   15053 	wuc = CSR_READ(sc, WMREG_WUC);
   15054 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15055 
   15056 	reg = mdicnfg & ~MDICNFG_DEST;
   15057 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15058 
   15059 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15060 		nvmword = INVM_DEFAULT_AL;
   15061 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15062 
   15063 	/* Get Power Management cap offset */
   15064 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15065 		&pmreg, NULL) == 0)
   15066 		return;
   15067 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15068 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15069 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15070 
   15071 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15072 			break; /* OK */
   15073 		}
   15074 
   15075 		wa_done = true;
   15076 		/* Directly reset the internal PHY */
   15077 		reg = CSR_READ(sc, WMREG_CTRL);
   15078 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15079 
   15080 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15081 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15083 
   15084 		CSR_WRITE(sc, WMREG_WUC, 0);
   15085 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15086 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15087 
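		/*
		 * Cycle the device through D3hot and back to D0 with the
		 * patched autoload word in place so the PHY PLL restarts.
		 */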
   15088 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15089 		    pmreg + PCI_PMCSR);
   15090 		pcireg |= PCI_PMCSR_STATE_D3;
   15091 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15092 		    pmreg + PCI_PMCSR, pcireg);
   15093 		delay(1000);
   15094 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15095 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15096 		    pmreg + PCI_PMCSR, pcireg);
   15097 
   15098 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15099 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15100 
   15101 		/* Restore WUC register */
   15102 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15103 	}
   15104 
   15105 	/* Restore MDICNFG setting */
   15106 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15107 	if (wa_done)
   15108 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15109 }
   15110 
   15111 static void
   15112 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15113 {
   15114 	uint32_t reg;
   15115 
   15116 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15117 		device_xname(sc->sc_dev), __func__));
   15118 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15119 	    || (sc->sc_type == WM_T_PCH_CNP));
   15120 
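	/*
	 * Quirk for legacy (INTx) interrupts: ungate the side clock and
	 * disable IOSF sideband clock gating and clock requests.
	 */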
   15121 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15122 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15123 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15124 
   15125 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15126 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15127 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15128 }
   15129