/*	$NetBSD: if_wm.c,v 1.789 2023/09/25 09:15:48 rin Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.789 2023/09/25 09:15:48 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
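
/*
 * Illustrative usage sketch (not part of the driver): DPRINTF() above
 * takes the printf()-style arguments as one parenthesized token, so
 * calls need doubled parentheses.  "sc" is a hypothetical struct
 * wm_softc pointer whose sc_debug field has the relevant WM_DEBUG_*
 * bit set.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif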

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
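
/*
 * Illustrative sketch (assumption, not driver code): txq_num and
 * txq_ndesc are powers of two, so the wrap-around in WM_NEXTTX() and
 * WM_NEXTTXS() reduces to a cheap AND with the mask rather than a
 * modulo.  Advancing a producer index through a 4096-entry ring:
 */
#if 0
	int next = WM_NEXTTX(txq, txq->txq_next);	/* (x + 1) & 4095 */
	/* ...fill txq->txq_descs[next]... */
	txq->txq_next = next;
#endif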

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
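
/*
 * Illustrative sketch (assumption): what one WM_Q_EVCNT_DEFINE(txq, txdw)
 * line expands to inside a queue structure, and how the matching attach
 * macro names the counter "txq00txdw" for queue 0 at attach time:
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;

	WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, device_xname(sc->sc_dev),
	    EVCNT_TYPE_INTR);
#endif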

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
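
/*
 * Illustrative sketch (assumption): the acquire/release pairs above are
 * used bracket-style around the locked accessors, with acquire's return
 * value checked because it is declared warn_unused_result.  "dev",
 * "phy", "reg", "val" and "rv" are hypothetical locals.
 */
#if 0
	if (sc->phy.acquire(sc) == 0) {
		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
		sc->phy.release(sc);
	}
#endif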

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. Cause" and
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
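
/*
 * Illustrative sketch (assumption): reassembling a multi-buffer packet
 * with the macros above.  rxq_tailp always points at the m_next slot of
 * the last mbuf in the chain, so each WM_RXCHAIN_LINK() is O(1); "m1"
 * and "m2" are hypothetical 2k Rx buffers of one jumbo frame.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, rxq_len = 0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2 */
#endif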

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	__nothing
#define	WM_EVCNT_STORE(ev, val)	__nothing
#define	WM_EVCNT_ADD(ev, val)	__nothing

#define WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */
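
/*
 * Illustrative sketch (assumption): on __HAVE_ATOMIC64_LOADSTORE
 * platforms the macros above use a relaxed load/store pair instead of a
 * locked read-modify-write; each counter is only updated from one
 * context at a time, so the pair merely keeps 64-bit reads from tearing
 * under concurrent readers.  "rxbytes" is a hypothetical byte count.
 */
#if 0
	WM_EVCNT_INCR(&sc->sc_ev_linkintr);	/* count one event */
	WM_EVCNT_ADD(&sc->sc_ev_gorc, rxbytes);	/* accumulate a delta */
#endif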

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))
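
/*
 * Illustrative sketch (assumption): CSR_WRITE_FLUSH() reads WMREG_STATUS
 * only to force posted PCI writes out to the chip; a typical pattern
 * when a register write must reach the device before a delay:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(5000);
#endif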

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
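
/*
 * Illustrative sketch (assumption; per-queue register macros assumed
 * from if_wmreg.h): the descriptor base address registers take 32-bit
 * low/high halves, so a 64-bit bus_addr_t is split with the macros
 * above.  On 32-bit bus_addr_t platforms the high half is constant 0.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
#endif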

/*
 * Register read/write functions, other than CSR_{READ,WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers; the remainder are here.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1756 	  "I219 V Ethernet Connection",
   1757 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1759 	  "I219 V (2) Ethernet Connection",
   1760 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1762 	  "I219 V (4) Ethernet Connection",
   1763 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1765 	  "I219 V (5) Ethernet Connection",
   1766 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1768 	  "I219 V (6) Ethernet Connection",
   1769 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1771 	  "I219 V (7) Ethernet Connection",
   1772 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1774 	  "I219 V (8) Ethernet Connection",
   1775 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1777 	  "I219 V (9) Ethernet Connection",
   1778 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1780 	  "I219 V (10) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1783 	  "I219 V (11) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1786 	  "I219 V (12) Ethernet Connection",
   1787 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1789 	  "I219 V (13) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1792 	  "I219 V (14) Ethernet Connection",
   1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1795 	  "I219 V (15) Ethernet Connection",
   1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1798 	  "I219 V (16) Ethernet Connection",
   1799 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1801 	  "I219 V (17) Ethernet Connection",
   1802 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1804 	  "I219 V (18) Ethernet Connection",
   1805 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1807 	  "I219 V (19) Ethernet Connection",
   1808 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1809 	{ 0,			0,
   1810 	  NULL,
   1811 	  0,			0 },
   1812 };
   1813 
   1814 /*
   1815  * Register read/write functions,
   1816  * other than CSR_{READ|WRITE}().
   1817  */
   1818 
   1819 #if 0 /* Not currently used */
   1820 static inline uint32_t
   1821 wm_io_read(struct wm_softc *sc, int reg)
   1822 {
   1823 
   1824 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1825 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1826 }
   1827 #endif
   1828 
   1829 static inline void
   1830 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1831 {
   1832 
   1833 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1834 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1835 }
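
        /*
         * A usage sketch (illustrative only; wm_io_read() above is
         * compiled out): the I/O BAR maps an IOADDR/IODATA register pair
         * at offsets 0 and 4.  An access is two bus cycles: write the
         * register offset to IOADDR, then read or write IODATA.  Reading
         * STATUS through I/O space would look like:
         *
         *	uint32_t status = 0;
         *
         *	if (sc->sc_flags & WM_F_IOH_VALID)
         *		status = wm_io_read(sc, WMREG_STATUS);
         *
         * This path exists only to work around bugs on the chip versions
         * that map an I/O BAR (see wm_attach()).
         */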
   1836 
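        /*
         * Write one byte of an 8-bit-wide internal controller register
         * through a single 32-bit CSR: the byte offset and data are packed
         * into one register write, after which the ready bit is polled.
         */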
   1837 static inline void
   1838 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1839     uint32_t data)
   1840 {
   1841 	uint32_t regval;
   1842 	int i;
   1843 
   1844 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1845 
   1846 	CSR_WRITE(sc, reg, regval);
   1847 
   1848 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1849 		delay(5);
   1850 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1851 			break;
   1852 	}
   1853 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1854 		aprint_error("%s: WARNING:"
   1855 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1856 		    device_xname(sc->sc_dev), reg);
   1857 	}
   1858 }
   1859 
   1860 static inline void
   1861 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1862 {
   1863 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1864 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1865 }
   1866 
   1867 /*
   1868  * Descriptor sync/init functions.
   1869  */
   1870 static inline void
   1871 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1872 {
   1873 	struct wm_softc *sc = txq->txq_sc;
   1874 
   1875 	/* If it will wrap around, sync to the end of the ring. */
   1876 	if ((start + num) > WM_NTXDESC(txq)) {
   1877 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1878 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1879 		    (WM_NTXDESC(txq) - start), ops);
   1880 		num -= (WM_NTXDESC(txq) - start);
   1881 		start = 0;
   1882 	}
   1883 
   1884 	/* Now sync whatever is left. */
   1885 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1886 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1887 }
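
        /*
         * A usage sketch (hypothetical indices): the range start ..
         * start + num - 1 is synced in at most two bus_dmamap_sync()
         * calls, the first covering the tail of the ring when the range
         * wraps.  A caller filling descriptors hands them to the chip
         * with:
         *
         *	wm_cdtxsync(txq, start, num,
         *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
         *
         * and later, before inspecting status the hardware wrote back:
         *
         *	wm_cdtxsync(txq, start, num, BUS_DMASYNC_POSTREAD);
         */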
   1888 
   1889 static inline void
   1890 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1891 {
   1892 	struct wm_softc *sc = rxq->rxq_sc;
   1893 
   1894 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1895 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1896 }
   1897 
   1898 static inline void
   1899 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1900 {
   1901 	struct wm_softc *sc = rxq->rxq_sc;
   1902 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1903 	struct mbuf *m = rxs->rxs_mbuf;
   1904 
   1905 	/*
   1906 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1907 	 * so that the payload after the Ethernet header is aligned
   1908 	 * to a 4-byte boundary.
   1909 	 *
   1910 	 * XXX BRAINDAMAGE ALERT!
   1911 	 * The stupid chip uses the same size for every buffer, which
   1912 	 * is set in the Receive Control register.  We are using the 2K
   1913 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1914 	 * reason, we can't "scoot" packets longer than the standard
   1915 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1916 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1917 	 * the upper layer copy the headers.
   1918 	 */
   1919 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1920 
   1921 	if (sc->sc_type == WM_T_82574) {
   1922 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1923 		rxd->erx_data.erxd_addr =
   1924 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1925 		rxd->erx_data.erxd_dd = 0;
   1926 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1927 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1928 
   1929 		rxd->nqrx_data.nrxd_paddr =
   1930 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1931 		/* Currently, split headers are not supported. */
   1932 		rxd->nqrx_data.nrxd_haddr = 0;
   1933 	} else {
   1934 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1935 
   1936 		wm_set_dma_addr(&rxd->wrx_addr,
   1937 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1938 		rxd->wrx_len = 0;
   1939 		rxd->wrx_cksum = 0;
   1940 		rxd->wrx_status = 0;
   1941 		rxd->wrx_errors = 0;
   1942 		rxd->wrx_special = 0;
   1943 	}
   1944 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1945 
   1946 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1947 }
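
        /*
         * A sketch of the align_tweak decision described above (the
         * actual assignment lives in the init path): the 2-byte scoot is
         * only usable while the largest possible frame still fits in
         * (2K - 2) bytes.
         *
         *	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) >
         *	    (MCLBYTES - 2))
         *		sc->sc_align_tweak = 0;
         *	else
         *		sc->sc_align_tweak = 2;
         */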
   1948 
   1949 /*
   1950  * Device driver interface functions and commonly used functions.
   1951  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1952  */
   1953 
   1954 /* Look up a device in the supported device table */
   1955 static const struct wm_product *
   1956 wm_lookup(const struct pci_attach_args *pa)
   1957 {
   1958 	const struct wm_product *wmp;
   1959 
   1960 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1961 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1962 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1963 			return wmp;
   1964 	}
   1965 	return NULL;
   1966 }
   1967 
   1968 /* The match function (ca_match) */
   1969 static int
   1970 wm_match(device_t parent, cfdata_t cf, void *aux)
   1971 {
   1972 	struct pci_attach_args *pa = aux;
   1973 
   1974 	if (wm_lookup(pa) != NULL)
   1975 		return 1;
   1976 
   1977 	return 0;
   1978 }
   1979 
   1980 /* The attach function (ca_attach) */
   1981 static void
   1982 wm_attach(device_t parent, device_t self, void *aux)
   1983 {
   1984 	struct wm_softc *sc = device_private(self);
   1985 	struct pci_attach_args *pa = aux;
   1986 	prop_dictionary_t dict;
   1987 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1988 	pci_chipset_tag_t pc = pa->pa_pc;
   1989 	int counts[PCI_INTR_TYPE_SIZE];
   1990 	pci_intr_type_t max_type;
   1991 	const char *eetype, *xname;
   1992 	bus_space_tag_t memt;
   1993 	bus_space_handle_t memh;
   1994 	bus_size_t memsize;
   1995 	int memh_valid;
   1996 	int i, error;
   1997 	const struct wm_product *wmp;
   1998 	prop_data_t ea;
   1999 	prop_number_t pn;
   2000 	uint8_t enaddr[ETHER_ADDR_LEN];
   2001 	char buf[256];
   2002 	char wqname[MAXCOMLEN];
   2003 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2004 	pcireg_t preg, memtype;
   2005 	uint16_t eeprom_data, apme_mask;
   2006 	bool force_clear_smbi;
   2007 	uint32_t link_mode;
   2008 	uint32_t reg;
   2009 
   2010 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2011 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2012 #endif
   2013 	sc->sc_dev = self;
   2014 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2015 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2016 	sc->sc_core_stopping = false;
   2017 
   2018 	wmp = wm_lookup(pa);
   2019 #ifdef DIAGNOSTIC
   2020 	if (wmp == NULL) {
   2021 		printf("\n");
   2022 		panic("wm_attach: impossible");
   2023 	}
   2024 #endif
   2025 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2026 
   2027 	sc->sc_pc = pa->pa_pc;
   2028 	sc->sc_pcitag = pa->pa_tag;
   2029 
   2030 	if (pci_dma64_available(pa)) {
   2031 		aprint_verbose(", 64-bit DMA");
   2032 		sc->sc_dmat = pa->pa_dmat64;
   2033 	} else {
   2034 		aprint_verbose(", 32-bit DMA");
   2035 		sc->sc_dmat = pa->pa_dmat;
   2036 	}
   2037 
   2038 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2039 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2040 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2041 
   2042 	sc->sc_type = wmp->wmp_type;
   2043 
   2044 	/* Set default function pointers */
   2045 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2046 	sc->phy.release = sc->nvm.release = wm_put_null;
   2047 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2048 
   2049 	if (sc->sc_type < WM_T_82543) {
   2050 		if (sc->sc_rev < 2) {
   2051 			aprint_error_dev(sc->sc_dev,
   2052 			    "i82542 must be at least rev. 2\n");
   2053 			return;
   2054 		}
   2055 		if (sc->sc_rev < 3)
   2056 			sc->sc_type = WM_T_82542_2_0;
   2057 	}
   2058 
   2059 	/*
   2060 	 * Disable MSI for Errata:
   2061 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2062 	 *
   2063 	 *  82544: Errata 25
   2064 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2065 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2066 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2067 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2068 	 *
   2069 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2070 	 *
   2071 	 *  82571 & 82572: Errata 63
   2072 	 */
   2073 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2074 	    || (sc->sc_type == WM_T_82572))
   2075 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2076 
   2077 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2078 	    || (sc->sc_type == WM_T_82580)
   2079 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2080 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2081 		sc->sc_flags |= WM_F_NEWQUEUE;
   2082 
   2083 	/* Set device properties (mactype) */
   2084 	dict = device_properties(sc->sc_dev);
   2085 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2086 
   2087 	/*
   2088 	 * Map the device.  All devices support memory-mapped access,
   2089 	 * and it is really required for normal operation.
   2090 	 */
   2091 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2092 	switch (memtype) {
   2093 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2094 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2095 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2096 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2097 		break;
   2098 	default:
   2099 		memh_valid = 0;
   2100 		break;
   2101 	}
   2102 
   2103 	if (memh_valid) {
   2104 		sc->sc_st = memt;
   2105 		sc->sc_sh = memh;
   2106 		sc->sc_ss = memsize;
   2107 	} else {
   2108 		aprint_error_dev(sc->sc_dev,
   2109 		    "unable to map device registers\n");
   2110 		return;
   2111 	}
   2112 
   2113 	/*
   2114 	 * In addition, i82544 and later support I/O mapped indirect
   2115 	 * register access.  It is not desirable (nor supported in
   2116 	 * this driver) to use it for normal operation, though it is
   2117 	 * required to work around bugs in some chip versions.
   2118 	 */
   2119 	switch (sc->sc_type) {
   2120 	case WM_T_82544:
   2121 	case WM_T_82541:
   2122 	case WM_T_82541_2:
   2123 	case WM_T_82547:
   2124 	case WM_T_82547_2:
   2125 		/* First we have to find the I/O BAR. */
   2126 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2127 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2128 			if (memtype == PCI_MAPREG_TYPE_IO)
   2129 				break;
   2130 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2131 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2132 				i += 4;	/* skip high bits, too */
   2133 		}
   2134 		if (i < PCI_MAPREG_END) {
   2135 			/*
   2136 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   2137 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
   2138 			 * That is no problem, because the newer chips don't
   2139 			 * have this bug.
   2140 			 *
   2141 			 * The i8254x apparently doesn't respond when the
   2142 			 * I/O BAR is 0, which looks somewhat as if it has
   2143 			 * never been configured.
   2144 			 */
   2145 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2146 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2147 				aprint_error_dev(sc->sc_dev,
   2148 				    "WARNING: I/O BAR at zero.\n");
   2149 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2150 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2151 			    == 0) {
   2152 				sc->sc_flags |= WM_F_IOH_VALID;
   2153 			} else
   2154 				aprint_error_dev(sc->sc_dev,
   2155 				    "WARNING: unable to map I/O space\n");
   2156 		}
   2157 		break;
   2158 	default:
   2159 		break;
   2160 	}
   2161 
   2162 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2163 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2164 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2165 	if (sc->sc_type < WM_T_82542_2_1)
   2166 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2167 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2168 
   2169 	/* Power up chip */
   2170 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2171 	    && error != EOPNOTSUPP) {
   2172 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2173 		return;
   2174 	}
   2175 
   2176 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2177 	/*
   2178 	 * Don't use MSI-X if we can use only one queue; this saves
   2179 	 * interrupt resources.
   2180 	 */
   2181 	if (sc->sc_nqueues > 1) {
   2182 		max_type = PCI_INTR_TYPE_MSIX;
   2183 		/*
   2184 		 * The 82583 has an MSI-X capability in its PCI configuration
   2185 		 * space, but the chip doesn't actually support MSI-X; at
   2186 		 * least the documentation says nothing about it.
   2187 		 */
   2188 		counts[PCI_INTR_TYPE_MSIX]
   2189 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2190 	} else {
   2191 		max_type = PCI_INTR_TYPE_MSI;
   2192 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2193 	}
   2194 
   2195 	/* Allocation settings */
   2196 	counts[PCI_INTR_TYPE_MSI] = 1;
   2197 	counts[PCI_INTR_TYPE_INTX] = 1;
   2198 	/* These defaults may be overridden by the disable flags */
   2199 	if (wm_disable_msi != 0) {
   2200 		counts[PCI_INTR_TYPE_MSI] = 0;
   2201 		if (wm_disable_msix != 0) {
   2202 			max_type = PCI_INTR_TYPE_INTX;
   2203 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2204 		}
   2205 	} else if (wm_disable_msix != 0) {
   2206 		max_type = PCI_INTR_TYPE_MSI;
   2207 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2208 	}
   2209 
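        	/*
        	 * Interrupt allocation with fallback: if MSI-X setup fails
        	 * below, the vectors are released and we come back here to
        	 * retry with MSI; if that fails too, we retry with INTx.
        	 */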
   2210 alloc_retry:
   2211 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2212 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2213 		return;
   2214 	}
   2215 
   2216 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2217 		error = wm_setup_msix(sc);
   2218 		if (error) {
   2219 			pci_intr_release(pc, sc->sc_intrs,
   2220 			    counts[PCI_INTR_TYPE_MSIX]);
   2221 
   2222 			/* Setup for MSI: Disable MSI-X */
   2223 			max_type = PCI_INTR_TYPE_MSI;
   2224 			counts[PCI_INTR_TYPE_MSI] = 1;
   2225 			counts[PCI_INTR_TYPE_INTX] = 1;
   2226 			goto alloc_retry;
   2227 		}
   2228 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2229 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2230 		error = wm_setup_legacy(sc);
   2231 		if (error) {
   2232 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2233 			    counts[PCI_INTR_TYPE_MSI]);
   2234 
   2235 			/* The next try is for INTx: Disable MSI */
   2236 			max_type = PCI_INTR_TYPE_INTX;
   2237 			counts[PCI_INTR_TYPE_INTX] = 1;
   2238 			goto alloc_retry;
   2239 		}
   2240 	} else {
   2241 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2242 		error = wm_setup_legacy(sc);
   2243 		if (error) {
   2244 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2245 			    counts[PCI_INTR_TYPE_INTX]);
   2246 			return;
   2247 		}
   2248 	}
   2249 
   2250 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2251 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2252 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2253 	    WQ_PERCPU | WQ_MPSAFE);
   2254 	if (error) {
   2255 		aprint_error_dev(sc->sc_dev,
   2256 		    "unable to create TxRx workqueue\n");
   2257 		goto out;
   2258 	}
   2259 
   2260 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2261 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2262 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2263 	    WQ_MPSAFE);
   2264 	if (error) {
   2265 		workqueue_destroy(sc->sc_queue_wq);
   2266 		aprint_error_dev(sc->sc_dev,
   2267 		    "unable to create reset workqueue\n");
   2268 		goto out;
   2269 	}
   2270 
   2271 	/*
   2272 	 * Check the function ID (unit number of the chip).
   2273 	 */
   2274 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2275 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2276 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2277 	    || (sc->sc_type == WM_T_82580)
   2278 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2279 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2280 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2281 	else
   2282 		sc->sc_funcid = 0;
   2283 
   2284 	/*
   2285 	 * Determine a few things about the bus we're connected to.
   2286 	 */
   2287 	if (sc->sc_type < WM_T_82543) {
   2288 		/* We don't really know the bus characteristics here. */
   2289 		sc->sc_bus_speed = 33;
   2290 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2291 		/*
   2292 		 * CSA (Communication Streaming Architecture) is about as
   2293 		 * fast as a 32-bit 66MHz PCI bus.
   2294 		 */
   2295 		sc->sc_flags |= WM_F_CSA;
   2296 		sc->sc_bus_speed = 66;
   2297 		aprint_verbose_dev(sc->sc_dev,
   2298 		    "Communication Streaming Architecture\n");
   2299 		if (sc->sc_type == WM_T_82547) {
   2300 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2301 			callout_setfunc(&sc->sc_txfifo_ch,
   2302 			    wm_82547_txfifo_stall, sc);
   2303 			aprint_verbose_dev(sc->sc_dev,
   2304 			    "using 82547 Tx FIFO stall work-around\n");
   2305 		}
   2306 	} else if (sc->sc_type >= WM_T_82571) {
   2307 		sc->sc_flags |= WM_F_PCIE;
   2308 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2309 		    && (sc->sc_type != WM_T_ICH10)
   2310 		    && (sc->sc_type != WM_T_PCH)
   2311 		    && (sc->sc_type != WM_T_PCH2)
   2312 		    && (sc->sc_type != WM_T_PCH_LPT)
   2313 		    && (sc->sc_type != WM_T_PCH_SPT)
   2314 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2315 			/* ICH* and PCH* have no PCIe capability registers */
   2316 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2317 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2318 				NULL) == 0)
   2319 				aprint_error_dev(sc->sc_dev,
   2320 				    "unable to find PCIe capability\n");
   2321 		}
   2322 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2323 	} else {
   2324 		reg = CSR_READ(sc, WMREG_STATUS);
   2325 		if (reg & STATUS_BUS64)
   2326 			sc->sc_flags |= WM_F_BUS64;
   2327 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2328 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2329 
   2330 			sc->sc_flags |= WM_F_PCIX;
   2331 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2332 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2333 				aprint_error_dev(sc->sc_dev,
   2334 				    "unable to find PCIX capability\n");
   2335 			else if (sc->sc_type != WM_T_82545_3 &&
   2336 			    sc->sc_type != WM_T_82546_3) {
   2337 				/*
   2338 				 * Work around a problem caused by the BIOS
   2339 				 * setting the max memory read byte count
   2340 				 * incorrectly.
   2341 				 */
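        				/*
        				 * The MMRBC fields encode a
        				 * power-of-two multiple of 512
        				 * bytes (0 -> 512, ..., 3 -> 4096),
        				 * hence the "512 << bytecnt" in
        				 * the message below.
        				 */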
   2342 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2343 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2344 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2345 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2346 
   2347 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2348 				    PCIX_CMD_BYTECNT_SHIFT;
   2349 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2350 				    PCIX_STATUS_MAXB_SHIFT;
   2351 				if (bytecnt > maxb) {
   2352 					aprint_verbose_dev(sc->sc_dev,
   2353 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2354 					    512 << bytecnt, 512 << maxb);
   2355 					pcix_cmd = (pcix_cmd &
   2356 					    ~PCIX_CMD_BYTECNT_MASK) |
   2357 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2358 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2359 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2360 					    pcix_cmd);
   2361 				}
   2362 			}
   2363 		}
   2364 		/*
   2365 		 * The quad port adapter is special; it has a PCIX-PCIX
   2366 		 * bridge on the board, and can run the secondary bus at
   2367 		 * a higher speed.
   2368 		 */
   2369 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2370 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2371 								      : 66;
   2372 		} else if (sc->sc_flags & WM_F_PCIX) {
   2373 			switch (reg & STATUS_PCIXSPD_MASK) {
   2374 			case STATUS_PCIXSPD_50_66:
   2375 				sc->sc_bus_speed = 66;
   2376 				break;
   2377 			case STATUS_PCIXSPD_66_100:
   2378 				sc->sc_bus_speed = 100;
   2379 				break;
   2380 			case STATUS_PCIXSPD_100_133:
   2381 				sc->sc_bus_speed = 133;
   2382 				break;
   2383 			default:
   2384 				aprint_error_dev(sc->sc_dev,
   2385 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2386 				    reg & STATUS_PCIXSPD_MASK);
   2387 				sc->sc_bus_speed = 66;
   2388 				break;
   2389 			}
   2390 		} else
   2391 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2392 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2393 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2394 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2395 	}
   2396 
   2397 	/* Clear interesting stat counters */
   2398 	CSR_READ(sc, WMREG_COLC);
   2399 	CSR_READ(sc, WMREG_RXERRC);
   2400 
   2401 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2402 	    || (sc->sc_type >= WM_T_ICH8))
   2403 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2404 	if (sc->sc_type >= WM_T_ICH8)
   2405 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2406 
   2407 	/* Set PHY, NVM mutex related stuff */
   2408 	switch (sc->sc_type) {
   2409 	case WM_T_82542_2_0:
   2410 	case WM_T_82542_2_1:
   2411 	case WM_T_82543:
   2412 	case WM_T_82544:
   2413 		/* Microwire */
   2414 		sc->nvm.read = wm_nvm_read_uwire;
   2415 		sc->sc_nvm_wordsize = 64;
   2416 		sc->sc_nvm_addrbits = 6;
   2417 		break;
   2418 	case WM_T_82540:
   2419 	case WM_T_82545:
   2420 	case WM_T_82545_3:
   2421 	case WM_T_82546:
   2422 	case WM_T_82546_3:
   2423 		/* Microwire */
   2424 		sc->nvm.read = wm_nvm_read_uwire;
   2425 		reg = CSR_READ(sc, WMREG_EECD);
   2426 		if (reg & EECD_EE_SIZE) {
   2427 			sc->sc_nvm_wordsize = 256;
   2428 			sc->sc_nvm_addrbits = 8;
   2429 		} else {
   2430 			sc->sc_nvm_wordsize = 64;
   2431 			sc->sc_nvm_addrbits = 6;
   2432 		}
   2433 		sc->sc_flags |= WM_F_LOCK_EECD;
   2434 		sc->nvm.acquire = wm_get_eecd;
   2435 		sc->nvm.release = wm_put_eecd;
   2436 		break;
   2437 	case WM_T_82541:
   2438 	case WM_T_82541_2:
   2439 	case WM_T_82547:
   2440 	case WM_T_82547_2:
   2441 		reg = CSR_READ(sc, WMREG_EECD);
   2442 		/*
   2443 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
   2444 		 * 8254[17], so set the flags and functions before calling it.
   2445 		 */
   2446 		sc->sc_flags |= WM_F_LOCK_EECD;
   2447 		sc->nvm.acquire = wm_get_eecd;
   2448 		sc->nvm.release = wm_put_eecd;
   2449 		if (reg & EECD_EE_TYPE) {
   2450 			/* SPI */
   2451 			sc->nvm.read = wm_nvm_read_spi;
   2452 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2453 			wm_nvm_set_addrbits_size_eecd(sc);
   2454 		} else {
   2455 			/* Microwire */
   2456 			sc->nvm.read = wm_nvm_read_uwire;
   2457 			if ((reg & EECD_EE_ABITS) != 0) {
   2458 				sc->sc_nvm_wordsize = 256;
   2459 				sc->sc_nvm_addrbits = 8;
   2460 			} else {
   2461 				sc->sc_nvm_wordsize = 64;
   2462 				sc->sc_nvm_addrbits = 6;
   2463 			}
   2464 		}
   2465 		break;
   2466 	case WM_T_82571:
   2467 	case WM_T_82572:
   2468 		/* SPI */
   2469 		sc->nvm.read = wm_nvm_read_eerd;
   2470 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2471 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2472 		wm_nvm_set_addrbits_size_eecd(sc);
   2473 		sc->phy.acquire = wm_get_swsm_semaphore;
   2474 		sc->phy.release = wm_put_swsm_semaphore;
   2475 		sc->nvm.acquire = wm_get_nvm_82571;
   2476 		sc->nvm.release = wm_put_nvm_82571;
   2477 		break;
   2478 	case WM_T_82573:
   2479 	case WM_T_82574:
   2480 	case WM_T_82583:
   2481 		sc->nvm.read = wm_nvm_read_eerd;
   2482 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2483 		if (sc->sc_type == WM_T_82573) {
   2484 			sc->phy.acquire = wm_get_swsm_semaphore;
   2485 			sc->phy.release = wm_put_swsm_semaphore;
   2486 			sc->nvm.acquire = wm_get_nvm_82571;
   2487 			sc->nvm.release = wm_put_nvm_82571;
   2488 		} else {
   2489 			/* Both PHY and NVM use the same semaphore. */
   2490 			sc->phy.acquire = sc->nvm.acquire
   2491 			    = wm_get_swfwhw_semaphore;
   2492 			sc->phy.release = sc->nvm.release
   2493 			    = wm_put_swfwhw_semaphore;
   2494 		}
   2495 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2496 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2497 			sc->sc_nvm_wordsize = 2048;
   2498 		} else {
   2499 			/* SPI */
   2500 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2501 			wm_nvm_set_addrbits_size_eecd(sc);
   2502 		}
   2503 		break;
   2504 	case WM_T_82575:
   2505 	case WM_T_82576:
   2506 	case WM_T_82580:
   2507 	case WM_T_I350:
   2508 	case WM_T_I354:
   2509 	case WM_T_80003:
   2510 		/* SPI */
   2511 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2512 		wm_nvm_set_addrbits_size_eecd(sc);
   2513 		if ((sc->sc_type == WM_T_80003)
   2514 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2515 			sc->nvm.read = wm_nvm_read_eerd;
   2516 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2517 		} else {
   2518 			sc->nvm.read = wm_nvm_read_spi;
   2519 			sc->sc_flags |= WM_F_LOCK_EECD;
   2520 		}
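        		/*
        		 * Except on the 80003, parts of 2^15 words or more
        		 * take the SPI path, presumably because EERD cannot
        		 * address the whole device; direct SPI access is also
        		 * why WM_F_LOCK_EECD is set there.
        		 */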
   2521 		sc->phy.acquire = wm_get_phy_82575;
   2522 		sc->phy.release = wm_put_phy_82575;
   2523 		sc->nvm.acquire = wm_get_nvm_80003;
   2524 		sc->nvm.release = wm_put_nvm_80003;
   2525 		break;
   2526 	case WM_T_ICH8:
   2527 	case WM_T_ICH9:
   2528 	case WM_T_ICH10:
   2529 	case WM_T_PCH:
   2530 	case WM_T_PCH2:
   2531 	case WM_T_PCH_LPT:
   2532 		sc->nvm.read = wm_nvm_read_ich8;
   2533 		/* FLASH */
   2534 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2535 		sc->sc_nvm_wordsize = 2048;
   2536 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2537 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2538 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2539 			aprint_error_dev(sc->sc_dev,
   2540 			    "can't map FLASH registers\n");
   2541 			goto out;
   2542 		}
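        		/*
        		 * GFPREG describes the NVM region of the flash in
        		 * sector units: the low field holds the first sector
        		 * and the field at bit 16 the last.  The lines below
        		 * turn that span into bytes and then into 16-bit
        		 * words per bank (the region holds two banks).
        		 */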
   2543 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2544 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2545 		    ICH_FLASH_SECTOR_SIZE;
   2546 		sc->sc_ich8_flash_bank_size =
   2547 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2548 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2549 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2550 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2551 		sc->sc_flashreg_offset = 0;
   2552 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2553 		sc->phy.release = wm_put_swflag_ich8lan;
   2554 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2555 		sc->nvm.release = wm_put_nvm_ich8lan;
   2556 		break;
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
   2559 		sc->nvm.read = wm_nvm_read_spt;
   2560 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2561 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2562 		sc->sc_flasht = sc->sc_st;
   2563 		sc->sc_flashh = sc->sc_sh;
   2564 		sc->sc_ich8_flash_base = 0;
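        		/*
        		 * The NVM size is derived from a strap field: a 5-bit
        		 * count of NVM_SIZE_MULTIPLIER-byte units, biased by
        		 * one.
        		 */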
   2565 		sc->sc_nvm_wordsize =
   2566 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2567 		    * NVM_SIZE_MULTIPLIER;
   2568 		/* The strap value is a size in bytes; we want words */
   2569 		sc->sc_nvm_wordsize /= 2;
   2570 		/* Assume 2 banks */
   2571 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2572 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2573 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2574 		sc->phy.release = wm_put_swflag_ich8lan;
   2575 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2576 		sc->nvm.release = wm_put_nvm_ich8lan;
   2577 		break;
   2578 	case WM_T_I210:
   2579 	case WM_T_I211:
   2580 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2581 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2582 		if (wm_nvm_flash_presence_i210(sc)) {
   2583 			sc->nvm.read = wm_nvm_read_eerd;
   2584 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2585 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2586 			wm_nvm_set_addrbits_size_eecd(sc);
   2587 		} else {
   2588 			sc->nvm.read = wm_nvm_read_invm;
   2589 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2590 			sc->sc_nvm_wordsize = INVM_SIZE;
   2591 		}
   2592 		sc->phy.acquire = wm_get_phy_82575;
   2593 		sc->phy.release = wm_put_phy_82575;
   2594 		sc->nvm.acquire = wm_get_nvm_80003;
   2595 		sc->nvm.release = wm_put_nvm_80003;
   2596 		break;
   2597 	default:
   2598 		break;
   2599 	}
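
        	/*
        	 * From here on, every NVM access in the driver goes through
        	 * the sc->nvm hooks chosen above, bracketed by acquire and
        	 * release; roughly (a sketch, not a verbatim quote of the
        	 * wrapper):
        	 *
        	 *	if (sc->nvm.acquire(sc) == 0) {
        	 *		rv = sc->nvm.read(sc, offset, nwords, data);
        	 *		sc->nvm.release(sc);
        	 *	}
        	 */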
   2600 
   2601 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2602 	switch (sc->sc_type) {
   2603 	case WM_T_82571:
   2604 	case WM_T_82572:
   2605 		reg = CSR_READ(sc, WMREG_SWSM2);
   2606 		if ((reg & SWSM2_LOCK) == 0) {
   2607 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2608 			force_clear_smbi = true;
   2609 		} else
   2610 			force_clear_smbi = false;
   2611 		break;
   2612 	case WM_T_82573:
   2613 	case WM_T_82574:
   2614 	case WM_T_82583:
   2615 		force_clear_smbi = true;
   2616 		break;
   2617 	default:
   2618 		force_clear_smbi = false;
   2619 		break;
   2620 	}
   2621 	if (force_clear_smbi) {
   2622 		reg = CSR_READ(sc, WMREG_SWSM);
   2623 		if ((reg & SWSM_SMBI) != 0)
   2624 			aprint_error_dev(sc->sc_dev,
   2625 			    "Please update the Bootagent\n");
   2626 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2627 	}
   2628 
   2629 	/*
   2630 	 * Defer printing the EEPROM type until after verifying the checksum.
   2631 	 * This allows the EEPROM type to be printed correctly in the case
   2632 	 * that no EEPROM is attached.
   2633 	 */
   2634 	/*
   2635 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2636 	 * this for later, so we can fail future reads from the EEPROM.
   2637 	 */
   2638 	if (wm_nvm_validate_checksum(sc)) {
   2639 		/*
   2640 		 * Check a second time, because some PCI-e parts fail the
   2641 		 * first check due to the link being in a sleep state.
   2642 		 */
   2643 		if (wm_nvm_validate_checksum(sc))
   2644 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2645 	}
   2646 
   2647 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2648 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2649 	else {
   2650 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2651 		    sc->sc_nvm_wordsize);
   2652 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2653 			aprint_verbose("iNVM");
   2654 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2655 			aprint_verbose("FLASH(HW)");
   2656 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2657 			aprint_verbose("FLASH");
   2658 		else {
   2659 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2660 				eetype = "SPI";
   2661 			else
   2662 				eetype = "MicroWire";
   2663 			aprint_verbose("(%d address bits) %s EEPROM",
   2664 			    sc->sc_nvm_addrbits, eetype);
   2665 		}
   2666 	}
   2667 	wm_nvm_version(sc);
   2668 	aprint_verbose("\n");
   2669 
   2670 	/*
    2671 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2672 	 * might be incorrect.
   2673 	 */
   2674 	wm_gmii_setup_phytype(sc, 0, 0);
   2675 
   2676 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2677 	switch (sc->sc_type) {
   2678 	case WM_T_ICH8:
   2679 	case WM_T_ICH9:
   2680 	case WM_T_ICH10:
   2681 	case WM_T_PCH:
   2682 	case WM_T_PCH2:
   2683 	case WM_T_PCH_LPT:
   2684 	case WM_T_PCH_SPT:
   2685 	case WM_T_PCH_CNP:
   2686 		apme_mask = WUC_APME;
   2687 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2688 		if ((eeprom_data & apme_mask) != 0)
   2689 			sc->sc_flags |= WM_F_WOL;
   2690 		break;
   2691 	default:
   2692 		break;
   2693 	}
   2694 
   2695 	/* Reset the chip to a known state. */
   2696 	wm_reset(sc);
   2697 
   2698 	/*
   2699 	 * Check for I21[01] PLL workaround.
   2700 	 *
   2701 	 * Three cases:
   2702 	 * a) Chip is I211.
   2703 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2704 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2705 	 */
   2706 	if (sc->sc_type == WM_T_I211)
   2707 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2708 	if (sc->sc_type == WM_T_I210) {
   2709 		if (!wm_nvm_flash_presence_i210(sc))
   2710 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2711 		else if ((sc->sc_nvm_ver_major < 3)
   2712 		    || ((sc->sc_nvm_ver_major == 3)
   2713 			&& (sc->sc_nvm_ver_minor < 25))) {
   2714 			aprint_verbose_dev(sc->sc_dev,
   2715 			    "ROM image version %d.%d is older than 3.25\n",
   2716 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2717 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2718 		}
   2719 	}
   2720 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2721 		wm_pll_workaround_i210(sc);
   2722 
   2723 	wm_get_wakeup(sc);
   2724 
   2725 	/* Non-AMT based hardware can now take control from firmware */
   2726 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2727 		wm_get_hw_control(sc);
   2728 
   2729 	/*
    2730 	 * Read the Ethernet address from the EEPROM unless it was already
    2731 	 * found in the device properties.
   2732 	 */
   2733 	ea = prop_dictionary_get(dict, "mac-address");
   2734 	if (ea != NULL) {
   2735 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2736 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2737 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2738 	} else {
   2739 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2740 			aprint_error_dev(sc->sc_dev,
   2741 			    "unable to read Ethernet address\n");
   2742 			goto out;
   2743 		}
   2744 	}
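	/*
	 * Illustrative sketch (hypothetical, not part of this driver):
	 * MD code could override the NVM address by setting the property
	 * before attach, e.g.
	 *
	 *	prop_data_t d = prop_data_create_copy(mac, ETHER_ADDR_LEN);
	 *	prop_dictionary_set(device_properties(dev), "mac-address", d);
	 *	prop_object_release(d);
	 */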
   2745 
   2746 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2747 	    ether_sprintf(enaddr));
   2748 
   2749 	/*
   2750 	 * Read the config info from the EEPROM, and set up various
   2751 	 * bits in the control registers based on their contents.
   2752 	 */
   2753 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2754 	if (pn != NULL) {
   2755 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2756 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2757 	} else {
   2758 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2759 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2760 			goto out;
   2761 		}
   2762 	}
   2763 
   2764 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2765 	if (pn != NULL) {
   2766 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2767 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2768 	} else {
   2769 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2770 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2771 			goto out;
   2772 		}
   2773 	}
   2774 
   2775 	/* check for WM_F_WOL */
   2776 	switch (sc->sc_type) {
   2777 	case WM_T_82542_2_0:
   2778 	case WM_T_82542_2_1:
   2779 	case WM_T_82543:
   2780 		/* dummy? */
   2781 		eeprom_data = 0;
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		break;
   2784 	case WM_T_82544:
   2785 		apme_mask = NVM_CFG2_82544_APM_EN;
   2786 		eeprom_data = cfg2;
   2787 		break;
   2788 	case WM_T_82546:
   2789 	case WM_T_82546_3:
   2790 	case WM_T_82571:
   2791 	case WM_T_82572:
   2792 	case WM_T_82573:
   2793 	case WM_T_82574:
   2794 	case WM_T_82583:
   2795 	case WM_T_80003:
   2796 	case WM_T_82575:
   2797 	case WM_T_82576:
   2798 		apme_mask = NVM_CFG3_APME;
   2799 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2800 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2801 		break;
   2802 	case WM_T_82580:
   2803 	case WM_T_I350:
   2804 	case WM_T_I354:
   2805 	case WM_T_I210:
   2806 	case WM_T_I211:
   2807 		apme_mask = NVM_CFG3_APME;
   2808 		wm_nvm_read(sc,
   2809 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2810 		    1, &eeprom_data);
   2811 		break;
   2812 	case WM_T_ICH8:
   2813 	case WM_T_ICH9:
   2814 	case WM_T_ICH10:
   2815 	case WM_T_PCH:
   2816 	case WM_T_PCH2:
   2817 	case WM_T_PCH_LPT:
   2818 	case WM_T_PCH_SPT:
   2819 	case WM_T_PCH_CNP:
    2820 		/* Already checked before wm_reset(). */
   2821 		apme_mask = eeprom_data = 0;
   2822 		break;
   2823 	default: /* XXX 82540 */
   2824 		apme_mask = NVM_CFG3_APME;
   2825 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2826 		break;
   2827 	}
   2828 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2829 	if ((eeprom_data & apme_mask) != 0)
   2830 		sc->sc_flags |= WM_F_WOL;
   2831 
   2832 	/*
    2833 	 * We have the EEPROM settings; now apply the special cases
    2834 	 * where the EEPROM may be wrong or the board won't support
    2835 	 * wake on LAN on a particular port.
   2836 	 */
   2837 	switch (sc->sc_pcidevid) {
   2838 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2839 		sc->sc_flags &= ~WM_F_WOL;
   2840 		break;
   2841 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2842 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2843 		/* Wake events only supported on port A for dual fiber
   2844 		 * regardless of eeprom setting */
   2845 		if (sc->sc_funcid == 1)
   2846 			sc->sc_flags &= ~WM_F_WOL;
   2847 		break;
   2848 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2849 		/* If quad port adapter, disable WoL on all but port A */
   2850 		if (sc->sc_funcid != 0)
   2851 			sc->sc_flags &= ~WM_F_WOL;
   2852 		break;
   2853 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2854 		/* Wake events only supported on port A for dual fiber
   2855 		 * regardless of eeprom setting */
   2856 		if (sc->sc_funcid == 1)
   2857 			sc->sc_flags &= ~WM_F_WOL;
   2858 		break;
   2859 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2860 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2861 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2862 		/* If quad port adapter, disable WoL on all but port A */
   2863 		if (sc->sc_funcid != 0)
   2864 			sc->sc_flags &= ~WM_F_WOL;
   2865 		break;
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_82575) {
   2869 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2870 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2871 			    nvmword);
   2872 			if ((sc->sc_type == WM_T_82575) ||
   2873 			    (sc->sc_type == WM_T_82576)) {
   2874 				/* Check NVM for autonegotiation */
   2875 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2876 				    != 0)
   2877 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2878 			}
   2879 			if ((sc->sc_type == WM_T_82575) ||
   2880 			    (sc->sc_type == WM_T_I350)) {
   2881 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2882 					sc->sc_flags |= WM_F_MAS;
   2883 			}
   2884 		}
   2885 	}
   2886 
   2887 	/*
    2888 	 * XXX Need special handling for some multi-port cards
    2889 	 * to disable a particular port.
   2890 	 */
   2891 
   2892 	if (sc->sc_type >= WM_T_82544) {
   2893 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2894 		if (pn != NULL) {
   2895 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2896 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2897 		} else {
   2898 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2899 				aprint_error_dev(sc->sc_dev,
   2900 				    "unable to read SWDPIN\n");
   2901 				goto out;
   2902 			}
   2903 		}
   2904 	}
   2905 
   2906 	if (cfg1 & NVM_CFG1_ILOS)
   2907 		sc->sc_ctrl |= CTRL_ILOS;
   2908 
   2909 	/*
   2910 	 * XXX
    2911 	 * This code isn't correct because pins 2 and 3 are located at
    2912 	 * different positions on newer chips. Check all the datasheets.
    2913 	 *
    2914 	 * Until this is resolved, apply it only to chips <= 82580.
   2915 	 */
   2916 	if (sc->sc_type <= WM_T_82580) {
   2917 		if (sc->sc_type >= WM_T_82544) {
   2918 			sc->sc_ctrl |=
   2919 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2920 			    CTRL_SWDPIO_SHIFT;
   2921 			sc->sc_ctrl |=
   2922 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2923 			    CTRL_SWDPINS_SHIFT;
   2924 		} else {
   2925 			sc->sc_ctrl |=
   2926 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2927 			    CTRL_SWDPIO_SHIFT;
   2928 		}
   2929 	}
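	/*
	 * (The nibbles copied above route the four software-definable
	 * pins: per my reading of the 8254x documentation, SWDPIO selects
	 * the pin directions and SWDPIN the initial pin values.)
	 */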
   2930 
   2931 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2932 		wm_nvm_read(sc,
   2933 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2934 		    1, &nvmword);
   2935 		if (nvmword & NVM_CFG3_ILOS)
   2936 			sc->sc_ctrl |= CTRL_ILOS;
   2937 	}
   2938 
   2939 #if 0
   2940 	if (sc->sc_type >= WM_T_82544) {
   2941 		if (cfg1 & NVM_CFG1_IPS0)
   2942 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2943 		if (cfg1 & NVM_CFG1_IPS1)
   2944 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2945 		sc->sc_ctrl_ext |=
   2946 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2947 		    CTRL_EXT_SWDPIO_SHIFT;
   2948 		sc->sc_ctrl_ext |=
   2949 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2950 		    CTRL_EXT_SWDPINS_SHIFT;
   2951 	} else {
   2952 		sc->sc_ctrl_ext |=
   2953 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2954 		    CTRL_EXT_SWDPIO_SHIFT;
   2955 	}
   2956 #endif
   2957 
   2958 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2959 #if 0
   2960 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2961 #endif
   2962 
   2963 	if (sc->sc_type == WM_T_PCH) {
   2964 		uint16_t val;
   2965 
   2966 		/* Save the NVM K1 bit setting */
   2967 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2968 
   2969 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2970 			sc->sc_nvm_k1_enabled = 1;
   2971 		else
   2972 			sc->sc_nvm_k1_enabled = 0;
   2973 	}
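	/*
	 * (K1 is a PCH-family PHY power-saving state; the flag saved here
	 * is consulted later by the K1 workaround code.)
	 */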
   2974 
   2975 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2976 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2977 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2978 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2979 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2980 	    || sc->sc_type == WM_T_82573
   2981 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2982 		/* Copper only */
    2983 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2984 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2985 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2986 	    || (sc->sc_type == WM_T_I211)) {
   2987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2988 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2989 		switch (link_mode) {
   2990 		case CTRL_EXT_LINK_MODE_1000KX:
   2991 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2992 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2993 			break;
   2994 		case CTRL_EXT_LINK_MODE_SGMII:
   2995 			if (wm_sgmii_uses_mdio(sc)) {
   2996 				aprint_normal_dev(sc->sc_dev,
   2997 				    "SGMII(MDIO)\n");
   2998 				sc->sc_flags |= WM_F_SGMII;
   2999 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3000 				break;
   3001 			}
   3002 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3003 			/*FALLTHROUGH*/
   3004 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3005 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3006 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3007 				if (link_mode
   3008 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3009 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3010 					sc->sc_flags |= WM_F_SGMII;
   3011 					aprint_verbose_dev(sc->sc_dev,
   3012 					    "SGMII\n");
   3013 				} else {
   3014 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3015 					aprint_verbose_dev(sc->sc_dev,
   3016 					    "SERDES\n");
   3017 				}
   3018 				break;
   3019 			}
   3020 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3021 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3022 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3023 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3024 				sc->sc_flags |= WM_F_SGMII;
   3025 			}
   3026 			/* Do not change link mode for 100BaseFX */
   3027 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3028 				break;
   3029 
   3030 			/* Change current link mode setting */
   3031 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3032 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3033 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3034 			else
   3035 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3037 			break;
   3038 		case CTRL_EXT_LINK_MODE_GMII:
   3039 		default:
   3040 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3041 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3042 			break;
   3043 		}
   3044 
   3046 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   3047 			reg |= CTRL_EXT_I2C_ENA;
   3048 		else
   3049 			reg &= ~CTRL_EXT_I2C_ENA;
   3050 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3051 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3052 			if (!wm_sgmii_uses_mdio(sc))
   3053 				wm_gmii_setup_phytype(sc, 0, 0);
   3054 			wm_reset_mdicnfg_82580(sc);
   3055 		}
   3056 	} else if (sc->sc_type < WM_T_82543 ||
   3057 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3058 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3059 			aprint_error_dev(sc->sc_dev,
   3060 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3061 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3062 		}
   3063 	} else {
   3064 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3065 			aprint_error_dev(sc->sc_dev,
   3066 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3067 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3068 		}
   3069 	}
   3070 
   3071 	if (sc->sc_type >= WM_T_PCH2)
   3072 		sc->sc_flags |= WM_F_EEE;
   3073 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3074 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3075 		/* XXX: Need special handling for I354. (not yet) */
   3076 		if (sc->sc_type != WM_T_I354)
   3077 			sc->sc_flags |= WM_F_EEE;
   3078 	}
   3079 
   3080 	/*
   3081 	 * The I350 has a bug where it always strips the CRC whether
    3082 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   3083 	 */
   3084 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3085 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3086 		sc->sc_flags |= WM_F_CRC_STRIP;
   3087 
   3088 	/*
   3089 	 * Workaround for some chips to delay sending LINK_STATE_UP.
    3090 	 * Some systems can't send packets soon after linkup. See also
   3091 	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
   3092 	 */
   3093 	switch (sc->sc_type) {
   3094 	case WM_T_I350:
   3095 	case WM_T_I354:
   3096 	case WM_T_I210:
   3097 	case WM_T_I211:
   3098 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3099 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3100 		break;
   3101 	default:
   3102 		break;
   3103 	}
   3104 
   3105 	/* Set device properties (macflags) */
   3106 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3107 
   3108 	if (sc->sc_flags != 0) {
   3109 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3110 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3111 	}
   3112 
   3113 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3114 
   3115 	/* Initialize the media structures accordingly. */
   3116 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3117 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3118 	else
   3119 		wm_tbi_mediainit(sc); /* All others */
   3120 
   3121 	ifp = &sc->sc_ethercom.ec_if;
   3122 	xname = device_xname(sc->sc_dev);
   3123 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3124 	ifp->if_softc = sc;
   3125 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3126 	ifp->if_extflags = IFEF_MPSAFE;
   3127 	ifp->if_ioctl = wm_ioctl;
   3128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3129 		ifp->if_start = wm_nq_start;
   3130 		/*
   3131 		 * When the number of CPUs is one and the controller can use
    3132 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3133 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3134 		 * the other for link status changes.
   3135 		 * In this situation, wm_nq_transmit() is disadvantageous
   3136 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3137 		 */
   3138 		if (wm_is_using_multiqueue(sc))
   3139 			ifp->if_transmit = wm_nq_transmit;
   3140 	} else {
   3141 		ifp->if_start = wm_start;
   3142 		/*
   3143 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3144 		 * described above.
   3145 		 */
   3146 		if (wm_is_using_multiqueue(sc))
   3147 			ifp->if_transmit = wm_transmit;
   3148 	}
    3149 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   3150 	ifp->if_init = wm_init;
   3151 	ifp->if_stop = wm_stop;
   3152 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3153 	IFQ_SET_READY(&ifp->if_snd);
   3154 
   3155 	/* Check for jumbo frame */
   3156 	switch (sc->sc_type) {
   3157 	case WM_T_82573:
   3158 		/* XXX limited to 9234 if ASPM is disabled */
   3159 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3160 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3161 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3162 		break;
   3163 	case WM_T_82571:
   3164 	case WM_T_82572:
   3165 	case WM_T_82574:
   3166 	case WM_T_82583:
   3167 	case WM_T_82575:
   3168 	case WM_T_82576:
   3169 	case WM_T_82580:
   3170 	case WM_T_I350:
   3171 	case WM_T_I354:
   3172 	case WM_T_I210:
   3173 	case WM_T_I211:
   3174 	case WM_T_80003:
   3175 	case WM_T_ICH9:
   3176 	case WM_T_ICH10:
   3177 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3178 	case WM_T_PCH_LPT:
   3179 	case WM_T_PCH_SPT:
   3180 	case WM_T_PCH_CNP:
   3181 		/* XXX limited to 9234 */
   3182 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3183 		break;
   3184 	case WM_T_PCH:
   3185 		/* XXX limited to 4096 */
   3186 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3187 		break;
   3188 	case WM_T_82542_2_0:
   3189 	case WM_T_82542_2_1:
   3190 	case WM_T_ICH8:
   3191 		/* No support for jumbo frame */
   3192 		break;
   3193 	default:
   3194 		/* ETHER_MAX_LEN_JUMBO */
   3195 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3196 		break;
   3197 	}
   3198 
    3199 	/* If we're an i82543 or greater, we can support VLANs. */
   3200 	if (sc->sc_type >= WM_T_82543) {
   3201 		sc->sc_ethercom.ec_capabilities |=
   3202 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3203 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3204 	}
   3205 
   3206 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3207 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3208 
   3209 	/*
    3210 	 * We can offload TCPv4 and UDPv4 checksums in hardware, but
    3211 	 * only on i82543 and later.
   3212 	 */
   3213 	if (sc->sc_type >= WM_T_82543) {
   3214 		ifp->if_capabilities |=
   3215 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3216 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3217 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3218 		    IFCAP_CSUM_TCPv6_Tx |
   3219 		    IFCAP_CSUM_UDPv6_Tx;
   3220 	}
   3221 
   3222 	/*
   3223 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3224 	 *
   3225 	 *	82541GI (8086:1076) ... no
   3226 	 *	82572EI (8086:10b9) ... yes
   3227 	 */
   3228 	if (sc->sc_type >= WM_T_82571) {
   3229 		ifp->if_capabilities |=
   3230 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3231 	}
   3232 
   3233 	/*
    3234 	 * If we're an i82544 or greater (except i82547), we can do
   3235 	 * TCP segmentation offload.
   3236 	 */
   3237 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3238 		ifp->if_capabilities |= IFCAP_TSOv4;
   3239 
   3240 	if (sc->sc_type >= WM_T_82571)
   3241 		ifp->if_capabilities |= IFCAP_TSOv6;
   3242 
   3243 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3244 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3245 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3246 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
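	/*
	 * These limits bound how much Tx/Rx work is done per softint call
	 * (the process limits) and per hardware interrupt (the
	 * intr_process limits); they are meant to be tunable via the
	 * sysctls created in wm_init_sysctls().
	 */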
   3247 
   3248 	/* Attach the interface. */
   3249 	if_initialize(ifp);
   3250 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3251 	ether_ifattach(ifp, enaddr);
   3252 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3253 	if_register(ifp);
   3254 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3255 	    RND_FLAG_DEFAULT);
   3256 
   3257 #ifdef WM_EVENT_COUNTERS
   3258 	/* Attach event counters. */
   3259 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3260 	    NULL, xname, "linkintr");
   3261 
   3262 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "CRC Error");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Symbol Error");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Missed Packets");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Collision");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Sequence Error");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Receive Length Error");
   3274 
   3275 	if (sc->sc_type >= WM_T_82543) {
   3276 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3277 		    NULL, xname, "Alignment Error");
   3278 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3279 		    NULL, xname, "Receive Error");
   3280 		/* XXX Does 82575 have HTDPMC? */
   3281 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3282 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3283 			    EVCNT_TYPE_MISC, NULL, xname,
   3284 			    "Carrier Extension Error");
   3285 		else
   3286 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3287 			    EVCNT_TYPE_MISC, NULL, xname,
   3288 			    "Host Transmit Discarded Packets by MAC");
   3289 
   3290 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3291 		    NULL, xname, "Tx with No CRS");
   3292 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3293 		    NULL, xname, "TCP Segmentation Context Tx");
   3294 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3295 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3296 			    EVCNT_TYPE_MISC, NULL, xname,
   3297 			    "TCP Segmentation Context Tx Fail");
   3298 		else {
   3299 			/* XXX Is the circuit breaker only for 82576? */
   3300 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3301 			    EVCNT_TYPE_MISC, NULL, xname,
   3302 			    "Circuit Breaker Rx Dropped Packet");
   3303 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3304 			    EVCNT_TYPE_MISC, NULL, xname,
   3305 			    "Circuit Breaker Rx Manageability Packet");
   3306 		}
   3307 	}
   3308 
   3309 	if (sc->sc_type >= WM_T_82542_2_1) {
   3310 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3311 		    NULL, xname, "XOFF Transmitted");
   3312 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3313 		    NULL, xname, "XON Transmitted");
   3314 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3315 		    NULL, xname, "XOFF Received");
   3316 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3317 		    NULL, xname, "XON Received");
   3318 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3319 		    NULL, xname, "FC Received Unsupported");
   3320 	}
   3321 
   3322 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Single Collision");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Excessive Collisions");
   3326 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3327 	    NULL, xname, "Multiple Collision");
   3328 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3329 	    NULL, xname, "Late Collisions");
   3330 
   3331 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3332 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3334 
   3335 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Defer");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Packets Rx (64 bytes)");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3347 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3348 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3349 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3350 	    NULL, xname, "Good Packets Rx");
   3351 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3352 	    NULL, xname, "Broadcast Packets Rx");
   3353 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3354 	    NULL, xname, "Multicast Packets Rx");
   3355 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3356 	    NULL, xname, "Good Packets Tx");
   3357 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3358 	    NULL, xname, "Good Octets Rx");
   3359 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3360 	    NULL, xname, "Good Octets Tx");
   3361 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Rx No Buffers");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Rx Undersize (valid CRC)");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Rx Fragment (bad CRC)");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Rx Oversize (valid CRC)");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Rx Jabber (bad CRC)");
   3371 	if (sc->sc_type >= WM_T_82540) {
   3372 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3373 		    NULL, xname, "Management Packets RX");
   3374 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3375 		    NULL, xname, "Management Packets Dropped");
   3376 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3377 		    NULL, xname, "Management Packets TX");
   3378 	}
   3379 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Total Octets Rx");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Total Octets Tx");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Total Packets Rx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Total Packets Tx");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Packets Tx (64 bytes)");
   3389 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3390 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3391 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3392 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3393 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3394 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3395 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3396 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3397 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Multicast Packets Tx");
   3401 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3402 	    NULL, xname, "Broadcast Packets Tx");
   3403 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
   3404 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3405 		    NULL, xname, "Interrupt Assertion");
   3406 	if (sc->sc_type < WM_T_82575) {
   3407 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3408 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3409 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3410 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3411 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3412 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3413 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3414 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3415 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3416 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3417 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3418 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3419 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3420 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3421 
   3422 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3423 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3424 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3425 	} else if (!WM_IS_ICHPCH(sc)) {
   3426 		/*
   3427 		 * For 82575 and newer.
   3428 		 *
   3429 		 * On 80003, ICHs and PCHs, it seems all of the following
   3430 		 * registers are zero.
   3431 		 */
   3432 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3433 		    NULL, xname, "Rx Packets To Host");
   3434 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3435 		    NULL, xname, "Debug Counter 1");
   3436 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3437 		    NULL, xname, "Debug Counter 2");
   3438 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3439 		    NULL, xname, "Debug Counter 3");
   3440 
   3441 		/*
    3442 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3443 		 * Empty). I think that's wrong. The real count I observed
    3444 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets Tx);
    3445 		 * it's HGPTC (Host Good Packets Tx), which is described in
    3446 		 * the 82576 datasheet.
   3447 		 */
   3448 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3449 		    NULL, xname, "Host Good Packets TX");
   3450 
   3451 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3452 		    NULL, xname, "Debug Counter 4");
   3453 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3454 		    NULL, xname, "Rx Desc Min Thresh");
   3455 		/* XXX Is the circuit breaker only for 82576? */
   3456 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3457 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3458 
   3459 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3460 		    NULL, xname, "Host Good Octets Rx");
   3461 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3462 		    NULL, xname, "Host Good Octets Tx");
   3463 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3464 		    NULL, xname, "Length Errors (length/type <= 1500)");
   3465 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3466 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3467 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3468 		    NULL, xname, "Header Redirection Missed Packet");
   3469 	}
   3470 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3471 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3472 		    NULL, xname, "EEE Tx LPI");
   3473 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3474 		    NULL, xname, "EEE Rx LPI");
   3475 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3476 		    NULL, xname, "BMC2OS Packets received by host");
   3477 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3478 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3479 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3480 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3481 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3482 		    NULL, xname, "OS2BMC Packets received by BMC");
   3483 	}
   3484 #endif /* WM_EVENT_COUNTERS */
   3485 
   3486 	sc->sc_txrx_use_workqueue = false;
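	/*
	 * Deferred Tx/Rx processing defaults to softint; the sysctl
	 * created in wm_init_sysctls() can switch it to a workqueue.
	 */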
   3487 
   3488 	if (wm_phy_need_linkdown_discard(sc)) {
   3489 		DPRINTF(sc, WM_DEBUG_LINK,
   3490 		    ("%s: %s: Set linkdown discard flag\n",
   3491 			device_xname(sc->sc_dev), __func__));
   3492 		wm_set_linkdown_discard(sc);
   3493 	}
   3494 
   3495 	wm_init_sysctls(sc);
   3496 
   3497 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3498 		pmf_class_network_register(self, ifp);
   3499 	else
   3500 		aprint_error_dev(self, "couldn't establish power handler\n");
   3501 
   3502 	sc->sc_flags |= WM_F_ATTACHED;
   3503 out:
   3504 	return;
   3505 }
   3506 
   3507 /* The detach function (ca_detach) */
   3508 static int
   3509 wm_detach(device_t self, int flags __unused)
   3510 {
   3511 	struct wm_softc *sc = device_private(self);
   3512 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3513 	int i;
   3514 
   3515 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3516 		return 0;
   3517 
   3518 	/* Stop the interface. Callouts are stopped in it. */
   3519 	IFNET_LOCK(ifp);
   3520 	sc->sc_dying = true;
   3521 	wm_stop(ifp, 1);
   3522 	IFNET_UNLOCK(ifp);
   3523 
   3524 	pmf_device_deregister(self);
   3525 
   3526 	sysctl_teardown(&sc->sc_sysctllog);
   3527 
   3528 #ifdef WM_EVENT_COUNTERS
   3529 	evcnt_detach(&sc->sc_ev_linkintr);
   3530 
   3531 	evcnt_detach(&sc->sc_ev_crcerrs);
   3532 	evcnt_detach(&sc->sc_ev_symerrc);
   3533 	evcnt_detach(&sc->sc_ev_mpc);
   3534 	evcnt_detach(&sc->sc_ev_colc);
   3535 	evcnt_detach(&sc->sc_ev_sec);
   3536 	evcnt_detach(&sc->sc_ev_rlec);
   3537 
   3538 	if (sc->sc_type >= WM_T_82543) {
   3539 		evcnt_detach(&sc->sc_ev_algnerrc);
   3540 		evcnt_detach(&sc->sc_ev_rxerrc);
   3541 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3542 			evcnt_detach(&sc->sc_ev_cexterr);
   3543 		else
   3544 			evcnt_detach(&sc->sc_ev_htdpmc);
   3545 
   3546 		evcnt_detach(&sc->sc_ev_tncrs);
   3547 		evcnt_detach(&sc->sc_ev_tsctc);
   3548 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3549 			evcnt_detach(&sc->sc_ev_tsctfc);
   3550 		else {
   3551 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3552 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3553 		}
   3554 	}
   3555 
   3556 	if (sc->sc_type >= WM_T_82542_2_1) {
   3557 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3558 		evcnt_detach(&sc->sc_ev_tx_xon);
   3559 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3560 		evcnt_detach(&sc->sc_ev_rx_xon);
   3561 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3562 	}
   3563 
   3564 	evcnt_detach(&sc->sc_ev_scc);
   3565 	evcnt_detach(&sc->sc_ev_ecol);
   3566 	evcnt_detach(&sc->sc_ev_mcc);
   3567 	evcnt_detach(&sc->sc_ev_latecol);
   3568 
   3569 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3570 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3571 
   3572 	evcnt_detach(&sc->sc_ev_dc);
   3573 	evcnt_detach(&sc->sc_ev_prc64);
   3574 	evcnt_detach(&sc->sc_ev_prc127);
   3575 	evcnt_detach(&sc->sc_ev_prc255);
   3576 	evcnt_detach(&sc->sc_ev_prc511);
   3577 	evcnt_detach(&sc->sc_ev_prc1023);
   3578 	evcnt_detach(&sc->sc_ev_prc1522);
   3579 	evcnt_detach(&sc->sc_ev_gprc);
   3580 	evcnt_detach(&sc->sc_ev_bprc);
   3581 	evcnt_detach(&sc->sc_ev_mprc);
   3582 	evcnt_detach(&sc->sc_ev_gptc);
   3583 	evcnt_detach(&sc->sc_ev_gorc);
   3584 	evcnt_detach(&sc->sc_ev_gotc);
   3585 	evcnt_detach(&sc->sc_ev_rnbc);
   3586 	evcnt_detach(&sc->sc_ev_ruc);
   3587 	evcnt_detach(&sc->sc_ev_rfc);
   3588 	evcnt_detach(&sc->sc_ev_roc);
   3589 	evcnt_detach(&sc->sc_ev_rjc);
   3590 	if (sc->sc_type >= WM_T_82540) {
   3591 		evcnt_detach(&sc->sc_ev_mgtprc);
   3592 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3593 		evcnt_detach(&sc->sc_ev_mgtptc);
   3594 	}
   3595 	evcnt_detach(&sc->sc_ev_tor);
   3596 	evcnt_detach(&sc->sc_ev_tot);
   3597 	evcnt_detach(&sc->sc_ev_tpr);
   3598 	evcnt_detach(&sc->sc_ev_tpt);
   3599 	evcnt_detach(&sc->sc_ev_ptc64);
   3600 	evcnt_detach(&sc->sc_ev_ptc127);
   3601 	evcnt_detach(&sc->sc_ev_ptc255);
   3602 	evcnt_detach(&sc->sc_ev_ptc511);
   3603 	evcnt_detach(&sc->sc_ev_ptc1023);
   3604 	evcnt_detach(&sc->sc_ev_ptc1522);
   3605 	evcnt_detach(&sc->sc_ev_mptc);
   3606 	evcnt_detach(&sc->sc_ev_bptc);
   3607 	if (sc->sc_type >= WM_T_82571)
   3608 		evcnt_detach(&sc->sc_ev_iac);
   3609 	if (sc->sc_type < WM_T_82575) {
   3610 		evcnt_detach(&sc->sc_ev_icrxptc);
   3611 		evcnt_detach(&sc->sc_ev_icrxatc);
   3612 		evcnt_detach(&sc->sc_ev_ictxptc);
   3613 		evcnt_detach(&sc->sc_ev_ictxatc);
   3614 		evcnt_detach(&sc->sc_ev_ictxqec);
   3615 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3616 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3617 		evcnt_detach(&sc->sc_ev_icrxoc);
   3618 	} else if (!WM_IS_ICHPCH(sc)) {
   3619 		evcnt_detach(&sc->sc_ev_rpthc);
   3620 		evcnt_detach(&sc->sc_ev_debug1);
   3621 		evcnt_detach(&sc->sc_ev_debug2);
   3622 		evcnt_detach(&sc->sc_ev_debug3);
   3623 		evcnt_detach(&sc->sc_ev_hgptc);
   3624 		evcnt_detach(&sc->sc_ev_debug4);
   3625 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3626 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3627 
   3628 		evcnt_detach(&sc->sc_ev_hgorc);
   3629 		evcnt_detach(&sc->sc_ev_hgotc);
   3630 		evcnt_detach(&sc->sc_ev_lenerrs);
   3631 		evcnt_detach(&sc->sc_ev_scvpc);
   3632 		evcnt_detach(&sc->sc_ev_hrmpc);
   3633 	}
   3634 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3635 		evcnt_detach(&sc->sc_ev_tlpic);
   3636 		evcnt_detach(&sc->sc_ev_rlpic);
   3637 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3638 		evcnt_detach(&sc->sc_ev_o2bspc);
   3639 		evcnt_detach(&sc->sc_ev_b2ospc);
   3640 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3641 	}
   3642 #endif /* WM_EVENT_COUNTERS */
   3643 
   3644 	rnd_detach_source(&sc->rnd_source);
   3645 
   3646 	/* Tell the firmware about the release */
   3647 	mutex_enter(sc->sc_core_lock);
   3648 	wm_release_manageability(sc);
   3649 	wm_release_hw_control(sc);
   3650 	wm_enable_wakeup(sc);
   3651 	mutex_exit(sc->sc_core_lock);
   3652 
   3653 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3654 
   3655 	ether_ifdetach(ifp);
   3656 	if_detach(ifp);
   3657 	if_percpuq_destroy(sc->sc_ipq);
   3658 
   3659 	/* Delete all remaining media. */
   3660 	ifmedia_fini(&sc->sc_mii.mii_media);
   3661 
   3662 	/* Unload RX dmamaps and free mbufs */
   3663 	for (i = 0; i < sc->sc_nqueues; i++) {
   3664 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3665 		mutex_enter(rxq->rxq_lock);
   3666 		wm_rxdrain(rxq);
   3667 		mutex_exit(rxq->rxq_lock);
   3668 	}
   3669 	/* Must unlock here */
   3670 
   3671 	/* Disestablish the interrupt handler */
   3672 	for (i = 0; i < sc->sc_nintrs; i++) {
   3673 		if (sc->sc_ihs[i] != NULL) {
   3674 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3675 			sc->sc_ihs[i] = NULL;
   3676 		}
   3677 	}
   3678 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3679 
   3680 	/* wm_stop() ensured that the workqueues are stopped. */
   3681 	workqueue_destroy(sc->sc_queue_wq);
   3682 	workqueue_destroy(sc->sc_reset_wq);
   3683 
   3684 	for (i = 0; i < sc->sc_nqueues; i++)
   3685 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3686 
   3687 	wm_free_txrx_queues(sc);
   3688 
   3689 	/* Unmap the registers */
   3690 	if (sc->sc_ss) {
   3691 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3692 		sc->sc_ss = 0;
   3693 	}
   3694 	if (sc->sc_ios) {
   3695 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3696 		sc->sc_ios = 0;
   3697 	}
   3698 	if (sc->sc_flashs) {
   3699 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3700 		sc->sc_flashs = 0;
   3701 	}
   3702 
   3703 	if (sc->sc_core_lock)
   3704 		mutex_obj_free(sc->sc_core_lock);
   3705 	if (sc->sc_ich_phymtx)
   3706 		mutex_obj_free(sc->sc_ich_phymtx);
   3707 	if (sc->sc_ich_nvmmtx)
   3708 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3709 
   3710 	return 0;
   3711 }
   3712 
   3713 static bool
   3714 wm_suspend(device_t self, const pmf_qual_t *qual)
   3715 {
   3716 	struct wm_softc *sc = device_private(self);
   3717 
   3718 	wm_release_manageability(sc);
   3719 	wm_release_hw_control(sc);
   3720 	wm_enable_wakeup(sc);
   3721 
   3722 	return true;
   3723 }
   3724 
   3725 static bool
   3726 wm_resume(device_t self, const pmf_qual_t *qual)
   3727 {
   3728 	struct wm_softc *sc = device_private(self);
   3729 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3730 	pcireg_t reg;
   3731 	char buf[256];
   3732 
   3733 	reg = CSR_READ(sc, WMREG_WUS);
   3734 	if (reg != 0) {
   3735 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3736 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3737 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3738 	}
   3739 
   3740 	if (sc->sc_type >= WM_T_PCH2)
   3741 		wm_resume_workarounds_pchlan(sc);
   3742 	IFNET_LOCK(ifp);
   3743 	if ((ifp->if_flags & IFF_UP) == 0) {
   3744 		/* >= PCH_SPT hardware workaround before reset. */
   3745 		if (sc->sc_type >= WM_T_PCH_SPT)
   3746 			wm_flush_desc_rings(sc);
   3747 
   3748 		wm_reset(sc);
   3749 		/* Non-AMT based hardware can now take control from firmware */
   3750 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3751 			wm_get_hw_control(sc);
   3752 		wm_init_manageability(sc);
   3753 	} else {
   3754 		/*
   3755 		 * We called pmf_class_network_register(), so if_init() is
   3756 		 * automatically called when IFF_UP. wm_reset(),
   3757 		 * wm_get_hw_control() and wm_init_manageability() are called
   3758 		 * via wm_init().
   3759 		 */
   3760 	}
   3761 	IFNET_UNLOCK(ifp);
   3762 
   3763 	return true;
   3764 }
   3765 
   3766 /*
   3767  * wm_watchdog:
   3768  *
    3769  *	Watchdog checker; returns false if a reset has been scheduled.
   3770  */
   3771 static bool
   3772 wm_watchdog(struct ifnet *ifp)
   3773 {
   3774 	int qid;
   3775 	struct wm_softc *sc = ifp->if_softc;
    3776 	uint16_t hang_queue = 0; /* The max number of queues in wm(4) is 16 (82576). */
   3777 
   3778 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3779 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3780 
   3781 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3782 	}
   3783 
   3784 #ifdef WM_DEBUG
   3785 	if (sc->sc_trigger_reset) {
   3786 		/* debug operation, no need for atomicity or reliability */
   3787 		sc->sc_trigger_reset = 0;
   3788 		hang_queue++;
   3789 	}
   3790 #endif
   3791 
   3792 	if (hang_queue == 0)
   3793 		return true;
   3794 
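	/* The atomic swap ensures the reset work is enqueued only once. */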
   3795 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3796 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3797 
   3798 	return false;
   3799 }
   3800 
   3801 /*
   3802  * Perform an interface watchdog reset.
   3803  */
   3804 static void
   3805 wm_handle_reset_work(struct work *work, void *arg)
   3806 {
   3807 	struct wm_softc * const sc = arg;
   3808 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3809 
   3810 	/* Don't want ioctl operations to happen */
   3811 	IFNET_LOCK(ifp);
   3812 
    3813 	/* Reset the interface. */
   3814 	wm_init(ifp);
   3815 
   3816 	IFNET_UNLOCK(ifp);
   3817 
   3818 	/*
    3819 	 * There is still some upper-layer processing which calls
    3820 	 * ifp->if_start() directly, e.g., ALTQ or single-CPU systems.
   3821 	 */
   3822 	/* Try to get more packets going. */
   3823 	ifp->if_start(ifp);
   3824 
   3825 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3826 }
   3827 
   3828 
   3829 static void
   3830 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3831 {
   3832 
   3833 	mutex_enter(txq->txq_lock);
   3834 	if (txq->txq_sending &&
   3835 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3836 		wm_watchdog_txq_locked(ifp, txq, hang);
   3837 
   3838 	mutex_exit(txq->txq_lock);
   3839 }
   3840 
   3841 static void
   3842 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3843     uint16_t *hang)
   3844 {
   3845 	struct wm_softc *sc = ifp->if_softc;
   3846 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3847 
   3848 	KASSERT(mutex_owned(txq->txq_lock));
   3849 
   3850 	/*
   3851 	 * Since we're using delayed interrupts, sweep up
   3852 	 * before we report an error.
   3853 	 */
   3854 	wm_txeof(txq, UINT_MAX);
   3855 
   3856 	if (txq->txq_sending)
   3857 		*hang |= __BIT(wmq->wmq_id);
   3858 
   3859 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3860 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3861 		    device_xname(sc->sc_dev));
   3862 	} else {
   3863 #ifdef WM_DEBUG
   3864 		int i, j;
   3865 		struct wm_txsoft *txs;
   3866 #endif
   3867 		log(LOG_ERR,
   3868 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3869 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3870 		    txq->txq_next);
   3871 		if_statinc(ifp, if_oerrors);
   3872 #ifdef WM_DEBUG
   3873 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3874 		     i = WM_NEXTTXS(txq, i)) {
   3875 			txs = &txq->txq_soft[i];
   3876 			printf("txs %d tx %d -> %d\n",
   3877 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3878 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3879 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3880 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3881 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3882 					printf("\t %#08x%08x\n",
   3883 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3884 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3885 				} else {
   3886 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3887 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3888 					    txq->txq_descs[j].wtx_addr.wa_low);
   3889 					printf("\t %#04x%02x%02x%08x\n",
   3890 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3891 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3892 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3893 					    txq->txq_descs[j].wtx_cmdlen);
   3894 				}
   3895 				if (j == txs->txs_lastdesc)
   3896 					break;
   3897 			}
   3898 		}
   3899 #endif
   3900 	}
   3901 }
   3902 
   3903 /*
   3904  * wm_tick:
   3905  *
   3906  *	One second timer, used to check link status, sweep up
   3907  *	completed transmit jobs, etc.
   3908  */
   3909 static void
   3910 wm_tick(void *arg)
   3911 {
   3912 	struct wm_softc *sc = arg;
   3913 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3914 
   3915 	mutex_enter(sc->sc_core_lock);
   3916 
   3917 	if (sc->sc_core_stopping) {
   3918 		mutex_exit(sc->sc_core_lock);
   3919 		return;
   3920 	}
   3921 
   3922 	wm_update_stats(sc);
   3923 
   3924 	if (sc->sc_flags & WM_F_HAS_MII) {
   3925 		bool dotick = true;
   3926 
   3927 		/*
   3928 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3929 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3930 		 */
   3931 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3932 			struct timeval now;
   3933 
   3934 			getmicrotime(&now);
   3935 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3936 				dotick = false;
   3937 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3938 				/* Simplify by checking tv_sec only. */
   3939 
   3940 				sc->sc_linkup_delay_time.tv_sec = 0;
   3941 				sc->sc_linkup_delay_time.tv_usec = 0;
   3942 			}
   3943 		}
   3944 		if (dotick)
   3945 			mii_tick(&sc->sc_mii);
   3946 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3947 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3948 		wm_serdes_tick(sc);
   3949 	else
   3950 		wm_tbi_tick(sc);
   3951 
   3952 	mutex_exit(sc->sc_core_lock);
   3953 
   3954 	if (wm_watchdog(ifp))
   3955 		callout_schedule(&sc->sc_tick_ch, hz);
   3956 }
   3957 
   3958 static int
   3959 wm_ifflags_cb(struct ethercom *ec)
   3960 {
   3961 	struct ifnet *ifp = &ec->ec_if;
   3962 	struct wm_softc *sc = ifp->if_softc;
   3963 	u_short iffchange;
   3964 	int ecchange;
   3965 	bool needreset = false;
   3966 	int rc = 0;
   3967 
   3968 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3969 		device_xname(sc->sc_dev), __func__));
   3970 
   3971 	KASSERT(IFNET_LOCKED(ifp));
   3972 
   3973 	mutex_enter(sc->sc_core_lock);
   3974 
   3975 	/*
   3976 	 * Check for if_flags.
    3977 	 * The main usage is to prevent link down when opening bpf.
   3978 	 */
   3979 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3980 	sc->sc_if_flags = ifp->if_flags;
   3981 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3982 		needreset = true;
   3983 		goto ec;
   3984 	}
   3985 
   3986 	/* iff related updates */
   3987 	if ((iffchange & IFF_PROMISC) != 0)
   3988 		wm_set_filter(sc);
   3989 
   3990 	wm_set_vlan(sc);
   3991 
   3992 ec:
   3993 	/* Check for ec_capenable. */
   3994 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3995 	sc->sc_ec_capenable = ec->ec_capenable;
   3996 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3997 		needreset = true;
   3998 		goto out;
   3999 	}
   4000 
   4001 	/* ec related updates */
   4002 	wm_set_eee(sc);
   4003 
   4004 out:
   4005 	if (needreset)
   4006 		rc = ENETRESET;
   4007 	mutex_exit(sc->sc_core_lock);
   4008 
   4009 	return rc;
   4010 }
   4011 
   4012 static bool
   4013 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4014 {
   4015 
   4016 	switch (sc->sc_phytype) {
   4017 	case WMPHY_82577: /* ihphy */
   4018 	case WMPHY_82578: /* atphy */
   4019 	case WMPHY_82579: /* ihphy */
   4020 	case WMPHY_I217: /* ihphy */
   4021 	case WMPHY_82580: /* ihphy */
   4022 	case WMPHY_I350: /* ihphy */
   4023 		return true;
   4024 	default:
   4025 		return false;
   4026 	}
   4027 }
   4028 
   4029 static void
   4030 wm_set_linkdown_discard(struct wm_softc *sc)
   4031 {
   4032 
   4033 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4034 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4035 
   4036 		mutex_enter(txq->txq_lock);
   4037 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4038 		mutex_exit(txq->txq_lock);
   4039 	}
   4040 }
   4041 
   4042 static void
   4043 wm_clear_linkdown_discard(struct wm_softc *sc)
   4044 {
   4045 
   4046 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4047 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4048 
   4049 		mutex_enter(txq->txq_lock);
   4050 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4051 		mutex_exit(txq->txq_lock);
   4052 	}
   4053 }
   4054 
   4055 /*
   4056  * wm_ioctl:		[ifnet interface function]
   4057  *
   4058  *	Handle control requests from the operator.
   4059  */
   4060 static int
   4061 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4062 {
   4063 	struct wm_softc *sc = ifp->if_softc;
   4064 	struct ifreq *ifr = (struct ifreq *)data;
   4065 	struct ifaddr *ifa = (struct ifaddr *)data;
   4066 	struct sockaddr_dl *sdl;
   4067 	int error;
   4068 
   4069 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4070 		device_xname(sc->sc_dev), __func__));
   4071 
   4072 	switch (cmd) {
   4073 	case SIOCADDMULTI:
   4074 	case SIOCDELMULTI:
   4075 		break;
   4076 	default:
   4077 		KASSERT(IFNET_LOCKED(ifp));
   4078 	}
   4079 
   4080 	if (cmd == SIOCZIFDATA) {
   4081 		/*
   4082 		 * Special handling for SIOCZIFDATA.
   4083 		 * Copying and clearing the if_data structure is done with
   4084 		 * ether_ioctl() below.
   4085 		 */
   4086 		mutex_enter(sc->sc_core_lock);
   4087 		wm_update_stats(sc);
   4088 		wm_clear_evcnt(sc);
   4089 		mutex_exit(sc->sc_core_lock);
   4090 	}
   4091 
   4092 	switch (cmd) {
   4093 	case SIOCSIFMEDIA:
   4094 		mutex_enter(sc->sc_core_lock);
   4095 		/* Flow control requires full-duplex mode. */
   4096 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4097 		    (ifr->ifr_media & IFM_FDX) == 0)
   4098 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4099 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4100 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4101 				/* We can do both TXPAUSE and RXPAUSE. */
   4102 				ifr->ifr_media |=
   4103 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4104 			}
   4105 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4106 		}
   4107 		mutex_exit(sc->sc_core_lock);
   4108 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4109 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4110 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4111 				DPRINTF(sc, WM_DEBUG_LINK,
   4112 				    ("%s: %s: Set linkdown discard flag\n",
   4113 					device_xname(sc->sc_dev), __func__));
   4114 				wm_set_linkdown_discard(sc);
   4115 			}
   4116 		}
   4117 		break;
   4118 	case SIOCINITIFADDR:
   4119 		mutex_enter(sc->sc_core_lock);
   4120 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4121 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4122 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4123 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4124 			/* Unicast address is the first multicast entry */
   4125 			wm_set_filter(sc);
   4126 			error = 0;
   4127 			mutex_exit(sc->sc_core_lock);
   4128 			break;
   4129 		}
   4130 		mutex_exit(sc->sc_core_lock);
   4131 		/*FALLTHROUGH*/
   4132 	default:
   4133 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4134 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4135 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4136 				DPRINTF(sc, WM_DEBUG_LINK,
   4137 				    ("%s: %s: Set linkdown discard flag\n",
   4138 					device_xname(sc->sc_dev), __func__));
   4139 				wm_set_linkdown_discard(sc);
   4140 			}
   4141 		}
   4142 		const int s = splnet();
   4143 		/* It may call wm_start, so unlock here */
   4144 		error = ether_ioctl(ifp, cmd, data);
   4145 		splx(s);
   4146 		if (error != ENETRESET)
   4147 			break;
   4148 
   4149 		error = 0;
   4150 
   4151 		if (cmd == SIOCSIFCAP)
   4152 			error = if_init(ifp);
   4153 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4154 			mutex_enter(sc->sc_core_lock);
   4155 			if (sc->sc_if_flags & IFF_RUNNING) {
   4156 				/*
   4157 				 * Multicast list has changed; set the
   4158 				 * hardware filter accordingly.
   4159 				 */
   4160 				wm_set_filter(sc);
   4161 			}
   4162 			mutex_exit(sc->sc_core_lock);
   4163 		}
   4164 		break;
   4165 	}
   4166 
   4167 	return error;
   4168 }
   4169 
   4170 /* MAC address related */
   4171 
   4172 /*
    4173  * Get the offset of the MAC address and return it.
    4174  * If an error occurs, use offset 0.
   4175  */
   4176 static uint16_t
   4177 wm_check_alt_mac_addr(struct wm_softc *sc)
   4178 {
   4179 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4180 	uint16_t offset = NVM_OFF_MACADDR;
   4181 
   4182 	/* Try to read alternative MAC address pointer */
   4183 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4184 		return 0;
   4185 
    4186 	/* Check whether the pointer is valid. */
   4187 	if ((offset == 0x0000) || (offset == 0xffff))
   4188 		return 0;
   4189 
   4190 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4191 	/*
    4192 	 * Check whether the alternative MAC address is valid.
    4193 	 * Some cards have a non-0xffff pointer but don't actually use
    4194 	 * an alternative MAC address.
    4195 	 *
    4196 	 * A valid unicast address must have the multicast bit clear.
   4197 	 */
   4198 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4199 		if (((myea[0] & 0xff) & 0x01) == 0)
   4200 			return offset; /* Found */
   4201 
   4202 	/* Not found */
   4203 	return 0;
   4204 }
   4205 
   4206 static int
   4207 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4208 {
   4209 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4210 	uint16_t offset = NVM_OFF_MACADDR;
   4211 	int do_invert = 0;
   4212 
   4213 	switch (sc->sc_type) {
   4214 	case WM_T_82580:
   4215 	case WM_T_I350:
   4216 	case WM_T_I354:
   4217 		/* EEPROM Top Level Partitioning */
   4218 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4219 		break;
   4220 	case WM_T_82571:
   4221 	case WM_T_82575:
   4222 	case WM_T_82576:
   4223 	case WM_T_80003:
   4224 	case WM_T_I210:
   4225 	case WM_T_I211:
   4226 		offset = wm_check_alt_mac_addr(sc);
   4227 		if (offset == 0)
   4228 			if ((sc->sc_funcid & 0x01) == 1)
   4229 				do_invert = 1;
   4230 		break;
   4231 	default:
   4232 		if ((sc->sc_funcid & 0x01) == 1)
   4233 			do_invert = 1;
   4234 		break;
   4235 	}
   4236 
   4237 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4238 		goto bad;
   4239 
   4240 	enaddr[0] = myea[0] & 0xff;
   4241 	enaddr[1] = myea[0] >> 8;
   4242 	enaddr[2] = myea[1] & 0xff;
   4243 	enaddr[3] = myea[1] >> 8;
   4244 	enaddr[4] = myea[2] & 0xff;
   4245 	enaddr[5] = myea[2] >> 8;
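
	/*
	 * Illustrative note (not from the datasheet): the NVM stores the
	 * station address as three little-endian 16-bit words, so e.g.
	 * myea[] = { 0x2211, 0x4433, 0x6655 } unpacks above to the MAC
	 * address 11:22:33:44:55:66.
	 */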
   4246 
   4247 	/*
   4248 	 * Toggle the LSB of the MAC address on the second port
   4249 	 * of some dual port cards.
   4250 	 */
   4251 	if (do_invert != 0)
   4252 		enaddr[5] ^= 1;
   4253 
   4254 	return 0;
   4255 
   4256 bad:
   4257 	return -1;
   4258 }
   4259 
   4260 /*
   4261  * wm_set_ral:
   4262  *
    4263  *	Set an entry in the receive address list.
   4264  */
   4265 static void
   4266 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4267 {
   4268 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4269 	uint32_t wlock_mac;
   4270 	int rv;
   4271 
   4272 	if (enaddr != NULL) {
   4273 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4274 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4275 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4276 		ral_hi |= RAL_AV;
   4277 	} else {
   4278 		ral_lo = 0;
   4279 		ral_hi = 0;
   4280 	}
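
	/*
	 * For illustration: enaddr 00:11:22:33:44:55 packs to
	 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544, matching the
	 * byte order the RAL/RAH registers expect.
	 */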
   4281 
   4282 	switch (sc->sc_type) {
   4283 	case WM_T_82542_2_0:
   4284 	case WM_T_82542_2_1:
   4285 	case WM_T_82543:
   4286 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4289 		CSR_WRITE_FLUSH(sc);
   4290 		break;
   4291 	case WM_T_PCH2:
   4292 	case WM_T_PCH_LPT:
   4293 	case WM_T_PCH_SPT:
   4294 	case WM_T_PCH_CNP:
   4295 		if (idx == 0) {
   4296 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4297 			CSR_WRITE_FLUSH(sc);
   4298 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4299 			CSR_WRITE_FLUSH(sc);
   4300 			return;
   4301 		}
   4302 		if (sc->sc_type != WM_T_PCH2) {
   4303 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4304 			    FWSM_WLOCK_MAC);
   4305 			addrl = WMREG_SHRAL(idx - 1);
   4306 			addrh = WMREG_SHRAH(idx - 1);
   4307 		} else {
   4308 			wlock_mac = 0;
   4309 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4310 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4311 		}
   4312 
   4313 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4314 			rv = wm_get_swflag_ich8lan(sc);
   4315 			if (rv != 0)
   4316 				return;
   4317 			CSR_WRITE(sc, addrl, ral_lo);
   4318 			CSR_WRITE_FLUSH(sc);
   4319 			CSR_WRITE(sc, addrh, ral_hi);
   4320 			CSR_WRITE_FLUSH(sc);
   4321 			wm_put_swflag_ich8lan(sc);
   4322 		}
   4323 
   4324 		break;
   4325 	default:
   4326 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4327 		CSR_WRITE_FLUSH(sc);
   4328 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4329 		CSR_WRITE_FLUSH(sc);
   4330 		break;
   4331 	}
   4332 }
   4333 
   4334 /*
   4335  * wm_mchash:
   4336  *
   4337  *	Compute the hash of the multicast address for the 4096-bit
   4338  *	multicast filter.
   4339  */
   4340 static uint32_t
   4341 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4342 {
   4343 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4344 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4345 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4346 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4347 	uint32_t hash;
   4348 
   4349 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4350 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4351 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4352 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4353 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4354 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4355 		return (hash & 0x3ff);
   4356 	}
   4357 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4358 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4359 
   4360 	return (hash & 0xfff);
   4361 }
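
/*
 * A worked example (illustrative only): with sc_mchash_type 0 and an
 * address ending in ...:5e:01, the non-ICH path computes
 * hash = (0x5e >> 4) | (0x01 << 4) = 0x15; wm_set_filter() then maps
 * this to MTA register 0x15 >> 5 = 0, bit 0x15 & 0x1f = 21.
 */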
   4362 
    4363 /*
    4364  * wm_rar_count:
    4365  *	Return the number of entries in the receive address list.
    4366  */
   4367 static int
   4368 wm_rar_count(struct wm_softc *sc)
   4369 {
   4370 	int size;
   4371 
   4372 	switch (sc->sc_type) {
   4373 	case WM_T_ICH8:
    4374 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4375 		break;
   4376 	case WM_T_ICH9:
   4377 	case WM_T_ICH10:
   4378 	case WM_T_PCH:
   4379 		size = WM_RAL_TABSIZE_ICH8;
   4380 		break;
   4381 	case WM_T_PCH2:
   4382 		size = WM_RAL_TABSIZE_PCH2;
   4383 		break;
   4384 	case WM_T_PCH_LPT:
   4385 	case WM_T_PCH_SPT:
   4386 	case WM_T_PCH_CNP:
   4387 		size = WM_RAL_TABSIZE_PCH_LPT;
   4388 		break;
   4389 	case WM_T_82575:
   4390 	case WM_T_I210:
   4391 	case WM_T_I211:
   4392 		size = WM_RAL_TABSIZE_82575;
   4393 		break;
   4394 	case WM_T_82576:
   4395 	case WM_T_82580:
   4396 		size = WM_RAL_TABSIZE_82576;
   4397 		break;
   4398 	case WM_T_I350:
   4399 	case WM_T_I354:
   4400 		size = WM_RAL_TABSIZE_I350;
   4401 		break;
   4402 	default:
   4403 		size = WM_RAL_TABSIZE;
   4404 	}
   4405 
   4406 	return size;
   4407 }
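
/*
 * Usage note: wm_set_filter() below uses this count to size the loop
 * that clears the unused receive address list slots.
 */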
   4408 
   4409 /*
   4410  * wm_set_filter:
   4411  *
   4412  *	Set up the receive filter.
   4413  */
   4414 static void
   4415 wm_set_filter(struct wm_softc *sc)
   4416 {
   4417 	struct ethercom *ec = &sc->sc_ethercom;
   4418 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4419 	struct ether_multi *enm;
   4420 	struct ether_multistep step;
   4421 	bus_addr_t mta_reg;
   4422 	uint32_t hash, reg, bit;
   4423 	int i, size, ralmax, rv;
   4424 
   4425 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4426 		device_xname(sc->sc_dev), __func__));
   4427 	KASSERT(mutex_owned(sc->sc_core_lock));
   4428 
   4429 	if (sc->sc_type >= WM_T_82544)
   4430 		mta_reg = WMREG_CORDOVA_MTA;
   4431 	else
   4432 		mta_reg = WMREG_MTA;
   4433 
   4434 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4435 
   4436 	if (sc->sc_if_flags & IFF_BROADCAST)
   4437 		sc->sc_rctl |= RCTL_BAM;
   4438 	if (sc->sc_if_flags & IFF_PROMISC) {
   4439 		sc->sc_rctl |= RCTL_UPE;
   4440 		ETHER_LOCK(ec);
   4441 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4442 		ETHER_UNLOCK(ec);
   4443 		goto allmulti;
   4444 	}
   4445 
   4446 	/*
   4447 	 * Set the station address in the first RAL slot, and
   4448 	 * clear the remaining slots.
   4449 	 */
   4450 	size = wm_rar_count(sc);
   4451 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4452 
   4453 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4454 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4455 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4456 		switch (i) {
   4457 		case 0:
   4458 			/* We can use all entries */
   4459 			ralmax = size;
   4460 			break;
   4461 		case 1:
   4462 			/* Only RAR[0] */
   4463 			ralmax = 1;
   4464 			break;
   4465 		default:
   4466 			/* Available SHRA + RAR[0] */
   4467 			ralmax = i + 1;
   4468 		}
   4469 	} else
   4470 		ralmax = size;
   4471 	for (i = 1; i < size; i++) {
   4472 		if (i < ralmax)
   4473 			wm_set_ral(sc, NULL, i);
   4474 	}
   4475 
   4476 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4477 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4478 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4479 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4480 		size = WM_ICH8_MC_TABSIZE;
   4481 	else
   4482 		size = WM_MC_TABSIZE;
   4483 	/* Clear out the multicast table. */
   4484 	for (i = 0; i < size; i++) {
   4485 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4486 		CSR_WRITE_FLUSH(sc);
   4487 	}
   4488 
   4489 	ETHER_LOCK(ec);
   4490 	ETHER_FIRST_MULTI(step, ec, enm);
   4491 	while (enm != NULL) {
   4492 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4493 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4494 			ETHER_UNLOCK(ec);
   4495 			/*
   4496 			 * We must listen to a range of multicast addresses.
   4497 			 * For now, just accept all multicasts, rather than
   4498 			 * trying to set only those filter bits needed to match
   4499 			 * the range.  (At this time, the only use of address
   4500 			 * ranges is for IP multicast routing, for which the
   4501 			 * range is big enough to require all bits set.)
   4502 			 */
   4503 			goto allmulti;
   4504 		}
   4505 
   4506 		hash = wm_mchash(sc, enm->enm_addrlo);
   4507 
   4508 		reg = (hash >> 5);
   4509 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4510 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4511 		    || (sc->sc_type == WM_T_PCH2)
   4512 		    || (sc->sc_type == WM_T_PCH_LPT)
   4513 		    || (sc->sc_type == WM_T_PCH_SPT)
   4514 		    || (sc->sc_type == WM_T_PCH_CNP))
   4515 			reg &= 0x1f;
   4516 		else
   4517 			reg &= 0x7f;
   4518 		bit = hash & 0x1f;
   4519 
   4520 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4521 		hash |= 1U << bit;
   4522 
   4523 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4524 			/*
    4525 			 * 82544 Errata 9: Certain registers cannot be
    4526 			 * written with particular alignments in PCI-X bus
    4527 			 * operation (FCAH, MTA and VFTA).
   4528 			 */
   4529 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4530 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4531 			CSR_WRITE_FLUSH(sc);
   4532 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4533 			CSR_WRITE_FLUSH(sc);
   4534 		} else {
   4535 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4536 			CSR_WRITE_FLUSH(sc);
   4537 		}
   4538 
   4539 		ETHER_NEXT_MULTI(step, enm);
   4540 	}
   4541 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4542 	ETHER_UNLOCK(ec);
   4543 
   4544 	goto setit;
   4545 
   4546 allmulti:
   4547 	sc->sc_rctl |= RCTL_MPE;
   4548 
   4549 setit:
   4550 	if (sc->sc_type >= WM_T_PCH2) {
   4551 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4552 		    && (ifp->if_mtu > ETHERMTU))
   4553 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4554 		else
   4555 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4556 		if (rv != 0)
   4557 			device_printf(sc->sc_dev,
   4558 			    "Failed to do workaround for jumbo frame.\n");
   4559 	}
   4560 
   4561 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4562 }
   4563 
   4564 /* Reset and init related */
   4565 
   4566 static void
   4567 wm_set_vlan(struct wm_softc *sc)
   4568 {
   4569 
   4570 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4571 		device_xname(sc->sc_dev), __func__));
   4572 
   4573 	/* Deal with VLAN enables. */
   4574 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4575 		sc->sc_ctrl |= CTRL_VME;
   4576 	else
   4577 		sc->sc_ctrl &= ~CTRL_VME;
   4578 
   4579 	/* Write the control registers. */
   4580 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4581 }
   4582 
   4583 static void
   4584 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4585 {
   4586 	uint32_t gcr;
   4587 	pcireg_t ctrl2;
   4588 
   4589 	gcr = CSR_READ(sc, WMREG_GCR);
   4590 
   4591 	/* Only take action if timeout value is defaulted to 0 */
   4592 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4593 		goto out;
   4594 
   4595 	if ((gcr & GCR_CAP_VER2) == 0) {
   4596 		gcr |= GCR_CMPL_TMOUT_10MS;
   4597 		goto out;
   4598 	}
   4599 
   4600 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4601 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4602 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4603 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4604 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4605 
   4606 out:
   4607 	/* Disable completion timeout resend */
   4608 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4609 
   4610 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4611 }
   4612 
   4613 void
   4614 wm_get_auto_rd_done(struct wm_softc *sc)
   4615 {
   4616 	int i;
   4617 
   4618 	/* wait for eeprom to reload */
   4619 	switch (sc->sc_type) {
   4620 	case WM_T_82571:
   4621 	case WM_T_82572:
   4622 	case WM_T_82573:
   4623 	case WM_T_82574:
   4624 	case WM_T_82583:
   4625 	case WM_T_82575:
   4626 	case WM_T_82576:
   4627 	case WM_T_82580:
   4628 	case WM_T_I350:
   4629 	case WM_T_I354:
   4630 	case WM_T_I210:
   4631 	case WM_T_I211:
   4632 	case WM_T_80003:
   4633 	case WM_T_ICH8:
   4634 	case WM_T_ICH9:
   4635 		for (i = 0; i < 10; i++) {
   4636 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4637 				break;
   4638 			delay(1000);
   4639 		}
   4640 		if (i == 10) {
   4641 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4642 			    "complete\n", device_xname(sc->sc_dev));
   4643 		}
   4644 		break;
   4645 	default:
   4646 		break;
   4647 	}
   4648 }
   4649 
   4650 void
   4651 wm_lan_init_done(struct wm_softc *sc)
   4652 {
   4653 	uint32_t reg = 0;
   4654 	int i;
   4655 
   4656 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4657 		device_xname(sc->sc_dev), __func__));
   4658 
   4659 	/* Wait for eeprom to reload */
   4660 	switch (sc->sc_type) {
   4661 	case WM_T_ICH10:
   4662 	case WM_T_PCH:
   4663 	case WM_T_PCH2:
   4664 	case WM_T_PCH_LPT:
   4665 	case WM_T_PCH_SPT:
   4666 	case WM_T_PCH_CNP:
   4667 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4668 			reg = CSR_READ(sc, WMREG_STATUS);
   4669 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4670 				break;
   4671 			delay(100);
   4672 		}
   4673 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4674 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4675 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4676 		}
   4677 		break;
   4678 	default:
   4679 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4680 		    __func__);
   4681 		break;
   4682 	}
   4683 
   4684 	reg &= ~STATUS_LAN_INIT_DONE;
   4685 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4686 }
   4687 
   4688 void
   4689 wm_get_cfg_done(struct wm_softc *sc)
   4690 {
   4691 	int mask;
   4692 	uint32_t reg;
   4693 	int i;
   4694 
   4695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4696 		device_xname(sc->sc_dev), __func__));
   4697 
   4698 	/* Wait for eeprom to reload */
   4699 	switch (sc->sc_type) {
   4700 	case WM_T_82542_2_0:
   4701 	case WM_T_82542_2_1:
   4702 		/* null */
   4703 		break;
   4704 	case WM_T_82543:
   4705 	case WM_T_82544:
   4706 	case WM_T_82540:
   4707 	case WM_T_82545:
   4708 	case WM_T_82545_3:
   4709 	case WM_T_82546:
   4710 	case WM_T_82546_3:
   4711 	case WM_T_82541:
   4712 	case WM_T_82541_2:
   4713 	case WM_T_82547:
   4714 	case WM_T_82547_2:
   4715 	case WM_T_82573:
   4716 	case WM_T_82574:
   4717 	case WM_T_82583:
   4718 		/* generic */
   4719 		delay(10*1000);
   4720 		break;
   4721 	case WM_T_80003:
   4722 	case WM_T_82571:
   4723 	case WM_T_82572:
   4724 	case WM_T_82575:
   4725 	case WM_T_82576:
   4726 	case WM_T_82580:
   4727 	case WM_T_I350:
   4728 	case WM_T_I354:
   4729 	case WM_T_I210:
   4730 	case WM_T_I211:
   4731 		if (sc->sc_type == WM_T_82571) {
   4732 			/* Only 82571 shares port 0 */
   4733 			mask = EEMNGCTL_CFGDONE_0;
   4734 		} else
   4735 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4736 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4737 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4738 				break;
   4739 			delay(1000);
   4740 		}
   4741 		if (i >= WM_PHY_CFG_TIMEOUT)
   4742 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4743 				device_xname(sc->sc_dev), __func__));
   4744 		break;
   4745 	case WM_T_ICH8:
   4746 	case WM_T_ICH9:
   4747 	case WM_T_ICH10:
   4748 	case WM_T_PCH:
   4749 	case WM_T_PCH2:
   4750 	case WM_T_PCH_LPT:
   4751 	case WM_T_PCH_SPT:
   4752 	case WM_T_PCH_CNP:
   4753 		delay(10*1000);
   4754 		if (sc->sc_type >= WM_T_ICH10)
   4755 			wm_lan_init_done(sc);
   4756 		else
   4757 			wm_get_auto_rd_done(sc);
   4758 
   4759 		/* Clear PHY Reset Asserted bit */
   4760 		reg = CSR_READ(sc, WMREG_STATUS);
   4761 		if ((reg & STATUS_PHYRA) != 0)
   4762 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4763 		break;
   4764 	default:
   4765 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4766 		    __func__);
   4767 		break;
   4768 	}
   4769 }
   4770 
   4771 int
   4772 wm_phy_post_reset(struct wm_softc *sc)
   4773 {
   4774 	device_t dev = sc->sc_dev;
   4775 	uint16_t reg;
   4776 	int rv = 0;
   4777 
   4778 	/* This function is only for ICH8 and newer. */
   4779 	if (sc->sc_type < WM_T_ICH8)
   4780 		return 0;
   4781 
   4782 	if (wm_phy_resetisblocked(sc)) {
   4783 		/* XXX */
   4784 		device_printf(dev, "PHY is blocked\n");
   4785 		return -1;
   4786 	}
   4787 
   4788 	/* Allow time for h/w to get to quiescent state after reset */
   4789 	delay(10*1000);
   4790 
   4791 	/* Perform any necessary post-reset workarounds */
   4792 	if (sc->sc_type == WM_T_PCH)
   4793 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4794 	else if (sc->sc_type == WM_T_PCH2)
   4795 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4796 	if (rv != 0)
   4797 		return rv;
   4798 
   4799 	/* Clear the host wakeup bit after lcd reset */
   4800 	if (sc->sc_type >= WM_T_PCH) {
   4801 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4802 		reg &= ~BM_WUC_HOST_WU_BIT;
   4803 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4804 	}
   4805 
   4806 	/* Configure the LCD with the extended configuration region in NVM */
   4807 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4808 		return rv;
   4809 
   4810 	/* Configure the LCD with the OEM bits in NVM */
   4811 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4812 
   4813 	if (sc->sc_type == WM_T_PCH2) {
   4814 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4815 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4816 			delay(10 * 1000);
   4817 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4818 		}
   4819 		/* Set EEE LPI Update Timer to 200usec */
   4820 		rv = sc->phy.acquire(sc);
   4821 		if (rv)
   4822 			return rv;
   4823 		rv = wm_write_emi_reg_locked(dev,
   4824 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4825 		sc->phy.release(sc);
   4826 	}
   4827 
   4828 	return rv;
   4829 }
   4830 
   4831 /* Only for PCH and newer */
   4832 static int
   4833 wm_write_smbus_addr(struct wm_softc *sc)
   4834 {
   4835 	uint32_t strap, freq;
   4836 	uint16_t phy_data;
   4837 	int rv;
   4838 
   4839 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4840 		device_xname(sc->sc_dev), __func__));
   4841 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4842 
   4843 	strap = CSR_READ(sc, WMREG_STRAP);
   4844 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4845 
   4846 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4847 	if (rv != 0)
   4848 		return rv;
   4849 
   4850 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4851 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4852 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4853 
   4854 	if (sc->sc_phytype == WMPHY_I217) {
   4855 		/* Restore SMBus frequency */
    4856 		if (freq--) {
   4857 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4858 			    | HV_SMB_ADDR_FREQ_HIGH);
   4859 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4860 			    HV_SMB_ADDR_FREQ_LOW);
   4861 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4862 			    HV_SMB_ADDR_FREQ_HIGH);
   4863 		} else
   4864 			DPRINTF(sc, WM_DEBUG_INIT,
   4865 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4866 				device_xname(sc->sc_dev), __func__));
   4867 	}
   4868 
   4869 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4870 	    phy_data);
   4871 }
   4872 
   4873 static int
   4874 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4875 {
   4876 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4877 	uint16_t phy_page = 0;
   4878 	int rv = 0;
   4879 
   4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4881 		device_xname(sc->sc_dev), __func__));
   4882 
   4883 	switch (sc->sc_type) {
   4884 	case WM_T_ICH8:
   4885 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4886 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4887 			return 0;
   4888 
   4889 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4890 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4891 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4892 			break;
   4893 		}
   4894 		/* FALLTHROUGH */
   4895 	case WM_T_PCH:
   4896 	case WM_T_PCH2:
   4897 	case WM_T_PCH_LPT:
   4898 	case WM_T_PCH_SPT:
   4899 	case WM_T_PCH_CNP:
   4900 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4901 		break;
   4902 	default:
   4903 		return 0;
   4904 	}
   4905 
   4906 	if ((rv = sc->phy.acquire(sc)) != 0)
   4907 		return rv;
   4908 
   4909 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4910 	if ((reg & sw_cfg_mask) == 0)
   4911 		goto release;
   4912 
   4913 	/*
   4914 	 * Make sure HW does not configure LCD from PHY extended configuration
   4915 	 * before SW configuration
   4916 	 */
   4917 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4918 	if ((sc->sc_type < WM_T_PCH2)
   4919 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4920 		goto release;
   4921 
   4922 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4923 		device_xname(sc->sc_dev), __func__));
   4924 	/* word_addr is in DWORD */
   4925 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4926 
   4927 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4928 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4929 	if (cnf_size == 0)
   4930 		goto release;
   4931 
   4932 	if (((sc->sc_type == WM_T_PCH)
   4933 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4934 	    || (sc->sc_type > WM_T_PCH)) {
   4935 		/*
   4936 		 * HW configures the SMBus address and LEDs when the OEM and
   4937 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4938 		 * are cleared, SW will configure them instead.
   4939 		 */
   4940 		DPRINTF(sc, WM_DEBUG_INIT,
   4941 		    ("%s: %s: Configure SMBus and LED\n",
   4942 			device_xname(sc->sc_dev), __func__));
   4943 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4944 			goto release;
   4945 
   4946 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4947 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4948 		    (uint16_t)reg);
   4949 		if (rv != 0)
   4950 			goto release;
   4951 	}
   4952 
   4953 	/* Configure LCD from extended configuration region. */
   4954 	for (i = 0; i < cnf_size; i++) {
   4955 		uint16_t reg_data, reg_addr;
   4956 
   4957 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4958 			goto release;
   4959 
   4960 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4961 			goto release;
   4962 
   4963 		if (reg_addr == IGPHY_PAGE_SELECT)
   4964 			phy_page = reg_data;
   4965 
   4966 		reg_addr &= IGPHY_MAXREGADDR;
   4967 		reg_addr |= phy_page;
   4968 
   4969 		KASSERT(sc->phy.writereg_locked != NULL);
   4970 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4971 		    reg_data);
   4972 	}
   4973 
   4974 release:
   4975 	sc->phy.release(sc);
   4976 	return rv;
   4977 }
   4978 
   4979 /*
   4980  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4981  *  @sc:       pointer to the HW structure
   4982  *  @d0_state: boolean if entering d0 or d3 device state
   4983  *
   4984  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4985  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4986  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4987  */
   4988 int
   4989 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4990 {
   4991 	uint32_t mac_reg;
   4992 	uint16_t oem_reg;
   4993 	int rv;
   4994 
   4995 	if (sc->sc_type < WM_T_PCH)
   4996 		return 0;
   4997 
   4998 	rv = sc->phy.acquire(sc);
   4999 	if (rv != 0)
   5000 		return rv;
   5001 
   5002 	if (sc->sc_type == WM_T_PCH) {
   5003 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5004 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5005 			goto release;
   5006 	}
   5007 
   5008 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5009 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5010 		goto release;
   5011 
   5012 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5013 
   5014 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5015 	if (rv != 0)
   5016 		goto release;
   5017 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5018 
   5019 	if (d0_state) {
   5020 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5021 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5022 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5023 			oem_reg |= HV_OEM_BITS_LPLU;
   5024 	} else {
   5025 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5026 		    != 0)
   5027 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5028 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5029 		    != 0)
   5030 			oem_reg |= HV_OEM_BITS_LPLU;
   5031 	}
   5032 
   5033 	/* Set Restart auto-neg to activate the bits */
   5034 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5035 	    && (wm_phy_resetisblocked(sc) == false))
   5036 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5037 
   5038 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5039 
   5040 release:
   5041 	sc->phy.release(sc);
   5042 
   5043 	return rv;
   5044 }
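
/*
 * Usage note: wm_phy_post_reset() above calls this with d0_state true
 * after an LCD reset; a d0_state of false selects the NOND0A variants
 * of the GbE-disable and LPLU bits instead, as handled above.
 */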
   5045 
   5046 /* Init hardware bits */
   5047 void
   5048 wm_initialize_hardware_bits(struct wm_softc *sc)
   5049 {
   5050 	uint32_t tarc0, tarc1, reg;
   5051 
   5052 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5053 		device_xname(sc->sc_dev), __func__));
   5054 
   5055 	/* For 82571 variant, 80003 and ICHs */
   5056 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5057 	    || WM_IS_ICHPCH(sc)) {
   5058 
   5059 		/* Transmit Descriptor Control 0 */
   5060 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5061 		reg |= TXDCTL_COUNT_DESC;
   5062 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5063 
   5064 		/* Transmit Descriptor Control 1 */
   5065 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5066 		reg |= TXDCTL_COUNT_DESC;
   5067 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5068 
   5069 		/* TARC0 */
   5070 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5071 		switch (sc->sc_type) {
   5072 		case WM_T_82571:
   5073 		case WM_T_82572:
   5074 		case WM_T_82573:
   5075 		case WM_T_82574:
   5076 		case WM_T_82583:
   5077 		case WM_T_80003:
   5078 			/* Clear bits 30..27 */
   5079 			tarc0 &= ~__BITS(30, 27);
   5080 			break;
   5081 		default:
   5082 			break;
   5083 		}
   5084 
   5085 		switch (sc->sc_type) {
   5086 		case WM_T_82571:
   5087 		case WM_T_82572:
   5088 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5089 
   5090 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5091 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5092 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5093 			/* 8257[12] Errata No.7 */
    5094 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5095 
   5096 			/* TARC1 bit 28 */
   5097 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5098 				tarc1 &= ~__BIT(28);
   5099 			else
   5100 				tarc1 |= __BIT(28);
   5101 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5102 
   5103 			/*
   5104 			 * 8257[12] Errata No.13
    5105 			 * Disable Dynamic Clock Gating.
   5106 			 */
   5107 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5108 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5109 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5110 			break;
   5111 		case WM_T_82573:
   5112 		case WM_T_82574:
   5113 		case WM_T_82583:
   5114 			if ((sc->sc_type == WM_T_82574)
   5115 			    || (sc->sc_type == WM_T_82583))
   5116 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5117 
   5118 			/* Extended Device Control */
   5119 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5120 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5121 			reg |= __BIT(22);	/* Set bit 22 */
   5122 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5123 
   5124 			/* Device Control */
   5125 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5126 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5127 
   5128 			/* PCIe Control Register */
   5129 			/*
   5130 			 * 82573 Errata (unknown).
   5131 			 *
   5132 			 * 82574 Errata 25 and 82583 Errata 12
   5133 			 * "Dropped Rx Packets":
    5134 			 *   NVM image version 2.1.4 and newer do not have this bug.
   5135 			 */
   5136 			reg = CSR_READ(sc, WMREG_GCR);
   5137 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5138 			CSR_WRITE(sc, WMREG_GCR, reg);
   5139 
   5140 			if ((sc->sc_type == WM_T_82574)
   5141 			    || (sc->sc_type == WM_T_82583)) {
   5142 				/*
   5143 				 * Document says this bit must be set for
   5144 				 * proper operation.
   5145 				 */
   5146 				reg = CSR_READ(sc, WMREG_GCR);
   5147 				reg |= __BIT(22);
   5148 				CSR_WRITE(sc, WMREG_GCR, reg);
   5149 
   5150 				/*
    5151 				 * Apply a workaround for the hardware
    5152 				 * erratum documented in the errata docs:
    5153 				 * some error-prone or unreliable PCIe
    5154 				 * completions occur, particularly with
    5155 				 * ASPM enabled. Without the fix, the
    5156 				 * issue can cause Tx timeouts.
   5157 				 */
   5158 				reg = CSR_READ(sc, WMREG_GCR2);
   5159 				reg |= __BIT(0);
   5160 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5161 			}
   5162 			break;
   5163 		case WM_T_80003:
   5164 			/* TARC0 */
   5165 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5166 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5167 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5168 
   5169 			/* TARC1 bit 28 */
   5170 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5171 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5172 				tarc1 &= ~__BIT(28);
   5173 			else
   5174 				tarc1 |= __BIT(28);
   5175 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5176 			break;
   5177 		case WM_T_ICH8:
   5178 		case WM_T_ICH9:
   5179 		case WM_T_ICH10:
   5180 		case WM_T_PCH:
   5181 		case WM_T_PCH2:
   5182 		case WM_T_PCH_LPT:
   5183 		case WM_T_PCH_SPT:
   5184 		case WM_T_PCH_CNP:
   5185 			/* TARC0 */
   5186 			if (sc->sc_type == WM_T_ICH8) {
   5187 				/* Set TARC0 bits 29 and 28 */
   5188 				tarc0 |= __BITS(29, 28);
   5189 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5190 				tarc0 |= __BIT(29);
   5191 				/*
   5192 				 *  Drop bit 28. From Linux.
   5193 				 * See I218/I219 spec update
   5194 				 * "5. Buffer Overrun While the I219 is
   5195 				 * Processing DMA Transactions"
   5196 				 */
   5197 				tarc0 &= ~__BIT(28);
   5198 			}
   5199 			/* Set TARC0 bits 23,24,26,27 */
   5200 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5201 
   5202 			/* CTRL_EXT */
   5203 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5204 			reg |= __BIT(22);	/* Set bit 22 */
   5205 			/*
   5206 			 * Enable PHY low-power state when MAC is at D3
   5207 			 * w/o WoL
   5208 			 */
   5209 			if (sc->sc_type >= WM_T_PCH)
   5210 				reg |= CTRL_EXT_PHYPDEN;
   5211 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5212 
   5213 			/* TARC1 */
   5214 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5215 			/* bit 28 */
   5216 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5217 				tarc1 &= ~__BIT(28);
   5218 			else
   5219 				tarc1 |= __BIT(28);
   5220 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5221 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5222 
   5223 			/* Device Status */
   5224 			if (sc->sc_type == WM_T_ICH8) {
   5225 				reg = CSR_READ(sc, WMREG_STATUS);
   5226 				reg &= ~__BIT(31);
   5227 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5228 
   5229 			}
   5230 
   5231 			/* IOSFPC */
   5232 			if (sc->sc_type == WM_T_PCH_SPT) {
   5233 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5234 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5235 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5236 			}
   5237 			/*
    5238 			 * To work around a descriptor data corruption issue
    5239 			 * during NFS v2 UDP traffic, just disable the NFS
    5240 			 * filtering capability.
   5241 			 */
   5242 			reg = CSR_READ(sc, WMREG_RFCTL);
   5243 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5244 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5245 			break;
   5246 		default:
   5247 			break;
   5248 		}
   5249 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5250 
   5251 		switch (sc->sc_type) {
   5252 		case WM_T_82571:
   5253 		case WM_T_82572:
   5254 		case WM_T_82573:
   5255 		case WM_T_80003:
   5256 		case WM_T_ICH8:
   5257 			/*
    5258 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
    5259 			 * others: avoid the RSS Hash Value bug.
   5260 			 */
   5261 			reg = CSR_READ(sc, WMREG_RFCTL);
   5262 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5263 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5264 			break;
   5265 		case WM_T_82574:
    5266 			/* Use the extended Rx descriptor. */
   5267 			reg = CSR_READ(sc, WMREG_RFCTL);
   5268 			reg |= WMREG_RFCTL_EXSTEN;
   5269 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5270 			break;
   5271 		default:
   5272 			break;
   5273 		}
   5274 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5275 		/*
   5276 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5277 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5278 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5279 		 * Correctly by the Device"
   5280 		 *
   5281 		 * I354(C2000) Errata AVR53:
   5282 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5283 		 * Hang"
   5284 		 */
   5285 		reg = CSR_READ(sc, WMREG_RFCTL);
   5286 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5287 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5288 	}
   5289 }
   5290 
   5291 static uint32_t
   5292 wm_rxpbs_adjust_82580(uint32_t val)
   5293 {
   5294 	uint32_t rv = 0;
   5295 
   5296 	if (val < __arraycount(wm_82580_rxpbs_table))
   5297 		rv = wm_82580_rxpbs_table[val];
   5298 
   5299 	return rv;
   5300 }
   5301 
   5302 /*
   5303  * wm_reset_phy:
   5304  *
   5305  *	generic PHY reset function.
   5306  *	Same as e1000_phy_hw_reset_generic()
   5307  */
   5308 static int
   5309 wm_reset_phy(struct wm_softc *sc)
   5310 {
   5311 	uint32_t reg;
   5312 	int rv;
   5313 
   5314 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5315 		device_xname(sc->sc_dev), __func__));
   5316 	if (wm_phy_resetisblocked(sc))
   5317 		return -1;
   5318 
   5319 	rv = sc->phy.acquire(sc);
   5320 	if (rv) {
   5321 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5322 		    __func__, rv);
   5323 		return rv;
   5324 	}
   5325 
   5326 	reg = CSR_READ(sc, WMREG_CTRL);
   5327 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5328 	CSR_WRITE_FLUSH(sc);
   5329 
   5330 	delay(sc->phy.reset_delay_us);
   5331 
   5332 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5333 	CSR_WRITE_FLUSH(sc);
   5334 
   5335 	delay(150);
   5336 
   5337 	sc->phy.release(sc);
   5338 
   5339 	wm_get_cfg_done(sc);
   5340 	wm_phy_post_reset(sc);
   5341 
   5342 	return 0;
   5343 }
   5344 
   5345 /*
   5346  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5347  *
   5348  * In i219, the descriptor rings must be emptied before resetting the HW
   5349  * or before changing the device state to D3 during runtime (runtime PM).
   5350  *
   5351  * Failure to do this will cause the HW to enter a unit hang state which can
   5352  * only be released by PCI reset on the device.
   5353  *
   5354  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5355  */
   5356 static void
   5357 wm_flush_desc_rings(struct wm_softc *sc)
   5358 {
   5359 	pcireg_t preg;
   5360 	uint32_t reg;
   5361 	struct wm_txqueue *txq;
   5362 	wiseman_txdesc_t *txd;
   5363 	int nexttx;
   5364 	uint32_t rctl;
   5365 
   5366 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5367 
   5368 	/* First, disable MULR fix in FEXTNVM11 */
   5369 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5370 	reg |= FEXTNVM11_DIS_MULRFIX;
   5371 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5372 
   5373 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5374 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5375 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5376 		return;
   5377 
   5378 	/*
   5379 	 * Remove all descriptors from the tx_ring.
   5380 	 *
   5381 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5382 	 * happens when the HW reads the regs. We assign the ring itself as
    5383 	 * the data of the next descriptor. We don't care about the data
    5384 	 * since we are about to reset the HW.
   5385 	 */
   5386 #ifdef WM_DEBUG
   5387 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5388 #endif
   5389 	reg = CSR_READ(sc, WMREG_TCTL);
   5390 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5391 
   5392 	txq = &sc->sc_queue[0].wmq_txq;
   5393 	nexttx = txq->txq_next;
   5394 	txd = &txq->txq_descs[nexttx];
   5395 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5396 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5397 	txd->wtx_fields.wtxu_status = 0;
   5398 	txd->wtx_fields.wtxu_options = 0;
   5399 	txd->wtx_fields.wtxu_vlan = 0;
   5400 
   5401 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5402 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5403 
   5404 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5405 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5406 	CSR_WRITE_FLUSH(sc);
   5407 	delay(250);
   5408 
   5409 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5410 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5411 		return;
   5412 
   5413 	/*
   5414 	 * Mark all descriptors in the RX ring as consumed and disable the
   5415 	 * rx ring.
   5416 	 */
   5417 #ifdef WM_DEBUG
   5418 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5419 #endif
   5420 	rctl = CSR_READ(sc, WMREG_RCTL);
   5421 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5422 	CSR_WRITE_FLUSH(sc);
   5423 	delay(150);
   5424 
   5425 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5426 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5427 	reg &= 0xffffc000;
   5428 	/*
   5429 	 * Update thresholds: prefetch threshold to 31, host threshold
   5430 	 * to 1 and make sure the granularity is "descriptors" and not
   5431 	 * "cache lines"
   5432 	 */
   5433 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5434 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5435 
   5436 	/* Momentarily enable the RX ring for the changes to take effect */
   5437 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5438 	CSR_WRITE_FLUSH(sc);
   5439 	delay(150);
   5440 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5441 }
   5442 
   5443 /*
   5444  * wm_reset:
   5445  *
   5446  *	Reset the i82542 chip.
   5447  */
   5448 static void
   5449 wm_reset(struct wm_softc *sc)
   5450 {
   5451 	int phy_reset = 0;
   5452 	int i, error = 0;
   5453 	uint32_t reg;
   5454 	uint16_t kmreg;
   5455 	int rv;
   5456 
   5457 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5458 		device_xname(sc->sc_dev), __func__));
   5459 	KASSERT(sc->sc_type != 0);
   5460 
   5461 	/*
   5462 	 * Allocate on-chip memory according to the MTU size.
   5463 	 * The Packet Buffer Allocation register must be written
   5464 	 * before the chip is reset.
   5465 	 */
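	/*
	 * Illustrative numbers, assuming the PBA_* constants count
	 * kilobytes: on an 82547 with a standard MTU, sc_pba becomes
	 * PBA_30K, so the 40KB packet buffer is split into 30KB for Rx
	 * and a (PBA_40K - PBA_30K) = 10KB Tx FIFO above it.
	 */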
   5466 	switch (sc->sc_type) {
   5467 	case WM_T_82547:
   5468 	case WM_T_82547_2:
   5469 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5470 		    PBA_22K : PBA_30K;
   5471 		for (i = 0; i < sc->sc_nqueues; i++) {
   5472 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5473 			txq->txq_fifo_head = 0;
   5474 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5475 			txq->txq_fifo_size =
   5476 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5477 			txq->txq_fifo_stall = 0;
   5478 		}
   5479 		break;
   5480 	case WM_T_82571:
   5481 	case WM_T_82572:
    5482 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5483 	case WM_T_80003:
   5484 		sc->sc_pba = PBA_32K;
   5485 		break;
   5486 	case WM_T_82573:
   5487 		sc->sc_pba = PBA_12K;
   5488 		break;
   5489 	case WM_T_82574:
   5490 	case WM_T_82583:
   5491 		sc->sc_pba = PBA_20K;
   5492 		break;
   5493 	case WM_T_82576:
   5494 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5495 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5496 		break;
   5497 	case WM_T_82580:
   5498 	case WM_T_I350:
   5499 	case WM_T_I354:
   5500 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5501 		break;
   5502 	case WM_T_I210:
   5503 	case WM_T_I211:
   5504 		sc->sc_pba = PBA_34K;
   5505 		break;
   5506 	case WM_T_ICH8:
   5507 		/* Workaround for a bit corruption issue in FIFO memory */
   5508 		sc->sc_pba = PBA_8K;
   5509 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5510 		break;
   5511 	case WM_T_ICH9:
   5512 	case WM_T_ICH10:
   5513 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5514 		    PBA_14K : PBA_10K;
   5515 		break;
   5516 	case WM_T_PCH:
   5517 	case WM_T_PCH2:	/* XXX 14K? */
   5518 	case WM_T_PCH_LPT:
   5519 	case WM_T_PCH_SPT:
   5520 	case WM_T_PCH_CNP:
   5521 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5522 		    PBA_12K : PBA_26K;
   5523 		break;
   5524 	default:
   5525 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5526 		    PBA_40K : PBA_48K;
   5527 		break;
   5528 	}
   5529 	/*
    5530 	 * Only old or non-multiqueue devices have the PBA register.
   5531 	 * XXX Need special handling for 82575.
   5532 	 */
   5533 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5534 	    || (sc->sc_type == WM_T_82575))
   5535 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5536 
   5537 	/* Prevent the PCI-E bus from sticking */
   5538 	if (sc->sc_flags & WM_F_PCIE) {
   5539 		int timeout = 800;
   5540 
   5541 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5542 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5543 
   5544 		while (timeout--) {
   5545 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5546 			    == 0)
   5547 				break;
   5548 			delay(100);
   5549 		}
   5550 		if (timeout == 0)
   5551 			device_printf(sc->sc_dev,
   5552 			    "failed to disable bus mastering\n");
   5553 	}
   5554 
   5555 	/* Set the completion timeout for interface */
   5556 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5557 	    || (sc->sc_type == WM_T_82580)
   5558 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5559 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5560 		wm_set_pcie_completion_timeout(sc);
   5561 
   5562 	/* Clear interrupt */
   5563 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5564 	if (wm_is_using_msix(sc)) {
   5565 		if (sc->sc_type != WM_T_82574) {
   5566 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5567 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5568 		} else
   5569 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5570 	}
   5571 
   5572 	/* Stop the transmit and receive processes. */
   5573 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5574 	sc->sc_rctl &= ~RCTL_EN;
   5575 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5576 	CSR_WRITE_FLUSH(sc);
   5577 
   5578 	/* XXX set_tbi_sbp_82543() */
   5579 
   5580 	delay(10*1000);
   5581 
   5582 	/* Must acquire the MDIO ownership before MAC reset */
   5583 	switch (sc->sc_type) {
   5584 	case WM_T_82573:
   5585 	case WM_T_82574:
   5586 	case WM_T_82583:
   5587 		error = wm_get_hw_semaphore_82573(sc);
   5588 		break;
   5589 	default:
   5590 		break;
   5591 	}
   5592 
   5593 	/*
   5594 	 * 82541 Errata 29? & 82547 Errata 28?
   5595 	 * See also the description about PHY_RST bit in CTRL register
   5596 	 * in 8254x_GBe_SDM.pdf.
   5597 	 */
   5598 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5599 		CSR_WRITE(sc, WMREG_CTRL,
   5600 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5601 		CSR_WRITE_FLUSH(sc);
   5602 		delay(5000);
   5603 	}
   5604 
   5605 	switch (sc->sc_type) {
   5606 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5607 	case WM_T_82541:
   5608 	case WM_T_82541_2:
   5609 	case WM_T_82547:
   5610 	case WM_T_82547_2:
   5611 		/*
   5612 		 * On some chipsets, a reset through a memory-mapped write
   5613 		 * cycle can cause the chip to reset before completing the
    5614 		 * write cycle. This causes a major headache that can be avoided
   5615 		 * by issuing the reset via indirect register writes through
   5616 		 * I/O space.
   5617 		 *
   5618 		 * So, if we successfully mapped the I/O BAR at attach time,
   5619 		 * use that. Otherwise, try our luck with a memory-mapped
   5620 		 * reset.
   5621 		 */
   5622 		if (sc->sc_flags & WM_F_IOH_VALID)
   5623 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5624 		else
   5625 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5626 		break;
   5627 	case WM_T_82545_3:
   5628 	case WM_T_82546_3:
   5629 		/* Use the shadow control register on these chips. */
   5630 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5631 		break;
   5632 	case WM_T_80003:
   5633 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5634 		if (sc->phy.acquire(sc) != 0)
   5635 			break;
   5636 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5637 		sc->phy.release(sc);
   5638 		break;
   5639 	case WM_T_ICH8:
   5640 	case WM_T_ICH9:
   5641 	case WM_T_ICH10:
   5642 	case WM_T_PCH:
   5643 	case WM_T_PCH2:
   5644 	case WM_T_PCH_LPT:
   5645 	case WM_T_PCH_SPT:
   5646 	case WM_T_PCH_CNP:
   5647 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5648 		if (wm_phy_resetisblocked(sc) == false) {
   5649 			/*
   5650 			 * Gate automatic PHY configuration by hardware on
   5651 			 * non-managed 82579
   5652 			 */
   5653 			if ((sc->sc_type == WM_T_PCH2)
   5654 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5655 				== 0))
   5656 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5657 
   5658 			reg |= CTRL_PHY_RESET;
   5659 			phy_reset = 1;
   5660 		} else
   5661 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5662 		if (sc->phy.acquire(sc) != 0)
   5663 			break;
   5664 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5665 		/* Don't insert a completion barrier while resetting */
   5666 		delay(20*1000);
   5667 		/*
   5668 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5669 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5670 		 * only. See also wm_get_swflag_ich8lan().
   5671 		 */
   5672 		mutex_exit(sc->sc_ich_phymtx);
   5673 		break;
   5674 	case WM_T_82580:
   5675 	case WM_T_I350:
   5676 	case WM_T_I354:
   5677 	case WM_T_I210:
   5678 	case WM_T_I211:
   5679 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5680 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5681 			CSR_WRITE_FLUSH(sc);
   5682 		delay(5000);
   5683 		break;
   5684 	case WM_T_82542_2_0:
   5685 	case WM_T_82542_2_1:
   5686 	case WM_T_82543:
   5687 	case WM_T_82540:
   5688 	case WM_T_82545:
   5689 	case WM_T_82546:
   5690 	case WM_T_82571:
   5691 	case WM_T_82572:
   5692 	case WM_T_82573:
   5693 	case WM_T_82574:
   5694 	case WM_T_82575:
   5695 	case WM_T_82576:
   5696 	case WM_T_82583:
   5697 	default:
   5698 		/* Everything else can safely use the documented method. */
   5699 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5700 		break;
   5701 	}
   5702 
   5703 	/* Must release the MDIO ownership after MAC reset */
   5704 	switch (sc->sc_type) {
   5705 	case WM_T_82573:
   5706 	case WM_T_82574:
   5707 	case WM_T_82583:
   5708 		if (error == 0)
   5709 			wm_put_hw_semaphore_82573(sc);
   5710 		break;
   5711 	default:
   5712 		break;
   5713 	}
   5714 
   5715 	/* Set Phy Config Counter to 50msec */
   5716 	if (sc->sc_type == WM_T_PCH2) {
   5717 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5718 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5719 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5720 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5721 	}
   5722 
   5723 	if (phy_reset != 0)
   5724 		wm_get_cfg_done(sc);
   5725 
   5726 	/* Reload EEPROM */
   5727 	switch (sc->sc_type) {
   5728 	case WM_T_82542_2_0:
   5729 	case WM_T_82542_2_1:
   5730 	case WM_T_82543:
   5731 	case WM_T_82544:
   5732 		delay(10);
   5733 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5734 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5735 		CSR_WRITE_FLUSH(sc);
   5736 		delay(2000);
   5737 		break;
   5738 	case WM_T_82540:
   5739 	case WM_T_82545:
   5740 	case WM_T_82545_3:
   5741 	case WM_T_82546:
   5742 	case WM_T_82546_3:
   5743 		delay(5*1000);
   5744 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5745 		break;
   5746 	case WM_T_82541:
   5747 	case WM_T_82541_2:
   5748 	case WM_T_82547:
   5749 	case WM_T_82547_2:
   5750 		delay(20000);
   5751 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5752 		break;
   5753 	case WM_T_82571:
   5754 	case WM_T_82572:
   5755 	case WM_T_82573:
   5756 	case WM_T_82574:
   5757 	case WM_T_82583:
   5758 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5759 			delay(10);
   5760 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5761 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5762 			CSR_WRITE_FLUSH(sc);
   5763 		}
   5764 		/* check EECD_EE_AUTORD */
   5765 		wm_get_auto_rd_done(sc);
   5766 		/*
    5767 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5768 		 * is set.
   5769 		 */
   5770 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5771 		    || (sc->sc_type == WM_T_82583))
   5772 			delay(25*1000);
   5773 		break;
   5774 	case WM_T_82575:
   5775 	case WM_T_82576:
   5776 	case WM_T_82580:
   5777 	case WM_T_I350:
   5778 	case WM_T_I354:
   5779 	case WM_T_I210:
   5780 	case WM_T_I211:
   5781 	case WM_T_80003:
   5782 		/* check EECD_EE_AUTORD */
   5783 		wm_get_auto_rd_done(sc);
   5784 		break;
   5785 	case WM_T_ICH8:
   5786 	case WM_T_ICH9:
   5787 	case WM_T_ICH10:
   5788 	case WM_T_PCH:
   5789 	case WM_T_PCH2:
   5790 	case WM_T_PCH_LPT:
   5791 	case WM_T_PCH_SPT:
   5792 	case WM_T_PCH_CNP:
   5793 		break;
   5794 	default:
   5795 		panic("%s: unknown type\n", __func__);
   5796 	}
   5797 
   5798 	/* Check whether EEPROM is present or not */
   5799 	switch (sc->sc_type) {
   5800 	case WM_T_82575:
   5801 	case WM_T_82576:
   5802 	case WM_T_82580:
   5803 	case WM_T_I350:
   5804 	case WM_T_I354:
   5805 	case WM_T_ICH8:
   5806 	case WM_T_ICH9:
   5807 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5808 			/* Not found */
   5809 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5810 			if (sc->sc_type == WM_T_82575)
   5811 				wm_reset_init_script_82575(sc);
   5812 		}
   5813 		break;
   5814 	default:
   5815 		break;
   5816 	}
   5817 
   5818 	if (phy_reset != 0)
   5819 		wm_phy_post_reset(sc);
   5820 
   5821 	if ((sc->sc_type == WM_T_82580)
   5822 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5823 		/* Clear global device reset status bit */
   5824 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5825 	}
   5826 
   5827 	/* Clear any pending interrupt events. */
   5828 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5829 	reg = CSR_READ(sc, WMREG_ICR);
   5830 	if (wm_is_using_msix(sc)) {
   5831 		if (sc->sc_type != WM_T_82574) {
   5832 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5833 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5834 		} else
   5835 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5836 	}
   5837 
   5838 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5839 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5840 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5841 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5842 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5843 		reg |= KABGTXD_BGSQLBIAS;
   5844 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5845 	}
   5846 
   5847 	/* Reload sc_ctrl */
   5848 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5849 
   5850 	wm_set_eee(sc);
   5851 
   5852 	/*
   5853 	 * For PCH, this write will make sure that any noise will be detected
   5854 	 * as a CRC error and be dropped rather than show up as a bad packet
   5855 	 * to the DMA engine
   5856 	 */
   5857 	if (sc->sc_type == WM_T_PCH)
   5858 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5859 
   5860 	if (sc->sc_type >= WM_T_82544)
   5861 		CSR_WRITE(sc, WMREG_WUC, 0);
   5862 
   5863 	if (sc->sc_type < WM_T_82575)
   5864 		wm_disable_aspm(sc); /* Workaround for some chips */
   5865 
   5866 	wm_reset_mdicnfg_82580(sc);
   5867 
   5868 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5869 		wm_pll_workaround_i210(sc);
   5870 
   5871 	if (sc->sc_type == WM_T_80003) {
   5872 		/* Default to TRUE to enable the MDIC W/A */
   5873 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5874 
   5875 		rv = wm_kmrn_readreg(sc,
   5876 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5877 		if (rv == 0) {
   5878 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5879 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5880 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5881 			else
   5882 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5883 		}
   5884 	}
   5885 }
   5886 
   5887 /*
   5888  * wm_add_rxbuf:
   5889  *
    5890  *	Add a receive buffer to the indicated descriptor.
   5891  */
   5892 static int
   5893 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5894 {
   5895 	struct wm_softc *sc = rxq->rxq_sc;
   5896 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5897 	struct mbuf *m;
   5898 	int error;
   5899 
   5900 	KASSERT(mutex_owned(rxq->rxq_lock));
   5901 
   5902 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5903 	if (m == NULL)
   5904 		return ENOBUFS;
   5905 
   5906 	MCLGET(m, M_DONTWAIT);
   5907 	if ((m->m_flags & M_EXT) == 0) {
   5908 		m_freem(m);
   5909 		return ENOBUFS;
   5910 	}
   5911 
   5912 	if (rxs->rxs_mbuf != NULL)
   5913 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5914 
   5915 	rxs->rxs_mbuf = m;
   5916 
   5917 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5918 	/*
   5919 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5920 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5921 	 */
   5922 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5923 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5924 	if (error) {
   5925 		/* XXX XXX XXX */
   5926 		aprint_error_dev(sc->sc_dev,
   5927 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5928 		panic("wm_add_rxbuf");
   5929 	}
   5930 
   5931 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5932 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5933 
   5934 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5935 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5936 			wm_init_rxdesc(rxq, idx);
   5937 	} else
   5938 		wm_init_rxdesc(rxq, idx);
   5939 
   5940 	return 0;
   5941 }
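
/*
 * Illustrative sketch (not part of the original code): a minimal refill
 * loop over a whole ring, assuming rxq_lock is held, would look like:
 *
 *	for (int idx = 0; idx < WM_NRXDESC; idx++)
 *		if (wm_add_rxbuf(rxq, idx) != 0)
 *			break;		(ENOBUFS; retry from the RX path)
 *
 * Note the RCTL_EN guard above: on NEWQUEUE (82575 and later) parts the
 * RX descriptors may only be initialized after RCTL.EN is set, which is
 * why wm_init_locked() defers wm_init_rxdesc() until after
 * wm_set_filter().
 */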
   5942 
   5943 /*
   5944  * wm_rxdrain:
   5945  *
   5946  *	Drain the receive queue.
   5947  */
   5948 static void
   5949 wm_rxdrain(struct wm_rxqueue *rxq)
   5950 {
   5951 	struct wm_softc *sc = rxq->rxq_sc;
   5952 	struct wm_rxsoft *rxs;
   5953 	int i;
   5954 
   5955 	KASSERT(mutex_owned(rxq->rxq_lock));
   5956 
   5957 	for (i = 0; i < WM_NRXDESC; i++) {
   5958 		rxs = &rxq->rxq_soft[i];
   5959 		if (rxs->rxs_mbuf != NULL) {
   5960 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5961 			m_freem(rxs->rxs_mbuf);
   5962 			rxs->rxs_mbuf = NULL;
   5963 		}
   5964 	}
   5965 }
   5966 
   5967 /*
    5968  * Set up registers for RSS.
    5969  *
    5970  * XXX VMDq is not yet supported.
   5971  */
   5972 static void
   5973 wm_init_rss(struct wm_softc *sc)
   5974 {
   5975 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5976 	int i;
   5977 
   5978 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5979 
   5980 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5981 		unsigned int qid, reta_ent;
   5982 
   5983 		qid  = i % sc->sc_nqueues;
   5984 		switch (sc->sc_type) {
   5985 		case WM_T_82574:
   5986 			reta_ent = __SHIFTIN(qid,
   5987 			    RETA_ENT_QINDEX_MASK_82574);
   5988 			break;
   5989 		case WM_T_82575:
   5990 			reta_ent = __SHIFTIN(qid,
   5991 			    RETA_ENT_QINDEX1_MASK_82575);
   5992 			break;
   5993 		default:
   5994 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5995 			break;
   5996 		}
   5997 
   5998 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5999 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   6000 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   6001 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6002 	}
   6003 
   6004 	rss_getkey((uint8_t *)rss_key);
   6005 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6006 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6007 
   6008 	if (sc->sc_type == WM_T_82574)
   6009 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6010 	else
   6011 		mrqc = MRQC_ENABLE_RSS_MQ;
   6012 
   6013 	/*
    6014 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   6015 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6016 	 */
   6017 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6018 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6019 #if 0
   6020 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6021 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6022 #endif
   6023 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6024 
   6025 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6026 }
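
/*
 * Worked example (illustrative): with sc_nqueues == 4, the loop above
 * fills the redirection table round-robin, i.e. RETA entry i maps to
 * queue (i % 4):
 *
 *	i:   0  1  2  3  4  5  6  7 ...
 *	qid: 0  1  2  3  0  1  2  3 ...
 *
 * An incoming flow whose RSS hash selects RETA entry 5 is therefore
 * steered to RX queue 1.
 */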
   6027 
   6028 /*
    6029  * Adjust the TX and RX queue numbers which the system actually uses.
    6030  *
    6031  * The numbers are affected by the parameters below.
    6032  *     - The number of hardware queues
   6033  *     - The number of MSI-X vectors (= "nvectors" argument)
   6034  *     - ncpu
   6035  */
   6036 static void
   6037 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6038 {
   6039 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6040 
   6041 	if (nvectors < 2) {
   6042 		sc->sc_nqueues = 1;
   6043 		return;
   6044 	}
   6045 
   6046 	switch (sc->sc_type) {
   6047 	case WM_T_82572:
   6048 		hw_ntxqueues = 2;
   6049 		hw_nrxqueues = 2;
   6050 		break;
   6051 	case WM_T_82574:
   6052 		hw_ntxqueues = 2;
   6053 		hw_nrxqueues = 2;
   6054 		break;
   6055 	case WM_T_82575:
   6056 		hw_ntxqueues = 4;
   6057 		hw_nrxqueues = 4;
   6058 		break;
   6059 	case WM_T_82576:
   6060 		hw_ntxqueues = 16;
   6061 		hw_nrxqueues = 16;
   6062 		break;
   6063 	case WM_T_82580:
   6064 	case WM_T_I350:
   6065 	case WM_T_I354:
   6066 		hw_ntxqueues = 8;
   6067 		hw_nrxqueues = 8;
   6068 		break;
   6069 	case WM_T_I210:
   6070 		hw_ntxqueues = 4;
   6071 		hw_nrxqueues = 4;
   6072 		break;
   6073 	case WM_T_I211:
   6074 		hw_ntxqueues = 2;
   6075 		hw_nrxqueues = 2;
   6076 		break;
   6077 		/*
    6078 		 * The Ethernet controllers below do not support MSI-X,
    6079 		 * so this driver doesn't let them use multiqueue.
   6080 		 *     - WM_T_80003
   6081 		 *     - WM_T_ICH8
   6082 		 *     - WM_T_ICH9
   6083 		 *     - WM_T_ICH10
   6084 		 *     - WM_T_PCH
   6085 		 *     - WM_T_PCH2
   6086 		 *     - WM_T_PCH_LPT
   6087 		 */
   6088 	default:
   6089 		hw_ntxqueues = 1;
   6090 		hw_nrxqueues = 1;
   6091 		break;
   6092 	}
   6093 
   6094 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6095 
   6096 	/*
    6097 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    6098 	 * limit the number of queues actually used.
   6099 	 */
   6100 	if (nvectors < hw_nqueues + 1)
   6101 		sc->sc_nqueues = nvectors - 1;
   6102 	else
   6103 		sc->sc_nqueues = hw_nqueues;
   6104 
   6105 	/*
    6106 	 * Since more queues than CPUs cannot improve scaling, we limit
    6107 	 * the number of queues actually used.
   6108 	 */
   6109 	if (ncpu < sc->sc_nqueues)
   6110 		sc->sc_nqueues = ncpu;
   6111 }
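
/*
 * Worked example (illustrative): an 82576 exposes 16 hardware queue
 * pairs. With nvectors == 5 on a 16-CPU machine, hw_nqueues is 16 but
 * only nvectors - 1 == 4 vectors remain for TX/RX once one is reserved
 * for the link interrupt, so sc_nqueues becomes 4; ncpu (16) does not
 * lower it further.
 */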
   6112 
   6113 static inline bool
   6114 wm_is_using_msix(struct wm_softc *sc)
   6115 {
   6116 
   6117 	return (sc->sc_nintrs > 1);
   6118 }
   6119 
   6120 static inline bool
   6121 wm_is_using_multiqueue(struct wm_softc *sc)
   6122 {
   6123 
   6124 	return (sc->sc_nqueues > 1);
   6125 }
   6126 
   6127 static int
   6128 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6129 {
   6130 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6131 
   6132 	wmq->wmq_id = qidx;
   6133 	wmq->wmq_intr_idx = intr_idx;
   6134 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6135 	    wm_handle_queue, wmq);
   6136 	if (wmq->wmq_si != NULL)
   6137 		return 0;
   6138 
   6139 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6140 	    wmq->wmq_id);
   6141 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6142 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6143 	return ENOMEM;
   6144 }
   6145 
   6146 /*
    6147  * Both single-interrupt MSI and INTx can use this function.
   6148  */
   6149 static int
   6150 wm_setup_legacy(struct wm_softc *sc)
   6151 {
   6152 	pci_chipset_tag_t pc = sc->sc_pc;
   6153 	const char *intrstr = NULL;
   6154 	char intrbuf[PCI_INTRSTR_LEN];
   6155 	int error;
   6156 
   6157 	error = wm_alloc_txrx_queues(sc);
   6158 	if (error) {
   6159 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6160 		    error);
   6161 		return ENOMEM;
   6162 	}
   6163 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6164 	    sizeof(intrbuf));
   6165 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6166 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6167 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6168 	if (sc->sc_ihs[0] == NULL) {
   6169 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6170 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6171 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6172 		return ENOMEM;
   6173 	}
   6174 
   6175 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6176 	sc->sc_nintrs = 1;
   6177 
   6178 	return wm_softint_establish_queue(sc, 0, 0);
   6179 }
   6180 
   6181 static int
   6182 wm_setup_msix(struct wm_softc *sc)
   6183 {
   6184 	void *vih;
   6185 	kcpuset_t *affinity;
   6186 	int qidx, error, intr_idx, txrx_established;
   6187 	pci_chipset_tag_t pc = sc->sc_pc;
   6188 	const char *intrstr = NULL;
   6189 	char intrbuf[PCI_INTRSTR_LEN];
   6190 	char intr_xname[INTRDEVNAMEBUF];
   6191 
   6192 	if (sc->sc_nqueues < ncpu) {
   6193 		/*
    6194 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6195 		 * interrupts starts from CPU#1.
   6196 		 */
   6197 		sc->sc_affinity_offset = 1;
   6198 	} else {
   6199 		/*
    6200 		 * In this case, this device uses all CPUs, so we unify the
    6201 		 * affinitized cpu_index with the MSI-X vector number for readability.
   6202 		 */
   6203 		sc->sc_affinity_offset = 0;
   6204 	}
   6205 
   6206 	error = wm_alloc_txrx_queues(sc);
   6207 	if (error) {
   6208 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6209 		    error);
   6210 		return ENOMEM;
   6211 	}
   6212 
   6213 	kcpuset_create(&affinity, false);
   6214 	intr_idx = 0;
   6215 
   6216 	/*
   6217 	 * TX and RX
   6218 	 */
   6219 	txrx_established = 0;
   6220 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6221 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6222 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6223 
   6224 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6225 		    sizeof(intrbuf));
   6226 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6227 		    PCI_INTR_MPSAFE, true);
   6228 		memset(intr_xname, 0, sizeof(intr_xname));
   6229 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6230 		    device_xname(sc->sc_dev), qidx);
   6231 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6232 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6233 		if (vih == NULL) {
   6234 			aprint_error_dev(sc->sc_dev,
   6235 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6236 			    intrstr ? " at " : "",
   6237 			    intrstr ? intrstr : "");
   6238 
   6239 			goto fail;
   6240 		}
   6241 		kcpuset_zero(affinity);
   6242 		/* Round-robin affinity */
   6243 		kcpuset_set(affinity, affinity_to);
   6244 		error = interrupt_distribute(vih, affinity, NULL);
   6245 		if (error == 0) {
   6246 			aprint_normal_dev(sc->sc_dev,
   6247 			    "for TX and RX interrupting at %s affinity to %u\n",
   6248 			    intrstr, affinity_to);
   6249 		} else {
   6250 			aprint_normal_dev(sc->sc_dev,
   6251 			    "for TX and RX interrupting at %s\n", intrstr);
   6252 		}
   6253 		sc->sc_ihs[intr_idx] = vih;
   6254 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6255 			goto fail;
   6256 		txrx_established++;
   6257 		intr_idx++;
   6258 	}
   6259 
   6260 	/* LINK */
   6261 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6262 	    sizeof(intrbuf));
   6263 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6264 	memset(intr_xname, 0, sizeof(intr_xname));
   6265 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6266 	    device_xname(sc->sc_dev));
   6267 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6268 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6269 	if (vih == NULL) {
   6270 		aprint_error_dev(sc->sc_dev,
   6271 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6272 		    intrstr ? " at " : "",
   6273 		    intrstr ? intrstr : "");
   6274 
   6275 		goto fail;
   6276 	}
   6277 	/* Keep default affinity to LINK interrupt */
   6278 	aprint_normal_dev(sc->sc_dev,
   6279 	    "for LINK interrupting at %s\n", intrstr);
   6280 	sc->sc_ihs[intr_idx] = vih;
   6281 	sc->sc_link_intr_idx = intr_idx;
   6282 
   6283 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6284 	kcpuset_destroy(affinity);
   6285 	return 0;
   6286 
   6287 fail:
   6288 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6289 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6290 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6291 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6292 	}
   6293 
   6294 	kcpuset_destroy(affinity);
   6295 	return ENOMEM;
   6296 }
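
/*
 * Resulting vector layout (illustrative; "wm0" is an example instance,
 * with sc_nqueues == 4 and sc_affinity_offset == 1, the ncpu >
 * sc_nqueues case):
 *
 *	vector 0: wm0TXRX0 -> CPU1
 *	vector 1: wm0TXRX1 -> CPU2
 *	vector 2: wm0TXRX2 -> CPU3
 *	vector 3: wm0TXRX3 -> CPU4
 *	vector 4: wm0LINK  -> default affinity
 *
 * The interrupt names come from the snprintf() calls above.
 */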
   6297 
   6298 static void
   6299 wm_unset_stopping_flags(struct wm_softc *sc)
   6300 {
   6301 	int i;
   6302 
   6303 	KASSERT(mutex_owned(sc->sc_core_lock));
   6304 
   6305 	/* Must unset stopping flags in ascending order. */
   6306 	for (i = 0; i < sc->sc_nqueues; i++) {
   6307 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6308 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6309 
   6310 		mutex_enter(txq->txq_lock);
   6311 		txq->txq_stopping = false;
   6312 		mutex_exit(txq->txq_lock);
   6313 
   6314 		mutex_enter(rxq->rxq_lock);
   6315 		rxq->rxq_stopping = false;
   6316 		mutex_exit(rxq->rxq_lock);
   6317 	}
   6318 
   6319 	sc->sc_core_stopping = false;
   6320 }
   6321 
   6322 static void
   6323 wm_set_stopping_flags(struct wm_softc *sc)
   6324 {
   6325 	int i;
   6326 
   6327 	KASSERT(mutex_owned(sc->sc_core_lock));
   6328 
   6329 	sc->sc_core_stopping = true;
   6330 
   6331 	/* Must set stopping flags in ascending order. */
   6332 	for (i = 0; i < sc->sc_nqueues; i++) {
   6333 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6334 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6335 
   6336 		mutex_enter(rxq->rxq_lock);
   6337 		rxq->rxq_stopping = true;
   6338 		mutex_exit(rxq->rxq_lock);
   6339 
   6340 		mutex_enter(txq->txq_lock);
   6341 		txq->txq_stopping = true;
   6342 		mutex_exit(txq->txq_lock);
   6343 	}
   6344 }
   6345 
   6346 /*
    6347  * Write the interrupt interval value to ITR or EITR.
   6348  */
   6349 static void
   6350 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6351 {
   6352 
   6353 	if (!wmq->wmq_set_itr)
   6354 		return;
   6355 
   6356 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6357 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6358 
   6359 		/*
    6360 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    6361 		 * the counter field in software.
   6362 		 */
   6363 		if (sc->sc_type == WM_T_82575)
   6364 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6365 			    EITR_COUNTER_MASK_82575);
   6366 		else
   6367 			eitr |= EITR_CNT_INGR;
   6368 
   6369 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6370 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6371 		/*
    6372 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6373 		 * the multiqueue function with MSI-X.
   6374 		 */
   6375 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6376 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6377 	} else {
   6378 		KASSERT(wmq->wmq_id == 0);
   6379 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6380 	}
   6381 
   6382 	wmq->wmq_set_itr = false;
   6383 }
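
/*
 * Encoding example (illustrative): on a NEWQUEUE device other than the
 * 82575, wmq_itr == 450 yields
 *
 *	eitr = __SHIFTIN(450, EITR_ITR_INT_MASK) | EITR_CNT_INGR;
 *
 * that is, the interval field carries the raw wmq_itr value while
 * CNT_INGR lets the hardware manage the counter field that the 82575
 * path has to overwrite in software.
 */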
   6384 
   6385 /*
   6386  * TODO
    6387  * The dynamic ITR calculation below is almost the same as Linux igb's;
    6388  * however, it does not fit wm(4) well, so AIM stays disabled until we
    6389  * find an appropriate ITR calculation.
   6390  */
   6391 /*
    6392  * Calculate the interrupt interval value to be written to the register
    6393  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6394  */
   6395 static void
   6396 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6397 {
   6398 #ifdef NOTYET
   6399 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6400 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6401 	uint32_t avg_size = 0;
   6402 	uint32_t new_itr;
   6403 
   6404 	if (rxq->rxq_packets)
   6405 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6406 	if (txq->txq_packets)
   6407 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6408 
   6409 	if (avg_size == 0) {
   6410 		new_itr = 450; /* restore default value */
   6411 		goto out;
   6412 	}
   6413 
   6414 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6415 	avg_size += 24;
   6416 
   6417 	/* Don't starve jumbo frames */
   6418 	avg_size = uimin(avg_size, 3000);
   6419 
   6420 	/* Give a little boost to mid-size frames */
   6421 	if ((avg_size > 300) && (avg_size < 1200))
   6422 		new_itr = avg_size / 3;
   6423 	else
   6424 		new_itr = avg_size / 2;
   6425 
   6426 out:
   6427 	/*
    6428 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6429 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6430 	 */
   6431 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6432 		new_itr *= 4;
   6433 
   6434 	if (new_itr != wmq->wmq_itr) {
   6435 		wmq->wmq_itr = new_itr;
   6436 		wmq->wmq_set_itr = true;
   6437 	} else
   6438 		wmq->wmq_set_itr = false;
   6439 
   6440 	rxq->rxq_packets = 0;
   6441 	rxq->rxq_bytes = 0;
   6442 	txq->txq_packets = 0;
   6443 	txq->txq_bytes = 0;
   6444 #endif
   6445 }
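
/*
 * Arithmetic sketch for the (disabled) AIM path above: an average frame
 * of 600 bytes becomes avg_size = 624 after the CRC/preamble/gap
 * allowance, falls in the (300, 1200) mid-size band, so
 * new_itr = 624 / 3 = 208; every controller except the NEWQUEUE 82575
 * then scales it by 4 to 832 before it is latched into wmq_itr.
 */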
   6446 
   6447 static void
   6448 wm_init_sysctls(struct wm_softc *sc)
   6449 {
   6450 	struct sysctllog **log;
   6451 	const struct sysctlnode *rnode, *qnode, *cnode;
   6452 	int i, rv;
   6453 	const char *dvname;
   6454 
   6455 	log = &sc->sc_sysctllog;
   6456 	dvname = device_xname(sc->sc_dev);
   6457 
   6458 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6459 	    0, CTLTYPE_NODE, dvname,
   6460 	    SYSCTL_DESCR("wm information and settings"),
   6461 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6462 	if (rv != 0)
   6463 		goto err;
   6464 
   6465 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6466 	    CTLTYPE_BOOL, "txrx_workqueue",
   6467 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6468 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6469 	if (rv != 0)
   6470 		goto teardown;
   6471 
   6472 	for (i = 0; i < sc->sc_nqueues; i++) {
   6473 		struct wm_queue *wmq = &sc->sc_queue[i];
   6474 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6475 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6476 
   6477 		snprintf(sc->sc_queue[i].sysctlname,
   6478 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6479 
   6480 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6481 		    0, CTLTYPE_NODE,
   6482 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6483 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6484 			break;
   6485 
   6486 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6487 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6488 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6489 		    NULL, 0, &txq->txq_free,
   6490 		    0, CTL_CREATE, CTL_EOL) != 0)
   6491 			break;
   6492 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6493 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6494 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6495 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6496 		    0, CTL_CREATE, CTL_EOL) != 0)
   6497 			break;
   6498 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6499 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6500 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6501 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6502 		    0, CTL_CREATE, CTL_EOL) != 0)
   6503 			break;
   6504 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6505 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6506 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6507 		    NULL, 0, &txq->txq_next,
   6508 		    0, CTL_CREATE, CTL_EOL) != 0)
   6509 			break;
   6510 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6511 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6512 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6513 		    NULL, 0, &txq->txq_sfree,
   6514 		    0, CTL_CREATE, CTL_EOL) != 0)
   6515 			break;
   6516 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6517 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6518 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6519 		    NULL, 0, &txq->txq_snext,
   6520 		    0, CTL_CREATE, CTL_EOL) != 0)
   6521 			break;
   6522 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6523 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6524 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6525 		    NULL, 0, &txq->txq_sdirty,
   6526 		    0, CTL_CREATE, CTL_EOL) != 0)
   6527 			break;
   6528 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6529 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6530 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6531 		    NULL, 0, &txq->txq_flags,
   6532 		    0, CTL_CREATE, CTL_EOL) != 0)
   6533 			break;
   6534 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6535 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6536 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6537 		    NULL, 0, &txq->txq_stopping,
   6538 		    0, CTL_CREATE, CTL_EOL) != 0)
   6539 			break;
   6540 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6541 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6542 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6543 		    NULL, 0, &txq->txq_sending,
   6544 		    0, CTL_CREATE, CTL_EOL) != 0)
   6545 			break;
   6546 
   6547 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6548 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6549 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6550 		    NULL, 0, &rxq->rxq_ptr,
   6551 		    0, CTL_CREATE, CTL_EOL) != 0)
   6552 			break;
   6553 	}
   6554 
   6555 #ifdef WM_DEBUG
   6556 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6557 	    CTLTYPE_INT, "debug_flags",
   6558 	    SYSCTL_DESCR(
   6559 		    "Debug flags:\n"	\
   6560 		    "\t0x01 LINK\n"	\
   6561 		    "\t0x02 TX\n"	\
   6562 		    "\t0x04 RX\n"	\
   6563 		    "\t0x08 GMII\n"	\
   6564 		    "\t0x10 MANAGE\n"	\
   6565 		    "\t0x20 NVM\n"	\
   6566 		    "\t0x40 INIT\n"	\
   6567 		    "\t0x80 LOCK"),
   6568 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6569 	if (rv != 0)
   6570 		goto teardown;
   6571 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6572 	    CTLTYPE_BOOL, "trigger_reset",
   6573 	    SYSCTL_DESCR("Trigger an interface reset"),
   6574 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6575 	if (rv != 0)
   6576 		goto teardown;
   6577 #endif
   6578 
   6579 	return;
   6580 
   6581 teardown:
   6582 	sysctl_teardown(log);
   6583 err:
   6584 	sc->sc_sysctllog = NULL;
   6585 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6586 	    __func__, rv);
   6587 }
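
/*
 * Usage sketch (illustrative; "wm0" is an example instance): the nodes
 * created above appear under hw.<devname>, e.g.
 *
 *	sysctl -w hw.wm0.txrx_workqueue=1	(packet processing in wq)
 *	sysctl hw.wm0.q0.txq_free		(per-queue TX state)
 *
 * The per-queue subtree names come from the "q%d" snprintf() above.
 */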
   6588 
   6589 static void
   6590 wm_update_stats(struct wm_softc *sc)
   6591 {
   6592 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6593 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   6594 	    cexterr;
   6595 
   6596 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6597 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6598 	mpc = CSR_READ(sc, WMREG_MPC);
   6599 	colc = CSR_READ(sc, WMREG_COLC);
   6600 	sec = CSR_READ(sc, WMREG_SEC);
   6601 	rlec = CSR_READ(sc, WMREG_RLEC);
   6602 
   6603 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6604 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6605 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6606 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6607 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6608 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6609 
   6610 	if (sc->sc_type >= WM_T_82543) {
   6611 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6612 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6613 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6614 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6615 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6616 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6617 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6618 		} else {
   6619 			cexterr = 0;
   6620 			/* Excessive collision + Link down */
   6621 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6622 			    CSR_READ(sc, WMREG_HTDPMC));
   6623 		}
   6624 
   6625 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6626 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6627 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6628 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6629 			    CSR_READ(sc, WMREG_TSCTFC));
   6630 		else {
   6631 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6632 			    CSR_READ(sc, WMREG_CBRDPC));
   6633 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6634 			    CSR_READ(sc, WMREG_CBRMPC));
   6635 		}
   6636 	} else
   6637 		algnerrc = rxerrc = cexterr = 0;
   6638 
   6639 	if (sc->sc_type >= WM_T_82542_2_1) {
   6640 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6641 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6642 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6643 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6644 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6645 	}
   6646 
   6647 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6648 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6649 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6650 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6651 
   6652 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6653 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6654 	}
   6655 
   6656 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6657 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6658 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6659 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6660 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6661 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6662 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6663 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6664 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6665 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6666 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6667 
   6668 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6669 	    CSR_READ(sc, WMREG_GORCL) +
   6670 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6671 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6672 	    CSR_READ(sc, WMREG_GOTCL) +
   6673 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6674 
   6675 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6676 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6677 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6678 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6679 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6680 
   6681 	if (sc->sc_type >= WM_T_82540) {
   6682 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6683 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6684 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6685 	}
   6686 
   6687 	/*
   6688 	 * The TOR(L) register includes:
   6689 	 *  - Error
   6690 	 *  - Flow control
    6691 	 *  - Broadcast rejected (this note appears in the 82574 and newer
    6692 	 *    datasheets. What does "broadcast rejected" mean?)
   6693 	 */
   6694 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6695 	    CSR_READ(sc, WMREG_TORL) +
   6696 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6697 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6698 	    CSR_READ(sc, WMREG_TOTL) +
   6699 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6700 
   6701 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6702 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6703 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6704 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6705 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6706 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6710 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6711 	if (sc->sc_type >= WM_T_82571)
   6712 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6713 	if (sc->sc_type < WM_T_82575) {
   6714 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6715 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6716 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6717 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6718 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6719 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6720 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6721 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6722 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6723 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6724 	} else if (!WM_IS_ICHPCH(sc)) {
   6725 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6726 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6727 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6728 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6729 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6730 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6731 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6732 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6733 
   6734 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6735 		    CSR_READ(sc, WMREG_HGORCL) +
   6736 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6737 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6738 		    CSR_READ(sc, WMREG_HGOTCL) +
   6739 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6740 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6741 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6742 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6743 	}
   6744 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6745 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6746 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6747 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6748 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6749 			    CSR_READ(sc, WMREG_B2OGPRC));
   6750 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6751 			    CSR_READ(sc, WMREG_O2BSPC));
   6752 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6753 			    CSR_READ(sc, WMREG_B2OSPC));
   6754 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6755 			    CSR_READ(sc, WMREG_O2BGPTC));
   6756 		}
   6757 	}
   6758 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6759 	if_statadd_ref(nsr, if_collisions, colc);
   6760 	if_statadd_ref(nsr, if_ierrors,
   6761 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6762 	/*
   6763 	 * WMREG_RNBC is incremented when there are no available buffers in
    6764 	 * host memory. It does not equal the number of dropped packets, because
    6765 	 * an Ethernet controller can still receive packets in that case if
    6766 	 * there is space in the PHY's FIFO.
    6767 	 *
    6768 	 * If you want to track the WMREG_RNBC count, use a dedicated EVCNT
    6769 	 * instead of if_iqdrops.
   6770 	 */
   6771 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6772 	IF_STAT_PUTREF(ifp);
   6773 }
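
/*
 * The totals accumulated above via WM_EVCNT_ADD() are exported as event
 * counters, so they can be inspected from userland with, for example,
 * "vmstat -e" (illustrative; any evcnt viewer works).
 */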
   6774 
   6775 void
   6776 wm_clear_evcnt(struct wm_softc *sc)
   6777 {
   6778 #ifdef WM_EVENT_COUNTERS
   6779 	int i;
   6780 
   6781 	/* RX queues */
   6782 	for (i = 0; i < sc->sc_nqueues; i++) {
   6783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6784 
   6785 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6786 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6787 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6788 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6789 	}
   6790 
   6791 	/* TX queues */
   6792 	for (i = 0; i < sc->sc_nqueues; i++) {
   6793 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6794 		int j;
   6795 
   6796 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6797 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6798 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6799 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6800 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6801 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6802 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6803 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6804 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6805 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6806 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6807 
   6808 		for (j = 0; j < WM_NTXSEGS; j++)
   6809 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6810 
   6811 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6812 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6813 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6814 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6815 		if (sc->sc_type <= WM_T_82544)
   6816 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6817 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6818 	}
   6819 
   6820 	/* Miscs */
   6821 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6822 
   6823 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6824 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6825 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6826 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6827 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6828 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6829 
   6830 	if (sc->sc_type >= WM_T_82543) {
   6831 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6832 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6833 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6834 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6835 		else
   6836 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6837 
   6838 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6839 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6840 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6841 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6842 		else {
   6843 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6844 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6845 		}
   6846 	}
   6847 
   6848 	if (sc->sc_type >= WM_T_82542_2_1) {
   6849 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6850 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6851 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6852 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6853 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6854 	}
   6855 
   6856 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6857 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6858 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6859 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6860 
   6861 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6862 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6863 
   6864 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6865 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6866 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6867 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6868 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6869 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6870 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6871 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6872 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6873 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6874 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6875 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6876 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6877 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6878 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6879 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6880 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6881 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6882 	if (sc->sc_type >= WM_T_82540) {
   6883 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6884 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6885 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6886 	}
   6887 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6888 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6889 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6890 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6891 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6892 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6893 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6894 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6895 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6896 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6897 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6898 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6899 	if (sc->sc_type >= WM_T_82571)
   6900 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6901 	if (sc->sc_type < WM_T_82575) {
   6902 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6903 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6904 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6905 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6906 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6907 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6908 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6909 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6910 	} else if (!WM_IS_ICHPCH(sc)) {
   6911 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6912 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6913 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6914 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6915 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6916 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6917 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6918 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6919 
   6920 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6921 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6922 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6923 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6924 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6925 	}
   6926 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6927 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6928 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6929 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6930 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6931 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6932 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6933 	}
   6934 #endif
   6935 }
   6936 
   6937 /*
   6938  * wm_init:		[ifnet interface function]
   6939  *
   6940  *	Initialize the interface.
   6941  */
   6942 static int
   6943 wm_init(struct ifnet *ifp)
   6944 {
   6945 	struct wm_softc *sc = ifp->if_softc;
   6946 	int ret;
   6947 
   6948 	KASSERT(IFNET_LOCKED(ifp));
   6949 
   6950 	if (sc->sc_dying)
   6951 		return ENXIO;
   6952 
   6953 	mutex_enter(sc->sc_core_lock);
   6954 	ret = wm_init_locked(ifp);
   6955 	mutex_exit(sc->sc_core_lock);
   6956 
   6957 	return ret;
   6958 }
   6959 
   6960 static int
   6961 wm_init_locked(struct ifnet *ifp)
   6962 {
   6963 	struct wm_softc *sc = ifp->if_softc;
   6964 	struct ethercom *ec = &sc->sc_ethercom;
   6965 	int i, j, trynum, error = 0;
   6966 	uint32_t reg, sfp_mask = 0;
   6967 
   6968 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6969 		device_xname(sc->sc_dev), __func__));
   6970 	KASSERT(IFNET_LOCKED(ifp));
   6971 	KASSERT(mutex_owned(sc->sc_core_lock));
   6972 
   6973 	/*
    6974 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6975 	 * There is a small but measurable benefit to avoiding the adjustment
   6976 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6977 	 * on such platforms.  One possibility is that the DMA itself is
   6978 	 * slightly more efficient if the front of the entire packet (instead
   6979 	 * of the front of the headers) is aligned.
   6980 	 *
   6981 	 * Note we must always set align_tweak to 0 if we are using
   6982 	 * jumbo frames.
   6983 	 */
   6984 #ifdef __NO_STRICT_ALIGNMENT
   6985 	sc->sc_align_tweak = 0;
   6986 #else
   6987 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6988 		sc->sc_align_tweak = 0;
   6989 	else
   6990 		sc->sc_align_tweak = 2;
   6991 #endif /* __NO_STRICT_ALIGNMENT */
   6992 
   6993 	/* Cancel any pending I/O. */
   6994 	wm_stop_locked(ifp, false, false);
   6995 
   6996 	/* Update statistics before reset */
   6997 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6998 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6999 
   7000 	/* >= PCH_SPT hardware workaround before reset. */
   7001 	if (sc->sc_type >= WM_T_PCH_SPT)
   7002 		wm_flush_desc_rings(sc);
   7003 
   7004 	/* Reset the chip to a known state. */
   7005 	wm_reset(sc);
   7006 
   7007 	/*
   7008 	 * AMT based hardware can now take control from firmware
   7009 	 * Do this after reset.
   7010 	 */
   7011 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7012 		wm_get_hw_control(sc);
   7013 
   7014 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7015 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7016 		wm_legacy_irq_quirk_spt(sc);
   7017 
   7018 	/* Init hardware bits */
   7019 	wm_initialize_hardware_bits(sc);
   7020 
   7021 	/* Reset the PHY. */
   7022 	if (sc->sc_flags & WM_F_HAS_MII)
   7023 		wm_gmii_reset(sc);
   7024 
   7025 	if (sc->sc_type >= WM_T_ICH8) {
   7026 		reg = CSR_READ(sc, WMREG_GCR);
   7027 		/*
   7028 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   7029 		 * default after reset.
   7030 		 */
   7031 		if (sc->sc_type == WM_T_ICH8)
   7032 			reg |= GCR_NO_SNOOP_ALL;
   7033 		else
   7034 			reg &= ~GCR_NO_SNOOP_ALL;
   7035 		CSR_WRITE(sc, WMREG_GCR, reg);
   7036 	}
   7037 
   7038 	if ((sc->sc_type >= WM_T_ICH8)
   7039 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7040 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7041 
   7042 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7043 		reg |= CTRL_EXT_RO_DIS;
   7044 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7045 	}
   7046 
   7047 	/* Calculate (E)ITR value */
   7048 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7049 		/*
   7050 		 * For NEWQUEUE's EITR (except for 82575).
    7051 		 * The 82575's EITR should be set to the same throttling value
    7052 		 * as the other old controllers' ITR because the interrupt/sec
    7053 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    7054 		 *
    7055 		 * The 82574's EITR should be set to the same throttling value as ITR.
   7056 		 *
   7057 		 * For N interrupts/sec, set this value to:
   7058 		 * 1,000,000 / N in contrast to ITR throttling value.
   7059 		 */
   7060 		sc->sc_itr_init = 450;
   7061 	} else if (sc->sc_type >= WM_T_82543) {
   7062 		/*
   7063 		 * Set up the interrupt throttling register (units of 256ns)
   7064 		 * Note that a footnote in Intel's documentation says this
   7065 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7066 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7067 		 * that that is also true for the 1024ns units of the other
   7068 		 * interrupt-related timer registers -- so, really, we ought
   7069 		 * to divide this value by 4 when the link speed is low.
   7070 		 *
   7071 		 * XXX implement this division at link speed change!
   7072 		 */
   7073 
   7074 		/*
   7075 		 * For N interrupts/sec, set this value to:
   7076 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7077 		 * absolute and packet timer values to this value
   7078 		 * divided by 4 to get "simple timer" behavior.
   7079 		 */
   7080 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7081 	}
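
	/*
	 * Worked numbers for the two branches above (illustrative):
	 *  - legacy ITR: 1,000,000,000 / (2604 ints/s * 256) ~= 1500,
	 *    matching sc_itr_init just above;
	 *  - NEWQUEUE EITR: sc_itr_init = 450 corresponds to roughly
	 *    1,000,000 / 450 ~= 2222 ints/s by the formula in the
	 *    comment above.
	 */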
   7082 
   7083 	error = wm_init_txrx_queues(sc);
   7084 	if (error)
   7085 		goto out;
   7086 
   7087 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7088 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7089 	    (sc->sc_type >= WM_T_82575))
   7090 		wm_serdes_power_up_link_82575(sc);
   7091 
   7092 	/* Clear out the VLAN table -- we don't use it (yet). */
   7093 	CSR_WRITE(sc, WMREG_VET, 0);
   7094 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7095 		trynum = 10; /* Due to hw errata */
   7096 	else
   7097 		trynum = 1;
   7098 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7099 		for (j = 0; j < trynum; j++)
   7100 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7101 
   7102 	/*
   7103 	 * Set up flow-control parameters.
   7104 	 *
   7105 	 * XXX Values could probably stand some tuning.
   7106 	 */
   7107 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7108 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7109 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7110 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   7111 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7112 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7113 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7114 	}
   7115 
   7116 	sc->sc_fcrtl = FCRTL_DFLT;
   7117 	if (sc->sc_type < WM_T_82543) {
   7118 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7119 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7120 	} else {
   7121 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7122 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7123 	}
   7124 
   7125 	if (sc->sc_type == WM_T_80003)
   7126 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7127 	else
   7128 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7129 
   7130 	/* Writes the control register. */
   7131 	wm_set_vlan(sc);
   7132 
   7133 	if (sc->sc_flags & WM_F_HAS_MII) {
   7134 		uint16_t kmreg;
   7135 
   7136 		switch (sc->sc_type) {
   7137 		case WM_T_80003:
   7138 		case WM_T_ICH8:
   7139 		case WM_T_ICH9:
   7140 		case WM_T_ICH10:
   7141 		case WM_T_PCH:
   7142 		case WM_T_PCH2:
   7143 		case WM_T_PCH_LPT:
   7144 		case WM_T_PCH_SPT:
   7145 		case WM_T_PCH_CNP:
   7146 			/*
   7147 			 * Set the mac to wait the maximum time between each
   7148 			 * iteration and increase the max iterations when
   7149 			 * polling the phy; this fixes erroneous timeouts at
   7150 			 * 10Mbps.
   7151 			 */
   7152 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7153 			    0xFFFF);
   7154 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7155 			    &kmreg);
   7156 			kmreg |= 0x3F;
   7157 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7158 			    kmreg);
   7159 			break;
   7160 		default:
   7161 			break;
   7162 		}
   7163 
   7164 		if (sc->sc_type == WM_T_80003) {
   7165 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7166 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7167 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7168 
   7169 			/* Bypass RX and TX FIFOs */
   7170 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7171 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7172 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7173 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7174 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7175 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7176 		}
   7177 	}
   7178 #if 0
   7179 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7180 #endif
   7181 
   7182 	/* Set up checksum offload parameters. */
   7183 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7184 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7185 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7186 		reg |= RXCSUM_IPOFL;
   7187 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7188 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7189 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7190 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7191 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7192 
   7193 	/* Set registers about MSI-X */
   7194 	if (wm_is_using_msix(sc)) {
   7195 		uint32_t ivar, qintr_idx;
   7196 		struct wm_queue *wmq;
   7197 		unsigned int qid;
   7198 
   7199 		if (sc->sc_type == WM_T_82575) {
   7200 			/* Interrupt control */
   7201 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7202 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7203 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7204 
   7205 			/* TX and RX */
   7206 			for (i = 0; i < sc->sc_nqueues; i++) {
   7207 				wmq = &sc->sc_queue[i];
   7208 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7209 				    EITR_TX_QUEUE(wmq->wmq_id)
   7210 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7211 			}
   7212 			/* Link status */
   7213 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7214 			    EITR_OTHER);
   7215 		} else if (sc->sc_type == WM_T_82574) {
   7216 			/* Interrupt control */
   7217 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7218 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7219 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7220 
   7221 			/*
   7222 			 * Work around issue with spurious interrupts
   7223 			 * in MSI-X mode.
    7224 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    7225 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   7226 			 */
   7227 			reg = CSR_READ(sc, WMREG_RFCTL);
   7228 			reg |= WMREG_RFCTL_ACKDIS;
   7229 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7230 
   7231 			ivar = 0;
   7232 			/* TX and RX */
   7233 			for (i = 0; i < sc->sc_nqueues; i++) {
   7234 				wmq = &sc->sc_queue[i];
   7235 				qid = wmq->wmq_id;
   7236 				qintr_idx = wmq->wmq_intr_idx;
   7237 
   7238 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7239 				    IVAR_TX_MASK_Q_82574(qid));
   7240 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7241 				    IVAR_RX_MASK_Q_82574(qid));
   7242 			}
   7243 			/* Link status */
   7244 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7245 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7246 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7247 		} else {
   7248 			/* Interrupt control */
   7249 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7250 			    | GPIE_EIAME | GPIE_PBA);
   7251 
   7252 			switch (sc->sc_type) {
   7253 			case WM_T_82580:
   7254 			case WM_T_I350:
   7255 			case WM_T_I354:
   7256 			case WM_T_I210:
   7257 			case WM_T_I211:
   7258 				/* TX and RX */
   7259 				for (i = 0; i < sc->sc_nqueues; i++) {
   7260 					wmq = &sc->sc_queue[i];
   7261 					qid = wmq->wmq_id;
   7262 					qintr_idx = wmq->wmq_intr_idx;
   7263 
   7264 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7265 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7266 					ivar |= __SHIFTIN((qintr_idx
   7267 						| IVAR_VALID),
   7268 					    IVAR_TX_MASK_Q(qid));
   7269 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7270 					ivar |= __SHIFTIN((qintr_idx
   7271 						| IVAR_VALID),
   7272 					    IVAR_RX_MASK_Q(qid));
   7273 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7274 				}
   7275 				break;
   7276 			case WM_T_82576:
   7277 				/* TX and RX */
   7278 				for (i = 0; i < sc->sc_nqueues; i++) {
   7279 					wmq = &sc->sc_queue[i];
   7280 					qid = wmq->wmq_id;
   7281 					qintr_idx = wmq->wmq_intr_idx;
   7282 
   7283 					ivar = CSR_READ(sc,
   7284 					    WMREG_IVAR_Q_82576(qid));
   7285 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7286 					ivar |= __SHIFTIN((qintr_idx
   7287 						| IVAR_VALID),
   7288 					    IVAR_TX_MASK_Q_82576(qid));
   7289 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7290 					ivar |= __SHIFTIN((qintr_idx
   7291 						| IVAR_VALID),
   7292 					    IVAR_RX_MASK_Q_82576(qid));
   7293 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7294 					    ivar);
   7295 				}
   7296 				break;
   7297 			default:
   7298 				break;
   7299 			}
   7300 
   7301 			/* Link status */
   7302 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7303 			    IVAR_MISC_OTHER);
   7304 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7305 		}
   7306 
   7307 		if (wm_is_using_multiqueue(sc)) {
   7308 			wm_init_rss(sc);
   7309 
   7310 			/*
   7311 			** NOTE: Receive Full-Packet Checksum Offload
    7312 			** is mutually exclusive with Multiqueue. However,
    7313 			** this is not the same as TCP/IP checksum offload,
    7314 			** which still works.
   7315 			*/
   7316 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7317 			reg |= RXCSUM_PCSD;
   7318 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7319 		}
   7320 	}
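
	/*
	 * IVAR sketch (illustrative): with a single queue (qid 0 on
	 * vector 0) and the link interrupt on vector 1, the 82574 path
	 * above builds
	 *
	 *	ivar = __SHIFTIN(IVAR_VALID_82574 | 0,
	 *		IVAR_TX_MASK_Q_82574(0))
	 *	     | __SHIFTIN(IVAR_VALID_82574 | 0,
	 *		IVAR_RX_MASK_Q_82574(0))
	 *	     | __SHIFTIN(IVAR_VALID_82574 | 1, IVAR_OTHER_MASK);
	 *
	 * i.e. each cause (TX, RX, other) is routed to its MSI-X vector
	 * with the valid bit set; IVAR_INT_ON_ALL_WB is OR'd in at the
	 * final CSR_WRITE().
	 */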
   7321 
   7322 	/* Set up the interrupt registers. */
   7323 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7324 
   7325 	/* Enable SFP module insertion interrupt if it's required */
   7326 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7327 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7328 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7329 		sfp_mask = ICR_GPI(0);
   7330 	}
   7331 
   7332 	if (wm_is_using_msix(sc)) {
   7333 		uint32_t mask;
   7334 		struct wm_queue *wmq;
   7335 
   7336 		switch (sc->sc_type) {
   7337 		case WM_T_82574:
   7338 			mask = 0;
   7339 			for (i = 0; i < sc->sc_nqueues; i++) {
   7340 				wmq = &sc->sc_queue[i];
   7341 				mask |= ICR_TXQ(wmq->wmq_id);
   7342 				mask |= ICR_RXQ(wmq->wmq_id);
   7343 			}
   7344 			mask |= ICR_OTHER;
   7345 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7346 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7347 			break;
   7348 		default:
   7349 			if (sc->sc_type == WM_T_82575) {
   7350 				mask = 0;
   7351 				for (i = 0; i < sc->sc_nqueues; i++) {
   7352 					wmq = &sc->sc_queue[i];
   7353 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7354 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7355 				}
   7356 				mask |= EITR_OTHER;
   7357 			} else {
   7358 				mask = 0;
   7359 				for (i = 0; i < sc->sc_nqueues; i++) {
   7360 					wmq = &sc->sc_queue[i];
   7361 					mask |= 1 << wmq->wmq_intr_idx;
   7362 				}
   7363 				mask |= 1 << sc->sc_link_intr_idx;
   7364 			}
   7365 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7366 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7367 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7368 
   7369 			/* For other interrupts */
   7370 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7371 			break;
   7372 		}
   7373 	} else {
   7374 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7375 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7376 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7377 	}
   7378 
   7379 	/* Set up the inter-packet gap. */
   7380 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7381 
   7382 	if (sc->sc_type >= WM_T_82543) {
   7383 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7384 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7385 			wm_itrs_writereg(sc, wmq);
   7386 		}
   7387 		/*
    7388 		 * Link interrupts occur much less often than TX
    7389 		 * and RX interrupts, so we don't tune the
    7390 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    7391 		 * FreeBSD's if_igb does.
   7392 		 */
   7393 	}
   7394 
   7395 	/* Set the VLAN EtherType. */
   7396 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7397 
   7398 	/*
   7399 	 * Set up the transmit control register; we start out with
   7400 	 * a collision distance suitable for FDX, but update it when
   7401 	 * we resolve the media type.
   7402 	 */
   7403 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7404 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7405 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7406 	if (sc->sc_type >= WM_T_82571)
   7407 		sc->sc_tctl |= TCTL_MULR;
   7408 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7409 
   7410 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7411 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   7412 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7413 	}
   7414 
   7415 	if (sc->sc_type == WM_T_80003) {
   7416 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7417 		reg &= ~TCTL_EXT_GCEX_MASK;
   7418 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7419 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7420 	}
   7421 
   7422 	/* Set the media. */
   7423 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7424 		goto out;
   7425 
   7426 	/* Configure for OS presence */
   7427 	wm_init_manageability(sc);
   7428 
   7429 	/*
   7430 	 * Set up the receive control register; we actually program the
   7431 	 * register when we set the receive filter. Use multicast address
   7432 	 * offset type 0.
   7433 	 *
   7434 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7435 	 * don't enable that feature.
   7436 	 */
   7437 	sc->sc_mchash_type = 0;
   7438 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7439 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7440 
    7441 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   7442 	if (sc->sc_type == WM_T_82574)
   7443 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7444 
   7445 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7446 		sc->sc_rctl |= RCTL_SECRC;
   7447 
   7448 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7449 	    && (ifp->if_mtu > ETHERMTU)) {
   7450 		sc->sc_rctl |= RCTL_LPE;
   7451 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7452 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7453 	}
   7454 
   7455 	if (MCLBYTES == 2048)
   7456 		sc->sc_rctl |= RCTL_2k;
   7457 	else {
   7458 		if (sc->sc_type >= WM_T_82543) {
   7459 			switch (MCLBYTES) {
   7460 			case 4096:
   7461 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7462 				break;
   7463 			case 8192:
   7464 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7465 				break;
   7466 			case 16384:
   7467 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7468 				break;
   7469 			default:
   7470 				panic("wm_init: MCLBYTES %d unsupported",
   7471 				    MCLBYTES);
   7472 				break;
   7473 			}
   7474 		} else
   7475 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7476 	}
   7477 
   7478 	/* Enable ECC */
   7479 	switch (sc->sc_type) {
   7480 	case WM_T_82571:
   7481 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7482 		reg |= PBA_ECC_CORR_EN;
   7483 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7484 		break;
   7485 	case WM_T_PCH_LPT:
   7486 	case WM_T_PCH_SPT:
   7487 	case WM_T_PCH_CNP:
   7488 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7489 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7490 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7491 
   7492 		sc->sc_ctrl |= CTRL_MEHE;
   7493 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7494 		break;
   7495 	default:
   7496 		break;
   7497 	}
   7498 
   7499 	/*
   7500 	 * Set the receive filter.
   7501 	 *
   7502 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7503 	 * the setting of RCTL.EN in wm_set_filter()
   7504 	 */
   7505 	wm_set_filter(sc);
   7506 
	/* On 82575 and later, set RDT only after RX is enabled. */
   7508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7509 		int qidx;
   7510 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7511 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7512 			for (i = 0; i < WM_NRXDESC; i++) {
   7513 				mutex_enter(rxq->rxq_lock);
   7514 				wm_init_rxdesc(rxq, i);
   7515 				mutex_exit(rxq->rxq_lock);
   7516 
   7517 			}
   7518 		}
   7519 	}
   7520 
   7521 	wm_unset_stopping_flags(sc);
   7522 
   7523 	/* Start the one second link check clock. */
   7524 	callout_schedule(&sc->sc_tick_ch, hz);
   7525 
   7526 	/*
   7527 	 * ...all done! (IFNET_LOCKED asserted above.)
   7528 	 */
   7529 	ifp->if_flags |= IFF_RUNNING;
   7530 
   7531 out:
   7532 	/* Save last flags for the callback */
   7533 	sc->sc_if_flags = ifp->if_flags;
   7534 	sc->sc_ec_capenable = ec->ec_capenable;
   7535 	if (error)
   7536 		log(LOG_ERR, "%s: interface not running\n",
   7537 		    device_xname(sc->sc_dev));
   7538 	return error;
   7539 }
   7540 
   7541 /*
   7542  * wm_stop:		[ifnet interface function]
   7543  *
   7544  *	Stop transmission on the interface.
   7545  */
   7546 static void
   7547 wm_stop(struct ifnet *ifp, int disable)
   7548 {
   7549 	struct wm_softc *sc = ifp->if_softc;
   7550 
   7551 	ASSERT_SLEEPABLE();
   7552 	KASSERT(IFNET_LOCKED(ifp));
   7553 
   7554 	mutex_enter(sc->sc_core_lock);
   7555 	wm_stop_locked(ifp, disable ? true : false, true);
   7556 	mutex_exit(sc->sc_core_lock);
   7557 
   7558 	/*
   7559 	 * After wm_set_stopping_flags(), it is guaranteed that
   7560 	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
   7564 	 */
   7565 	for (int i = 0; i < sc->sc_nqueues; i++)
   7566 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7567 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7568 }
   7569 
   7570 static void
   7571 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7572 {
   7573 	struct wm_softc *sc = ifp->if_softc;
   7574 	struct wm_txsoft *txs;
   7575 	int i, qidx;
   7576 
   7577 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7578 		device_xname(sc->sc_dev), __func__));
   7579 	KASSERT(IFNET_LOCKED(ifp));
   7580 	KASSERT(mutex_owned(sc->sc_core_lock));
   7581 
   7582 	wm_set_stopping_flags(sc);
   7583 
   7584 	if (sc->sc_flags & WM_F_HAS_MII) {
   7585 		/* Down the MII. */
   7586 		mii_down(&sc->sc_mii);
   7587 	} else {
   7588 #if 0
   7589 		/* Should we clear PHY's status properly? */
   7590 		wm_reset(sc);
   7591 #endif
   7592 	}
   7593 
   7594 	/* Stop the transmit and receive processes. */
   7595 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7596 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7597 	sc->sc_rctl &= ~RCTL_EN;
   7598 
   7599 	/*
   7600 	 * Clear the interrupt mask to ensure the device cannot assert its
   7601 	 * interrupt line.
   7602 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7603 	 * service any currently pending or shared interrupt.
   7604 	 */
   7605 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7606 	sc->sc_icr = 0;
   7607 	if (wm_is_using_msix(sc)) {
   7608 		if (sc->sc_type != WM_T_82574) {
   7609 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7610 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7611 		} else
   7612 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7613 	}
   7614 
   7615 	/*
   7616 	 * Stop callouts after interrupts are disabled; if we have
   7617 	 * to wait for them, we will be releasing the CORE_LOCK
   7618 	 * briefly, which will unblock interrupts on the current CPU.
   7619 	 */
   7620 
   7621 	/* Stop the one second clock. */
   7622 	if (wait)
   7623 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7624 	else
   7625 		callout_stop(&sc->sc_tick_ch);
   7626 
   7627 	/* Stop the 82547 Tx FIFO stall check timer. */
   7628 	if (sc->sc_type == WM_T_82547) {
   7629 		if (wait)
   7630 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7631 		else
   7632 			callout_stop(&sc->sc_txfifo_ch);
   7633 	}
   7634 
   7635 	/* Release any queued transmit buffers. */
   7636 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7637 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7638 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7639 		struct mbuf *m;
   7640 
   7641 		mutex_enter(txq->txq_lock);
   7642 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7643 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7644 			txs = &txq->txq_soft[i];
   7645 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   7647 				m_freem(txs->txs_mbuf);
   7648 				txs->txs_mbuf = NULL;
   7649 			}
   7650 		}
   7651 		/* Drain txq_interq */
   7652 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7653 			m_freem(m);
   7654 		mutex_exit(txq->txq_lock);
   7655 	}
   7656 
   7657 	/* Mark the interface as down and cancel the watchdog timer. */
   7658 	ifp->if_flags &= ~IFF_RUNNING;
   7659 	sc->sc_if_flags = ifp->if_flags;
   7660 
   7661 	if (disable) {
   7662 		for (i = 0; i < sc->sc_nqueues; i++) {
   7663 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7664 			mutex_enter(rxq->rxq_lock);
   7665 			wm_rxdrain(rxq);
   7666 			mutex_exit(rxq->rxq_lock);
   7667 		}
   7668 	}
   7669 
   7670 #if 0 /* notyet */
   7671 	if (sc->sc_type >= WM_T_82544)
   7672 		CSR_WRITE(sc, WMREG_WUC, 0);
   7673 #endif
   7674 }
   7675 
   7676 static void
   7677 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7678 {
   7679 	struct mbuf *m;
   7680 	int i;
   7681 
   7682 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7683 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7684 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7685 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7686 		    m->m_data, m->m_len, m->m_flags);
   7687 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7688 	    i, i == 1 ? "" : "s");
   7689 }
   7690 
   7691 /*
   7692  * wm_82547_txfifo_stall:
   7693  *
   7694  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7695  *	reset the FIFO pointers, and restart packet transmission.
   7696  */
   7697 static void
   7698 wm_82547_txfifo_stall(void *arg)
   7699 {
   7700 	struct wm_softc *sc = arg;
   7701 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7702 
   7703 	mutex_enter(txq->txq_lock);
   7704 
   7705 	if (txq->txq_stopping)
   7706 		goto out;
   7707 
   7708 	if (txq->txq_fifo_stall) {
   7709 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7710 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7711 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7712 			/*
   7713 			 * Packets have drained.  Stop transmitter, reset
   7714 			 * FIFO pointers, restart transmitter, and kick
   7715 			 * the packet queue.
   7716 			 */
   7717 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7718 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7719 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7720 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7721 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7722 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7723 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7724 			CSR_WRITE_FLUSH(sc);
   7725 
   7726 			txq->txq_fifo_head = 0;
   7727 			txq->txq_fifo_stall = 0;
   7728 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7729 		} else {
   7730 			/*
   7731 			 * Still waiting for packets to drain; try again in
   7732 			 * another tick.
   7733 			 */
   7734 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7735 		}
   7736 	}
   7737 
   7738 out:
   7739 	mutex_exit(txq->txq_lock);
   7740 }
   7741 
   7742 /*
   7743  * wm_82547_txfifo_bugchk:
   7744  *
 *	Check for the bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7748  *
   7749  *	We do this by checking the amount of space before the end
   7750  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7751  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7752  *	the internal FIFO pointers to the beginning, and restart
   7753  *	transmission on the interface.
   7754  */
   7755 #define	WM_FIFO_HDR		0x10
   7756 #define	WM_82547_PAD_LEN	0x3e0
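/*
 * Example of the accounting below: a 1514-byte frame occupies
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space,
 * i.e. the frame plus a 16-byte header, rounded up to a 16-byte boundary.
 */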
   7757 static int
   7758 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7759 {
   7760 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7761 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7762 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7763 
   7764 	/* Just return if already stalled. */
   7765 	if (txq->txq_fifo_stall)
   7766 		return 1;
   7767 
   7768 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7769 		/* Stall only occurs in half-duplex mode. */
   7770 		goto send_packet;
   7771 	}
   7772 
   7773 	if (len >= WM_82547_PAD_LEN + space) {
   7774 		txq->txq_fifo_stall = 1;
   7775 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7776 		return 1;
   7777 	}
   7778 
   7779 send_packet:
   7780 	txq->txq_fifo_head += len;
   7781 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7782 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7783 
   7784 	return 0;
   7785 }
   7786 
   7787 static int
   7788 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7789 {
   7790 	int error;
   7791 
   7792 	/*
   7793 	 * Allocate the control data structures, and create and load the
   7794 	 * DMA map for it.
   7795 	 *
   7796 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7797 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7798 	 * both sets within the same 4G segment.
   7799 	 */
   7800 	if (sc->sc_type < WM_T_82544)
   7801 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7802 	else
   7803 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7804 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7805 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7806 	else
   7807 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7808 
   7809 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7810 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7811 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7812 		aprint_error_dev(sc->sc_dev,
   7813 		    "unable to allocate TX control data, error = %d\n",
   7814 		    error);
   7815 		goto fail_0;
   7816 	}
   7817 
   7818 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7819 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7820 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7821 		aprint_error_dev(sc->sc_dev,
   7822 		    "unable to map TX control data, error = %d\n", error);
   7823 		goto fail_1;
   7824 	}
   7825 
   7826 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7827 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7828 		aprint_error_dev(sc->sc_dev,
   7829 		    "unable to create TX control data DMA map, error = %d\n",
   7830 		    error);
   7831 		goto fail_2;
   7832 	}
   7833 
   7834 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7835 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7836 		aprint_error_dev(sc->sc_dev,
   7837 		    "unable to load TX control data DMA map, error = %d\n",
   7838 		    error);
   7839 		goto fail_3;
   7840 	}
   7841 
   7842 	return 0;
   7843 
   7844 fail_3:
   7845 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7846 fail_2:
   7847 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7848 	    WM_TXDESCS_SIZE(txq));
   7849 fail_1:
   7850 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7851 fail_0:
   7852 	return error;
   7853 }
   7854 
   7855 static void
   7856 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7857 {
   7858 
   7859 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7860 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7861 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7862 	    WM_TXDESCS_SIZE(txq));
   7863 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7864 }
   7865 
   7866 static int
   7867 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7868 {
   7869 	int error;
   7870 	size_t rxq_descs_size;
   7871 
   7872 	/*
   7873 	 * Allocate the control data structures, and create and load the
   7874 	 * DMA map for it.
   7875 	 *
   7876 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7877 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7878 	 * both sets within the same 4G segment.
   7879 	 */
   7880 	rxq->rxq_ndesc = WM_NRXDESC;
   7881 	if (sc->sc_type == WM_T_82574)
   7882 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7883 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7884 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7885 	else
   7886 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7887 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7888 
   7889 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7890 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7891 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7892 		aprint_error_dev(sc->sc_dev,
   7893 		    "unable to allocate RX control data, error = %d\n",
   7894 		    error);
   7895 		goto fail_0;
   7896 	}
   7897 
   7898 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7899 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7900 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7901 		aprint_error_dev(sc->sc_dev,
   7902 		    "unable to map RX control data, error = %d\n", error);
   7903 		goto fail_1;
   7904 	}
   7905 
   7906 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7907 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7908 		aprint_error_dev(sc->sc_dev,
   7909 		    "unable to create RX control data DMA map, error = %d\n",
   7910 		    error);
   7911 		goto fail_2;
   7912 	}
   7913 
   7914 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7915 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7916 		aprint_error_dev(sc->sc_dev,
   7917 		    "unable to load RX control data DMA map, error = %d\n",
   7918 		    error);
   7919 		goto fail_3;
   7920 	}
   7921 
   7922 	return 0;
   7923 
   7924  fail_3:
   7925 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7926  fail_2:
   7927 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7928 	    rxq_descs_size);
   7929  fail_1:
   7930 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7931  fail_0:
   7932 	return error;
   7933 }
   7934 
   7935 static void
   7936 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7937 {
   7938 
   7939 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7940 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7941 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7942 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7943 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7944 }
   7945 
   7946 
   7947 static int
   7948 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7949 {
   7950 	int i, error;
   7951 
   7952 	/* Create the transmit buffer DMA maps. */
   7953 	WM_TXQUEUELEN(txq) =
   7954 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7955 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7956 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7957 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7958 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7959 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7960 			aprint_error_dev(sc->sc_dev,
   7961 			    "unable to create Tx DMA map %d, error = %d\n",
   7962 			    i, error);
   7963 			goto fail;
   7964 		}
   7965 	}
   7966 
   7967 	return 0;
   7968 
   7969 fail:
   7970 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7971 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7972 			bus_dmamap_destroy(sc->sc_dmat,
   7973 			    txq->txq_soft[i].txs_dmamap);
   7974 	}
   7975 	return error;
   7976 }
   7977 
   7978 static void
   7979 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7980 {
   7981 	int i;
   7982 
   7983 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7984 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7985 			bus_dmamap_destroy(sc->sc_dmat,
   7986 			    txq->txq_soft[i].txs_dmamap);
   7987 	}
   7988 }
   7989 
   7990 static int
   7991 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7992 {
   7993 	int i, error;
   7994 
   7995 	/* Create the receive buffer DMA maps. */
   7996 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7997 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7998 			    MCLBYTES, 0, 0,
   7999 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   8000 			aprint_error_dev(sc->sc_dev,
   8001 			    "unable to create Rx DMA map %d error = %d\n",
   8002 			    i, error);
   8003 			goto fail;
   8004 		}
   8005 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8006 	}
   8007 
   8008 	return 0;
   8009 
   8010  fail:
   8011 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8012 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8013 			bus_dmamap_destroy(sc->sc_dmat,
   8014 			    rxq->rxq_soft[i].rxs_dmamap);
   8015 	}
   8016 	return error;
   8017 }
   8018 
   8019 static void
   8020 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8021 {
   8022 	int i;
   8023 
   8024 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8025 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8026 			bus_dmamap_destroy(sc->sc_dmat,
   8027 			    rxq->rxq_soft[i].rxs_dmamap);
   8028 	}
   8029 }
   8030 
   8031 /*
 * wm_alloc_txrx_queues:
 *	Allocate {Tx,Rx} descriptors and {Tx,Rx} buffers
   8034  */
   8035 static int
   8036 wm_alloc_txrx_queues(struct wm_softc *sc)
   8037 {
   8038 	int i, error, tx_done, rx_done;
   8039 
   8040 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8041 	    KM_SLEEP);
   8042 	if (sc->sc_queue == NULL) {
   8043 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   8044 		error = ENOMEM;
   8045 		goto fail_0;
   8046 	}
   8047 
   8048 	/* For transmission */
   8049 	error = 0;
   8050 	tx_done = 0;
   8051 	for (i = 0; i < sc->sc_nqueues; i++) {
   8052 #ifdef WM_EVENT_COUNTERS
   8053 		int j;
   8054 		const char *xname;
   8055 #endif
   8056 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8057 		txq->txq_sc = sc;
   8058 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8059 
   8060 		error = wm_alloc_tx_descs(sc, txq);
   8061 		if (error)
   8062 			break;
   8063 		error = wm_alloc_tx_buffer(sc, txq);
   8064 		if (error) {
   8065 			wm_free_tx_descs(sc, txq);
   8066 			break;
   8067 		}
   8068 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8069 		if (txq->txq_interq == NULL) {
   8070 			wm_free_tx_descs(sc, txq);
   8071 			wm_free_tx_buffer(sc, txq);
   8072 			error = ENOMEM;
   8073 			break;
   8074 		}
   8075 
   8076 #ifdef WM_EVENT_COUNTERS
   8077 		xname = device_xname(sc->sc_dev);
   8078 
   8079 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8080 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8081 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8082 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8083 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8084 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8085 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8086 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8087 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8088 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8089 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8090 
   8091 		for (j = 0; j < WM_NTXSEGS; j++) {
   8092 			snprintf(txq->txq_txseg_evcnt_names[j],
   8093 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8094 			    "txq%02dtxseg%d", i, j);
   8095 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8096 			    EVCNT_TYPE_MISC,
   8097 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8098 		}
   8099 
   8100 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8101 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8102 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8103 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8104 		/* Only for 82544 (and earlier?) */
   8105 		if (sc->sc_type <= WM_T_82544)
   8106 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8107 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8108 #endif /* WM_EVENT_COUNTERS */
   8109 
   8110 		tx_done++;
   8111 	}
   8112 	if (error)
   8113 		goto fail_1;
   8114 
   8115 	/* For receive */
   8116 	error = 0;
   8117 	rx_done = 0;
   8118 	for (i = 0; i < sc->sc_nqueues; i++) {
   8119 #ifdef WM_EVENT_COUNTERS
   8120 		const char *xname;
   8121 #endif
   8122 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8123 		rxq->rxq_sc = sc;
   8124 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8125 
   8126 		error = wm_alloc_rx_descs(sc, rxq);
   8127 		if (error)
   8128 			break;
   8129 
   8130 		error = wm_alloc_rx_buffer(sc, rxq);
   8131 		if (error) {
   8132 			wm_free_rx_descs(sc, rxq);
   8133 			break;
   8134 		}
   8135 
   8136 #ifdef WM_EVENT_COUNTERS
   8137 		xname = device_xname(sc->sc_dev);
   8138 
   8139 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8140 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8141 
   8142 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8143 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8144 #endif /* WM_EVENT_COUNTERS */
   8145 
   8146 		rx_done++;
   8147 	}
   8148 	if (error)
   8149 		goto fail_2;
   8150 
   8151 	return 0;
   8152 
   8153 fail_2:
   8154 	for (i = 0; i < rx_done; i++) {
   8155 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8156 		wm_free_rx_buffer(sc, rxq);
   8157 		wm_free_rx_descs(sc, rxq);
   8158 		if (rxq->rxq_lock)
   8159 			mutex_obj_free(rxq->rxq_lock);
   8160 	}
   8161 fail_1:
   8162 	for (i = 0; i < tx_done; i++) {
   8163 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8164 		pcq_destroy(txq->txq_interq);
   8165 		wm_free_tx_buffer(sc, txq);
   8166 		wm_free_tx_descs(sc, txq);
   8167 		if (txq->txq_lock)
   8168 			mutex_obj_free(txq->txq_lock);
   8169 	}
   8170 
   8171 	kmem_free(sc->sc_queue,
   8172 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8173 fail_0:
   8174 	return error;
   8175 }
   8176 
   8177 /*
 * wm_free_txrx_queues:
 *	Free {Tx,Rx} descriptors and {Tx,Rx} buffers
   8180  */
   8181 static void
   8182 wm_free_txrx_queues(struct wm_softc *sc)
   8183 {
   8184 	int i;
   8185 
   8186 	for (i = 0; i < sc->sc_nqueues; i++) {
   8187 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8188 
   8189 #ifdef WM_EVENT_COUNTERS
   8190 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8191 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8192 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8193 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8194 #endif /* WM_EVENT_COUNTERS */
   8195 
   8196 		wm_free_rx_buffer(sc, rxq);
   8197 		wm_free_rx_descs(sc, rxq);
   8198 		if (rxq->rxq_lock)
   8199 			mutex_obj_free(rxq->rxq_lock);
   8200 	}
   8201 
   8202 	for (i = 0; i < sc->sc_nqueues; i++) {
   8203 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8204 		struct mbuf *m;
   8205 #ifdef WM_EVENT_COUNTERS
   8206 		int j;
   8207 
   8208 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8209 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8210 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8211 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8212 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8213 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8214 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8215 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8216 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8217 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8218 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8219 
   8220 		for (j = 0; j < WM_NTXSEGS; j++)
   8221 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8222 
   8223 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8224 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8225 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8226 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8227 		if (sc->sc_type <= WM_T_82544)
   8228 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8229 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8230 #endif /* WM_EVENT_COUNTERS */
   8231 
   8232 		/* Drain txq_interq */
   8233 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8234 			m_freem(m);
   8235 		pcq_destroy(txq->txq_interq);
   8236 
   8237 		wm_free_tx_buffer(sc, txq);
   8238 		wm_free_tx_descs(sc, txq);
   8239 		if (txq->txq_lock)
   8240 			mutex_obj_free(txq->txq_lock);
   8241 	}
   8242 
   8243 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8244 }
   8245 
   8246 static void
   8247 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8248 {
   8249 
   8250 	KASSERT(mutex_owned(txq->txq_lock));
   8251 
   8252 	/* Initialize the transmit descriptor ring. */
   8253 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8254 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8255 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8256 	txq->txq_free = WM_NTXDESC(txq);
   8257 	txq->txq_next = 0;
   8258 }
   8259 
   8260 static void
   8261 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8262     struct wm_txqueue *txq)
   8263 {
   8264 
   8265 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8266 		device_xname(sc->sc_dev), __func__));
   8267 	KASSERT(mutex_owned(txq->txq_lock));
   8268 
   8269 	if (sc->sc_type < WM_T_82543) {
   8270 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8271 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8272 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8273 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8274 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8275 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8276 	} else {
   8277 		int qid = wmq->wmq_id;
   8278 
   8279 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8280 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8281 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8282 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8283 
   8284 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8285 			/*
   8286 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   8288 			 */
   8289 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8290 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8291 			    | TXDCTL_WTHRESH(0));
   8292 		else {
   8293 			/* XXX should update with AIM? */
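			/*
			 * wmq_itr is kept in ITR units (256 ns per the
			 * datasheets), while TIDV/TADV count in 1024 ns
			 * units, hence the division by 4.
			 */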
   8294 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8295 			if (sc->sc_type >= WM_T_82540) {
   8296 				/* Should be the same */
   8297 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8298 			}
   8299 
   8300 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8301 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8302 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8303 		}
   8304 	}
   8305 }
   8306 
   8307 static void
   8308 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8309 {
   8310 	int i;
   8311 
   8312 	KASSERT(mutex_owned(txq->txq_lock));
   8313 
   8314 	/* Initialize the transmit job descriptors. */
   8315 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8316 		txq->txq_soft[i].txs_mbuf = NULL;
   8317 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8318 	txq->txq_snext = 0;
   8319 	txq->txq_sdirty = 0;
   8320 }
   8321 
   8322 static void
   8323 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8324     struct wm_txqueue *txq)
   8325 {
   8326 
   8327 	KASSERT(mutex_owned(txq->txq_lock));
   8328 
   8329 	/*
   8330 	 * Set up some register offsets that are different between
   8331 	 * the i82542 and the i82543 and later chips.
   8332 	 */
   8333 	if (sc->sc_type < WM_T_82543)
   8334 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8335 	else
   8336 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8337 
   8338 	wm_init_tx_descs(sc, txq);
   8339 	wm_init_tx_regs(sc, wmq, txq);
   8340 	wm_init_tx_buffer(sc, txq);
   8341 
	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   8343 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8344 
   8345 	txq->txq_sending = false;
   8346 }
   8347 
   8348 static void
   8349 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8350     struct wm_rxqueue *rxq)
   8351 {
   8352 
   8353 	KASSERT(mutex_owned(rxq->rxq_lock));
   8354 
   8355 	/*
   8356 	 * Initialize the receive descriptor and receive job
   8357 	 * descriptor rings.
   8358 	 */
   8359 	if (sc->sc_type < WM_T_82543) {
   8360 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8361 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8362 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8363 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8364 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8365 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8366 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8367 
   8368 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8369 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8370 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8371 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8372 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8373 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8374 	} else {
   8375 		int qid = wmq->wmq_id;
   8376 
   8377 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8378 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8379 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8380 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8381 
   8382 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8383 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8384 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8385 				    "or higher\n", __func__, MCLBYTES);
   8386 
   8387 			/*
   8388 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   8389 			 * only.
   8390 			 */
   8391 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8392 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8393 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8394 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8395 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8396 			    | RXDCTL_WTHRESH(1));
   8397 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8398 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8399 		} else {
   8400 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8401 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8402 			/* XXX should update with AIM? */
   8403 			CSR_WRITE(sc, WMREG_RDTR,
   8404 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
   8406 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8407 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8408 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8409 		}
   8410 	}
   8411 }
   8412 
   8413 static int
   8414 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8415 {
   8416 	struct wm_rxsoft *rxs;
   8417 	int error, i;
   8418 
   8419 	KASSERT(mutex_owned(rxq->rxq_lock));
   8420 
   8421 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8422 		rxs = &rxq->rxq_soft[i];
   8423 		if (rxs->rxs_mbuf == NULL) {
   8424 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8425 				log(LOG_ERR, "%s: unable to allocate or map "
   8426 				    "rx buffer %d, error = %d\n",
   8427 				    device_xname(sc->sc_dev), i, error);
   8428 				/*
   8429 				 * XXX Should attempt to run with fewer receive
   8430 				 * XXX buffers instead of just failing.
   8431 				 */
   8432 				wm_rxdrain(rxq);
   8433 				return ENOMEM;
   8434 			}
   8435 		} else {
   8436 			/*
   8437 			 * For 82575 and 82576, the RX descriptors must be
   8438 			 * initialized after the setting of RCTL.EN in
   8439 			 * wm_set_filter()
   8440 			 */
   8441 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8442 				wm_init_rxdesc(rxq, i);
   8443 		}
   8444 	}
   8445 	rxq->rxq_ptr = 0;
   8446 	rxq->rxq_discard = 0;
   8447 	WM_RXCHAIN_RESET(rxq);
   8448 
   8449 	return 0;
   8450 }
   8451 
   8452 static int
   8453 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8454     struct wm_rxqueue *rxq)
   8455 {
   8456 
   8457 	KASSERT(mutex_owned(rxq->rxq_lock));
   8458 
   8459 	/*
   8460 	 * Set up some register offsets that are different between
   8461 	 * the i82542 and the i82543 and later chips.
   8462 	 */
   8463 	if (sc->sc_type < WM_T_82543)
   8464 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8465 	else
   8466 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8467 
   8468 	wm_init_rx_regs(sc, wmq, rxq);
   8469 	return wm_init_rx_buffer(sc, rxq);
   8470 }
   8471 
   8472 /*
 * wm_init_txrx_queues:
 *	Initialize {Tx,Rx} descriptors and {Tx,Rx} buffers
   8475  */
   8476 static int
   8477 wm_init_txrx_queues(struct wm_softc *sc)
   8478 {
   8479 	int i, error = 0;
   8480 
   8481 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8482 		device_xname(sc->sc_dev), __func__));
   8483 
   8484 	for (i = 0; i < sc->sc_nqueues; i++) {
   8485 		struct wm_queue *wmq = &sc->sc_queue[i];
   8486 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8487 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8488 
   8489 		/*
   8490 		 * TODO
		 * Currently, a constant value is used instead of AIM.
		 * Furthermore, the interrupt interval of a multiqueue
		 * configuration, which uses polling mode, is less than the
		 * default value.  More tuning and AIM are required.
   8495 		 */
   8496 		if (wm_is_using_multiqueue(sc))
   8497 			wmq->wmq_itr = 50;
   8498 		else
   8499 			wmq->wmq_itr = sc->sc_itr_init;
   8500 		wmq->wmq_set_itr = true;
   8501 
   8502 		mutex_enter(txq->txq_lock);
   8503 		wm_init_tx_queue(sc, wmq, txq);
   8504 		mutex_exit(txq->txq_lock);
   8505 
   8506 		mutex_enter(rxq->rxq_lock);
   8507 		error = wm_init_rx_queue(sc, wmq, rxq);
   8508 		mutex_exit(rxq->rxq_lock);
   8509 		if (error)
   8510 			break;
   8511 	}
   8512 
   8513 	return error;
   8514 }
   8515 
   8516 /*
   8517  * wm_tx_offload:
   8518  *
   8519  *	Set up TCP/IP checksumming parameters for the
   8520  *	specified packet.
   8521  */
   8522 static void
   8523 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8524     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8525 {
   8526 	struct mbuf *m0 = txs->txs_mbuf;
   8527 	struct livengood_tcpip_ctxdesc *t;
   8528 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8529 	uint32_t ipcse;
   8530 	struct ether_header *eh;
   8531 	int offset, iphl;
   8532 	uint8_t fields;
   8533 
   8534 	/*
   8535 	 * XXX It would be nice if the mbuf pkthdr had offset
   8536 	 * fields for the protocol headers.
   8537 	 */
   8538 
   8539 	eh = mtod(m0, struct ether_header *);
   8540 	switch (htons(eh->ether_type)) {
   8541 	case ETHERTYPE_IP:
   8542 	case ETHERTYPE_IPV6:
   8543 		offset = ETHER_HDR_LEN;
   8544 		break;
   8545 
   8546 	case ETHERTYPE_VLAN:
   8547 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8548 		break;
   8549 
   8550 	default:
   8551 		/* Don't support this protocol or encapsulation. */
   8552 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8553 		txq->txq_last_hw_ipcs = 0;
   8554 		txq->txq_last_hw_tucs = 0;
   8555 		*fieldsp = 0;
   8556 		*cmdp = 0;
   8557 		return;
   8558 	}
   8559 
   8560 	if ((m0->m_pkthdr.csum_flags &
   8561 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8562 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8563 	} else
   8564 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8565 
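	/* IPCSE is the inclusive offset of the last byte of the IP header. */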
   8566 	ipcse = offset + iphl - 1;
   8567 
   8568 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8569 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8570 	seg = 0;
   8571 	fields = 0;
   8572 
   8573 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8574 		int hlen = offset + iphl;
   8575 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8576 
   8577 		if (__predict_false(m0->m_len <
   8578 				    (hlen + sizeof(struct tcphdr)))) {
   8579 			/*
   8580 			 * TCP/IP headers are not in the first mbuf; we need
   8581 			 * to do this the slow and painful way. Let's just
   8582 			 * hope this doesn't happen very often.
   8583 			 */
   8584 			struct tcphdr th;
   8585 
   8586 			WM_Q_EVCNT_INCR(txq, tsopain);
   8587 
   8588 			m_copydata(m0, hlen, sizeof(th), &th);
   8589 			if (v4) {
   8590 				struct ip ip;
   8591 
   8592 				m_copydata(m0, offset, sizeof(ip), &ip);
   8593 				ip.ip_len = 0;
   8594 				m_copyback(m0,
   8595 				    offset + offsetof(struct ip, ip_len),
   8596 				    sizeof(ip.ip_len), &ip.ip_len);
   8597 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8598 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8599 			} else {
   8600 				struct ip6_hdr ip6;
   8601 
   8602 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8603 				ip6.ip6_plen = 0;
   8604 				m_copyback(m0,
   8605 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8606 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8607 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8608 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8609 			}
   8610 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8611 			    sizeof(th.th_sum), &th.th_sum);
   8612 
   8613 			hlen += th.th_off << 2;
   8614 		} else {
   8615 			/*
   8616 			 * TCP/IP headers are in the first mbuf; we can do
   8617 			 * this the easy way.
   8618 			 */
   8619 			struct tcphdr *th;
   8620 
   8621 			if (v4) {
   8622 				struct ip *ip =
   8623 				    (void *)(mtod(m0, char *) + offset);
   8624 				th = (void *)(mtod(m0, char *) + hlen);
   8625 
   8626 				ip->ip_len = 0;
   8627 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8628 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8629 			} else {
   8630 				struct ip6_hdr *ip6 =
   8631 				    (void *)(mtod(m0, char *) + offset);
   8632 				th = (void *)(mtod(m0, char *) + hlen);
   8633 
   8634 				ip6->ip6_plen = 0;
   8635 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8636 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8637 			}
   8638 			hlen += th->th_off << 2;
   8639 		}
   8640 
   8641 		if (v4) {
   8642 			WM_Q_EVCNT_INCR(txq, tso);
   8643 			cmdlen |= WTX_TCPIP_CMD_IP;
   8644 		} else {
   8645 			WM_Q_EVCNT_INCR(txq, tso6);
   8646 			ipcse = 0;
   8647 		}
   8648 		cmd |= WTX_TCPIP_CMD_TSE;
   8649 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8650 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8651 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8652 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8653 	}
   8654 
   8655 	/*
   8656 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8657 	 * offload feature, if we load the context descriptor, we
   8658 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8659 	 */
   8660 
   8661 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8662 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8663 	    WTX_TCPIP_IPCSE(ipcse);
   8664 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8665 		WM_Q_EVCNT_INCR(txq, ipsum);
   8666 		fields |= WTX_IXSM;
   8667 	}
   8668 
   8669 	offset += iphl;
   8670 
   8671 	if (m0->m_pkthdr.csum_flags &
   8672 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8673 		WM_Q_EVCNT_INCR(txq, tusum);
   8674 		fields |= WTX_TXSM;
   8675 		tucs = WTX_TCPIP_TUCSS(offset) |
   8676 		    WTX_TCPIP_TUCSO(offset +
   8677 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8678 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8679 	} else if ((m0->m_pkthdr.csum_flags &
   8680 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8681 		WM_Q_EVCNT_INCR(txq, tusum6);
   8682 		fields |= WTX_TXSM;
   8683 		tucs = WTX_TCPIP_TUCSS(offset) |
   8684 		    WTX_TCPIP_TUCSO(offset +
   8685 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8686 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8687 	} else {
   8688 		/* Just initialize it to a valid TCP context. */
   8689 		tucs = WTX_TCPIP_TUCSS(offset) |
   8690 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8691 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8692 	}
   8693 
   8694 	*cmdp = cmd;
   8695 	*fieldsp = fields;
   8696 
   8697 	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574.  On the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used
	 * regardless of the queue it was used for.  We cannot reuse
   8704 	 * contexts on this hardware platform and must generate a new
   8705 	 * context every time.  82574L hardware spec, section 7.2.6,
   8706 	 * second note.
   8707 	 */
   8708 	if (sc->sc_nqueues < 2) {
   8709 		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time in hardware.
		 * It also hurts performance significantly for small
		 * frames, so avoid it if the driver can reuse a
		 * previously configured checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * only if the frame has the same type (IP/TCP) and the
		 * same MSS.  However, checking whether a frame has the
		 * same IP/TCP structure is hard, so just ignore that and
		 * always establish a new TSO context.
   8720 		 */
   8721 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8722 		    == 0) {
   8723 			if (txq->txq_last_hw_cmd == cmd &&
   8724 			    txq->txq_last_hw_fields == fields &&
   8725 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8726 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8727 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8728 				return;
   8729 			}
   8730 		}
   8731 
   8732 		txq->txq_last_hw_cmd = cmd;
   8733 		txq->txq_last_hw_fields = fields;
   8734 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8735 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8736 	}
   8737 
   8738 	/* Fill in the context descriptor. */
   8739 	t = (struct livengood_tcpip_ctxdesc *)
   8740 	    &txq->txq_descs[txq->txq_next];
   8741 	t->tcpip_ipcs = htole32(ipcs);
   8742 	t->tcpip_tucs = htole32(tucs);
   8743 	t->tcpip_cmdlen = htole32(cmdlen);
   8744 	t->tcpip_seg = htole32(seg);
   8745 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8746 
   8747 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8748 	txs->txs_ndesc++;
   8749 }
   8750 
   8751 static inline int
   8752 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8753 {
   8754 	struct wm_softc *sc = ifp->if_softc;
   8755 	u_int cpuid = cpu_index(curcpu());
   8756 
   8757 	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
   8761 	 */
	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
	    % sc->sc_nqueues;
   8763 }
   8764 
   8765 static inline bool
   8766 wm_linkdown_discard(struct wm_txqueue *txq)
   8767 {
   8768 
   8769 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8770 		return true;
   8771 
   8772 	return false;
   8773 }
   8774 
   8775 /*
   8776  * wm_start:		[ifnet interface function]
   8777  *
   8778  *	Start packet transmission on the interface.
   8779  */
   8780 static void
   8781 wm_start(struct ifnet *ifp)
   8782 {
   8783 	struct wm_softc *sc = ifp->if_softc;
   8784 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8785 
   8786 	KASSERT(if_is_mpsafe(ifp));
   8787 	/*
   8788 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8789 	 */
   8790 
   8791 	mutex_enter(txq->txq_lock);
   8792 	if (!txq->txq_stopping)
   8793 		wm_start_locked(ifp);
   8794 	mutex_exit(txq->txq_lock);
   8795 }
   8796 
   8797 static void
   8798 wm_start_locked(struct ifnet *ifp)
   8799 {
   8800 	struct wm_softc *sc = ifp->if_softc;
   8801 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8802 
   8803 	wm_send_common_locked(ifp, txq, false);
   8804 }
   8805 
   8806 static int
   8807 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8808 {
   8809 	int qid;
   8810 	struct wm_softc *sc = ifp->if_softc;
   8811 	struct wm_txqueue *txq;
   8812 
   8813 	qid = wm_select_txqueue(ifp, m);
   8814 	txq = &sc->sc_queue[qid].wmq_txq;
   8815 
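	/* pcq_put() fails if the per-queue interq is full; drop the packet. */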
   8816 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8817 		m_freem(m);
   8818 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8819 		return ENOBUFS;
   8820 	}
   8821 
   8822 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8823 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8824 	if (m->m_flags & M_MCAST)
   8825 		if_statinc_ref(nsr, if_omcasts);
   8826 	IF_STAT_PUTREF(ifp);
   8827 
   8828 	if (mutex_tryenter(txq->txq_lock)) {
   8829 		if (!txq->txq_stopping)
   8830 			wm_transmit_locked(ifp, txq);
   8831 		mutex_exit(txq->txq_lock);
   8832 	}
   8833 
   8834 	return 0;
   8835 }
   8836 
   8837 static void
   8838 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8839 {
   8840 
   8841 	wm_send_common_locked(ifp, txq, true);
   8842 }
   8843 
   8844 static void
   8845 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8846     bool is_transmit)
   8847 {
   8848 	struct wm_softc *sc = ifp->if_softc;
   8849 	struct mbuf *m0;
   8850 	struct wm_txsoft *txs;
   8851 	bus_dmamap_t dmamap;
   8852 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8853 	bus_addr_t curaddr;
   8854 	bus_size_t seglen, curlen;
   8855 	uint32_t cksumcmd;
   8856 	uint8_t cksumfields;
   8857 	bool remap = true;
   8858 
   8859 	KASSERT(mutex_owned(txq->txq_lock));
   8860 	KASSERT(!txq->txq_stopping);
   8861 
   8862 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8863 		return;
   8864 
   8865 	if (__predict_false(wm_linkdown_discard(txq))) {
   8866 		do {
   8867 			if (is_transmit)
   8868 				m0 = pcq_get(txq->txq_interq);
   8869 			else
   8870 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8871 			/*
   8872 			 * increment successed packet counter as in the case
   8873 			 * which the packet is discarded by link down PHY.
   8874 			 */
   8875 			if (m0 != NULL) {
   8876 				if_statinc(ifp, if_opackets);
   8877 				m_freem(m0);
   8878 			}
   8879 		} while (m0 != NULL);
   8880 		return;
   8881 	}
   8882 
   8883 	/* Remember the previous number of free descriptors. */
   8884 	ofree = txq->txq_free;
   8885 
   8886 	/*
   8887 	 * Loop through the send queue, setting up transmit descriptors
   8888 	 * until we drain the queue, or use up all available transmit
   8889 	 * descriptors.
   8890 	 */
   8891 	for (;;) {
   8892 		m0 = NULL;
   8893 
   8894 		/* Get a work queue entry. */
   8895 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8896 			wm_txeof(txq, UINT_MAX);
   8897 			if (txq->txq_sfree == 0) {
   8898 				DPRINTF(sc, WM_DEBUG_TX,
   8899 				    ("%s: TX: no free job descriptors\n",
   8900 					device_xname(sc->sc_dev)));
   8901 				WM_Q_EVCNT_INCR(txq, txsstall);
   8902 				break;
   8903 			}
   8904 		}
   8905 
   8906 		/* Grab a packet off the queue. */
   8907 		if (is_transmit)
   8908 			m0 = pcq_get(txq->txq_interq);
   8909 		else
   8910 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8911 		if (m0 == NULL)
   8912 			break;
   8913 
   8914 		DPRINTF(sc, WM_DEBUG_TX,
   8915 		    ("%s: TX: have packet to transmit: %p\n",
   8916 			device_xname(sc->sc_dev), m0));
   8917 
   8918 		txs = &txq->txq_soft[txq->txq_snext];
   8919 		dmamap = txs->txs_dmamap;
   8920 
   8921 		use_tso = (m0->m_pkthdr.csum_flags &
   8922 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8923 
   8924 		/*
   8925 		 * So says the Linux driver:
   8926 		 * The controller does a simple calculation to make sure
   8927 		 * there is enough room in the FIFO before initiating the
   8928 		 * DMA for each buffer. The calc is:
   8929 		 *	4 = ceil(buffer len / MSS)
   8930 		 * To make sure we don't overrun the FIFO, adjust the max
   8931 		 * buffer len if the MSS drops.
   8932 		 */
   8933 		dmamap->dm_maxsegsz =
   8934 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8935 		    ? m0->m_pkthdr.segsz << 2
   8936 		    : WTX_MAX_LEN;
   8937 
   8938 		/*
   8939 		 * Load the DMA map.  If this fails, the packet either
   8940 		 * didn't fit in the allotted number of segments, or we
   8941 		 * were short on resources.  For the too-many-segments
   8942 		 * case, we simply report an error and drop the packet,
   8943 		 * since we can't sanely copy a jumbo packet to a single
   8944 		 * buffer.
   8945 		 */
   8946 retry:
   8947 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8948 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8949 		if (__predict_false(error)) {
   8950 			if (error == EFBIG) {
   8951 				if (remap == true) {
   8952 					struct mbuf *m;
   8953 
   8954 					remap = false;
   8955 					m = m_defrag(m0, M_NOWAIT);
   8956 					if (m != NULL) {
   8957 						WM_Q_EVCNT_INCR(txq, defrag);
   8958 						m0 = m;
   8959 						goto retry;
   8960 					}
   8961 				}
   8962 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8963 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8964 				    "DMA segments, dropping...\n",
   8965 				    device_xname(sc->sc_dev));
   8966 				wm_dump_mbuf_chain(sc, m0);
   8967 				m_freem(m0);
   8968 				continue;
   8969 			}
   8970 			/* Short on resources, just stop for now. */
   8971 			DPRINTF(sc, WM_DEBUG_TX,
   8972 			    ("%s: TX: dmamap load failed: %d\n",
   8973 				device_xname(sc->sc_dev), error));
   8974 			break;
   8975 		}
   8976 
   8977 		segs_needed = dmamap->dm_nsegs;
   8978 		if (use_tso) {
   8979 			/* For sentinel descriptor; see below. */
   8980 			segs_needed++;
   8981 		}
   8982 
   8983 		/*
   8984 		 * Ensure we have enough descriptors free to describe
   8985 		 * the packet. Note, we always reserve one descriptor
   8986 		 * at the end of the ring due to the semantics of the
   8987 		 * TDT register, plus one more in the event we need
   8988 		 * to load offload context.
   8989 		 */
   8990 		if (segs_needed > txq->txq_free - 2) {
   8991 			/*
   8992 			 * Not enough free descriptors to transmit this
   8993 			 * packet.  We haven't committed anything yet,
   8994 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   8996 			 * layer that there are no more slots left.
   8997 			 */
   8998 			DPRINTF(sc, WM_DEBUG_TX,
   8999 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9000 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9001 				segs_needed, txq->txq_free - 1));
   9002 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9003 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9004 			WM_Q_EVCNT_INCR(txq, txdstall);
   9005 			break;
   9006 		}
   9007 
   9008 		/*
   9009 		 * Check for 82547 Tx FIFO bug. We need to do this
   9010 		 * once we know we can transmit the packet, since we
   9011 		 * do some internal FIFO space accounting here.
   9012 		 */
   9013 		if (sc->sc_type == WM_T_82547 &&
   9014 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9015 			DPRINTF(sc, WM_DEBUG_TX,
   9016 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9017 				device_xname(sc->sc_dev)));
   9018 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9019 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9020 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9021 			break;
   9022 		}
   9023 
   9024 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9025 
   9026 		DPRINTF(sc, WM_DEBUG_TX,
   9027 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9028 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9029 
   9030 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9031 
   9032 		/*
   9033 		 * Store a pointer to the packet so that we can free it
   9034 		 * later.
   9035 		 *
   9036 		 * Initially, we consider the number of descriptors the
   9037 		 * packet uses the number of DMA segments.  This may be
   9038 		 * incremented by 1 if we do checksum offload (a descriptor
   9039 		 * is used to set the checksum context).
   9040 		 */
   9041 		txs->txs_mbuf = m0;
   9042 		txs->txs_firstdesc = txq->txq_next;
   9043 		txs->txs_ndesc = segs_needed;
   9044 
   9045 		/* Set up offload parameters for this packet. */
   9046 		if (m0->m_pkthdr.csum_flags &
   9047 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9048 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9049 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9050 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9051 		} else {
   9052 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9053 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9054 			cksumcmd = 0;
   9055 			cksumfields = 0;
   9056 		}
   9057 
   9058 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9059 
   9060 		/* Sync the DMA map. */
   9061 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9062 		    BUS_DMASYNC_PREWRITE);
   9063 
   9064 		/* Initialize the transmit descriptor. */
   9065 		for (nexttx = txq->txq_next, seg = 0;
   9066 		     seg < dmamap->dm_nsegs; seg++) {
   9067 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9068 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9069 			     seglen != 0;
   9070 			     curaddr += curlen, seglen -= curlen,
   9071 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9072 				curlen = seglen;
   9073 
   9074 				/*
   9075 				 * So says the Linux driver:
   9076 				 * Work around for premature descriptor
   9077 				 * write-backs in TSO mode.  Append a
   9078 				 * 4-byte sentinel descriptor.
   9079 				 */
   9080 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9081 				    curlen > 8)
   9082 					curlen -= 4;
   9083 
   9084 				wm_set_dma_addr(
   9085 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9086 				txq->txq_descs[nexttx].wtx_cmdlen
   9087 				    = htole32(cksumcmd | curlen);
   9088 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9089 				    = 0;
   9090 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9091 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   9093 				lasttx = nexttx;
   9094 
   9095 				DPRINTF(sc, WM_DEBUG_TX,
   9096 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9097 					"len %#04zx\n",
   9098 					device_xname(sc->sc_dev), nexttx,
   9099 					(uint64_t)curaddr, curlen));
   9100 			}
   9101 		}
   9102 
   9103 		KASSERT(lasttx != -1);
   9104 
   9105 		/*
   9106 		 * Set up the command byte on the last descriptor of
   9107 		 * the packet. If we're in the interrupt delay window,
   9108 		 * delay the interrupt.
   9109 		 */
   9110 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9111 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9112 
   9113 		/*
   9114 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9115 		 * up the descriptor to encapsulate the packet for us.
   9116 		 *
   9117 		 * This is only valid on the last descriptor of the packet.
   9118 		 */
   9119 		if (vlan_has_tag(m0)) {
   9120 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9121 			    htole32(WTX_CMD_VLE);
   9122 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9123 			    = htole16(vlan_get_tag(m0));
   9124 		}
   9125 
   9126 		txs->txs_lastdesc = lasttx;
   9127 
   9128 		DPRINTF(sc, WM_DEBUG_TX,
   9129 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9130 			device_xname(sc->sc_dev),
   9131 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9132 
   9133 		/* Sync the descriptors we're using. */
   9134 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9135 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9136 
   9137 		/* Give the packet to the chip. */
   9138 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9139 
   9140 		DPRINTF(sc, WM_DEBUG_TX,
   9141 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9142 
   9143 		DPRINTF(sc, WM_DEBUG_TX,
   9144 		    ("%s: TX: finished transmitting packet, job %d\n",
   9145 			device_xname(sc->sc_dev), txq->txq_snext));
   9146 
   9147 		/* Advance the tx pointer. */
   9148 		txq->txq_free -= txs->txs_ndesc;
   9149 		txq->txq_next = nexttx;
   9150 
   9151 		txq->txq_sfree--;
   9152 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9153 
   9154 		/* Pass the packet to any BPF listeners. */
   9155 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9156 	}
   9157 
   9158 	if (m0 != NULL) {
   9159 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9160 		WM_Q_EVCNT_INCR(txq, descdrop);
   9161 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9162 			__func__));
   9163 		m_freem(m0);
   9164 	}
   9165 
   9166 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9167 		/* No more slots; notify upper layer. */
   9168 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9169 	}
   9170 
   9171 	if (txq->txq_free != ofree) {
   9172 		/* Set a watchdog timer in case the chip flakes out. */
   9173 		txq->txq_lastsent = time_uptime;
   9174 		txq->txq_sending = true;
   9175 	}
   9176 }
   9177 
   9178 /*
   9179  * wm_nq_tx_offload:
   9180  *
   9181  *	Set up TCP/IP checksumming parameters for the
   9182  *	specified packet, for NEWQUEUE devices
   9183  */
   9184 static void
   9185 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9186     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9187 {
   9188 	struct mbuf *m0 = txs->txs_mbuf;
   9189 	uint32_t vl_len, mssidx, cmdc;
   9190 	struct ether_header *eh;
   9191 	int offset, iphl;
   9192 
   9193 	/*
   9194 	 * XXX It would be nice if the mbuf pkthdr had offset
   9195 	 * fields for the protocol headers.
   9196 	 */
   9197 	*cmdlenp = 0;
   9198 	*fieldsp = 0;
   9199 
   9200 	eh = mtod(m0, struct ether_header *);
   9201 	switch (htons(eh->ether_type)) {
   9202 	case ETHERTYPE_IP:
   9203 	case ETHERTYPE_IPV6:
   9204 		offset = ETHER_HDR_LEN;
   9205 		break;
   9206 
   9207 	case ETHERTYPE_VLAN:
   9208 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9209 		break;
   9210 
   9211 	default:
   9212 		/* Don't support this protocol or encapsulation. */
   9213 		*do_csum = false;
   9214 		return;
   9215 	}
   9216 	*do_csum = true;
   9217 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9218 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9219 
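         	/*
         	 * Pack the context descriptor's VLAN/length word: the MAC
         	 * header length (MACLEN), the IP header length (IPLEN) and,
         	 * if present, the VLAN tag are combined into vl_len with the
         	 * NQTXC_VLLEN_* shifts and masks below.
         	 */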
   9220 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9221 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9222 
   9223 	if ((m0->m_pkthdr.csum_flags &
   9224 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9225 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9226 	} else {
   9227 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9228 	}
   9229 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9230 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9231 
   9232 	if (vlan_has_tag(m0)) {
   9233 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9234 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9235 		*cmdlenp |= NQTX_CMD_VLE;
   9236 	}
   9237 
   9238 	mssidx = 0;
   9239 
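         	/*
         	 * For TSO, the hardware requires the IP total length field
         	 * zeroed and the TCP checksum seeded with the pseudo-header
         	 * checksum (excluding the length); both fixups are applied
         	 * to the headers in the mbuf chain below.
         	 */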
   9240 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9241 		int hlen = offset + iphl;
   9242 		int tcp_hlen;
   9243 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9244 
   9245 		if (__predict_false(m0->m_len <
   9246 				    (hlen + sizeof(struct tcphdr)))) {
   9247 			/*
   9248 			 * TCP/IP headers are not in the first mbuf; we need
   9249 			 * to do this the slow and painful way. Let's just
   9250 			 * hope this doesn't happen very often.
   9251 			 */
   9252 			struct tcphdr th;
   9253 
   9254 			WM_Q_EVCNT_INCR(txq, tsopain);
   9255 
   9256 			m_copydata(m0, hlen, sizeof(th), &th);
   9257 			if (v4) {
   9258 				struct ip ip;
   9259 
   9260 				m_copydata(m0, offset, sizeof(ip), &ip);
   9261 				ip.ip_len = 0;
   9262 				m_copyback(m0,
   9263 				    offset + offsetof(struct ip, ip_len),
   9264 				    sizeof(ip.ip_len), &ip.ip_len);
   9265 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9266 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9267 			} else {
   9268 				struct ip6_hdr ip6;
   9269 
   9270 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9271 				ip6.ip6_plen = 0;
   9272 				m_copyback(m0,
   9273 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9274 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9275 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9276 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9277 			}
   9278 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9279 			    sizeof(th.th_sum), &th.th_sum);
   9280 
   9281 			tcp_hlen = th.th_off << 2;
   9282 		} else {
   9283 			/*
   9284 			 * TCP/IP headers are in the first mbuf; we can do
   9285 			 * this the easy way.
   9286 			 */
   9287 			struct tcphdr *th;
   9288 
   9289 			if (v4) {
   9290 				struct ip *ip =
   9291 				    (void *)(mtod(m0, char *) + offset);
   9292 				th = (void *)(mtod(m0, char *) + hlen);
   9293 
   9294 				ip->ip_len = 0;
   9295 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9296 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9297 			} else {
   9298 				struct ip6_hdr *ip6 =
   9299 				    (void *)(mtod(m0, char *) + offset);
   9300 				th = (void *)(mtod(m0, char *) + hlen);
   9301 
   9302 				ip6->ip6_plen = 0;
   9303 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9304 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9305 			}
   9306 			tcp_hlen = th->th_off << 2;
   9307 		}
   9308 		hlen += tcp_hlen;
   9309 		*cmdlenp |= NQTX_CMD_TSE;
   9310 
   9311 		if (v4) {
   9312 			WM_Q_EVCNT_INCR(txq, tso);
   9313 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9314 		} else {
   9315 			WM_Q_EVCNT_INCR(txq, tso6);
   9316 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9317 		}
   9318 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9319 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9320 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9321 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9322 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9323 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9324 	} else {
   9325 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9326 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9327 	}
   9328 
   9329 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9330 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9331 		cmdc |= NQTXC_CMD_IP4;
   9332 	}
   9333 
   9334 	if (m0->m_pkthdr.csum_flags &
   9335 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9336 		WM_Q_EVCNT_INCR(txq, tusum);
   9337 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9338 			cmdc |= NQTXC_CMD_TCP;
   9339 		else
   9340 			cmdc |= NQTXC_CMD_UDP;
   9341 
   9342 		cmdc |= NQTXC_CMD_IP4;
   9343 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9344 	}
   9345 	if (m0->m_pkthdr.csum_flags &
   9346 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9347 		WM_Q_EVCNT_INCR(txq, tusum6);
   9348 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9349 			cmdc |= NQTXC_CMD_TCP;
   9350 		else
   9351 			cmdc |= NQTXC_CMD_UDP;
   9352 
   9353 		cmdc |= NQTXC_CMD_IP6;
   9354 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9355 	}
   9356 
   9357 	/*
    9358 	 * We don't have to write a context descriptor for every packet on
    9359 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    9360 	 * I210 and I211; writing one context descriptor per Tx queue is
    9361 	 * enough for these controllers.
    9362 	 * Writing a context descriptor for every packet adds overhead,
    9363 	 * but it does not cause problems.
   9364 	 */
   9365 	/* Fill in the context descriptor. */
   9366 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9367 	    htole32(vl_len);
   9368 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9369 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9370 	    htole32(cmdc);
   9371 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9372 	    htole32(mssidx);
   9373 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9374 	DPRINTF(sc, WM_DEBUG_TX,
   9375 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9376 		txq->txq_next, 0, vl_len));
   9377 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9378 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9379 	txs->txs_ndesc++;
   9380 }
   9381 
   9382 /*
   9383  * wm_nq_start:		[ifnet interface function]
   9384  *
   9385  *	Start packet transmission on the interface for NEWQUEUE devices
   9386  */
   9387 static void
   9388 wm_nq_start(struct ifnet *ifp)
   9389 {
   9390 	struct wm_softc *sc = ifp->if_softc;
   9391 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9392 
   9393 	KASSERT(if_is_mpsafe(ifp));
   9394 	/*
   9395 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9396 	 */
   9397 
   9398 	mutex_enter(txq->txq_lock);
   9399 	if (!txq->txq_stopping)
   9400 		wm_nq_start_locked(ifp);
   9401 	mutex_exit(txq->txq_lock);
   9402 }
   9403 
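         /*
          * wm_nq_start_locked:
          *
          *	Helper; start packet transmission on the default queue with
          *	the Tx queue lock held.
          */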
   9404 static void
   9405 wm_nq_start_locked(struct ifnet *ifp)
   9406 {
   9407 	struct wm_softc *sc = ifp->if_softc;
   9408 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9409 
   9410 	wm_nq_send_common_locked(ifp, txq, false);
   9411 }
   9412 
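         /*
          * wm_nq_transmit:	[ifnet interface function]
          *
          *	Queue a packet for transmission on NEWQUEUE devices.  The Tx
          *	queue is chosen by wm_select_txqueue(); the packet is put on
          *	that queue's pcq and the queue is serviced immediately if
          *	its lock can be taken without blocking.
          */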
   9413 static int
   9414 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9415 {
   9416 	int qid;
   9417 	struct wm_softc *sc = ifp->if_softc;
   9418 	struct wm_txqueue *txq;
   9419 
   9420 	qid = wm_select_txqueue(ifp, m);
   9421 	txq = &sc->sc_queue[qid].wmq_txq;
   9422 
   9423 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9424 		m_freem(m);
   9425 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9426 		return ENOBUFS;
   9427 	}
   9428 
   9429 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9430 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9431 	if (m->m_flags & M_MCAST)
   9432 		if_statinc_ref(nsr, if_omcasts);
   9433 	IF_STAT_PUTREF(ifp);
   9434 
   9435 	/*
    9436 	 * There are two situations in which this mutex_tryenter() can
    9437 	 * fail at run time:
    9438 	 *     (1) contention with the interrupt handler
    9439 	 *         (wm_txrxintr_msix())
    9440 	 *     (2) contention with the deferred if_start softint
    9441 	 *         (wm_handle_queue())
    9442 	 * In both cases, the last packet enqueued to txq->txq_interq is
    9443 	 * dequeued later by wm_deferred_start_locked(), so the packet does
    9444 	 * not get stuck.
   9445 	 */
   9446 	if (mutex_tryenter(txq->txq_lock)) {
   9447 		if (!txq->txq_stopping)
   9448 			wm_nq_transmit_locked(ifp, txq);
   9449 		mutex_exit(txq->txq_lock);
   9450 	}
   9451 
   9452 	return 0;
   9453 }
   9454 
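         /*
          * wm_nq_transmit_locked:
          *
          *	Helper; like wm_nq_start_locked(), but transmits from the
          *	given queue's pcq rather than from if_snd.
          */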
   9455 static void
   9456 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9457 {
   9458 
   9459 	wm_nq_send_common_locked(ifp, txq, true);
   9460 }
   9461 
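         /*
          * wm_nq_send_common_locked:
          *
          *	Common transmit path for NEWQUEUE devices, shared by
          *	wm_nq_start_locked() (is_transmit == false, if_snd) and
          *	wm_nq_transmit_locked() (is_transmit == true, per-queue
          *	pcq).  Called with the Tx queue lock held.
          */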
   9462 static void
   9463 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9464     bool is_transmit)
   9465 {
   9466 	struct wm_softc *sc = ifp->if_softc;
   9467 	struct mbuf *m0;
   9468 	struct wm_txsoft *txs;
   9469 	bus_dmamap_t dmamap;
   9470 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9471 	bool do_csum, sent;
   9472 	bool remap = true;
   9473 
   9474 	KASSERT(mutex_owned(txq->txq_lock));
   9475 	KASSERT(!txq->txq_stopping);
   9476 
   9477 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9478 		return;
   9479 
   9480 	if (__predict_false(wm_linkdown_discard(txq))) {
   9481 		do {
   9482 			if (is_transmit)
   9483 				m0 = pcq_get(txq->txq_interq);
   9484 			else
   9485 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9486 			/*
    9487 			 * Increment the output packet counter as in the case
    9488 			 * where the packet is discarded by a link-down PHY.
   9489 			 */
   9490 			if (m0 != NULL) {
   9491 				if_statinc(ifp, if_opackets);
   9492 				m_freem(m0);
   9493 			}
   9494 		} while (m0 != NULL);
   9495 		return;
   9496 	}
   9497 
   9498 	sent = false;
   9499 
   9500 	/*
   9501 	 * Loop through the send queue, setting up transmit descriptors
   9502 	 * until we drain the queue, or use up all available transmit
   9503 	 * descriptors.
   9504 	 */
   9505 	for (;;) {
   9506 		m0 = NULL;
   9507 
   9508 		/* Get a work queue entry. */
   9509 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9510 			wm_txeof(txq, UINT_MAX);
   9511 			if (txq->txq_sfree == 0) {
   9512 				DPRINTF(sc, WM_DEBUG_TX,
   9513 				    ("%s: TX: no free job descriptors\n",
   9514 					device_xname(sc->sc_dev)));
   9515 				WM_Q_EVCNT_INCR(txq, txsstall);
   9516 				break;
   9517 			}
   9518 		}
   9519 
   9520 		/* Grab a packet off the queue. */
   9521 		if (is_transmit)
   9522 			m0 = pcq_get(txq->txq_interq);
   9523 		else
   9524 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9525 		if (m0 == NULL)
   9526 			break;
   9527 
   9528 		DPRINTF(sc, WM_DEBUG_TX,
   9529 		    ("%s: TX: have packet to transmit: %p\n",
   9530 			device_xname(sc->sc_dev), m0));
   9531 
   9532 		txs = &txq->txq_soft[txq->txq_snext];
   9533 		dmamap = txs->txs_dmamap;
   9534 
   9535 		/*
   9536 		 * Load the DMA map.  If this fails, the packet either
   9537 		 * didn't fit in the allotted number of segments, or we
   9538 		 * were short on resources.  For the too-many-segments
   9539 		 * case, we simply report an error and drop the packet,
   9540 		 * since we can't sanely copy a jumbo packet to a single
   9541 		 * buffer.
   9542 		 */
   9543 retry:
   9544 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9545 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9546 		if (__predict_false(error)) {
   9547 			if (error == EFBIG) {
   9548 				if (remap == true) {
   9549 					struct mbuf *m;
   9550 
   9551 					remap = false;
   9552 					m = m_defrag(m0, M_NOWAIT);
   9553 					if (m != NULL) {
   9554 						WM_Q_EVCNT_INCR(txq, defrag);
   9555 						m0 = m;
   9556 						goto retry;
   9557 					}
   9558 				}
   9559 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9560 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9561 				    "DMA segments, dropping...\n",
   9562 				    device_xname(sc->sc_dev));
   9563 				wm_dump_mbuf_chain(sc, m0);
   9564 				m_freem(m0);
   9565 				continue;
   9566 			}
   9567 			/* Short on resources, just stop for now. */
   9568 			DPRINTF(sc, WM_DEBUG_TX,
   9569 			    ("%s: TX: dmamap load failed: %d\n",
   9570 				device_xname(sc->sc_dev), error));
   9571 			break;
   9572 		}
   9573 
   9574 		segs_needed = dmamap->dm_nsegs;
   9575 
   9576 		/*
   9577 		 * Ensure we have enough descriptors free to describe
   9578 		 * the packet. Note, we always reserve one descriptor
   9579 		 * at the end of the ring due to the semantics of the
   9580 		 * TDT register, plus one more in the event we need
   9581 		 * to load offload context.
   9582 		 */
   9583 		if (segs_needed > txq->txq_free - 2) {
   9584 			/*
   9585 			 * Not enough free descriptors to transmit this
   9586 			 * packet.  We haven't committed anything yet,
   9587 			 * so just unload the DMA map, put the packet
    9588 			 * back on the queue, and punt. Notify the upper
   9589 			 * layer that there are no more slots left.
   9590 			 */
   9591 			DPRINTF(sc, WM_DEBUG_TX,
   9592 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9593 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9594 				segs_needed, txq->txq_free - 1));
   9595 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9596 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9597 			WM_Q_EVCNT_INCR(txq, txdstall);
   9598 			break;
   9599 		}
   9600 
   9601 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9602 
   9603 		DPRINTF(sc, WM_DEBUG_TX,
   9604 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9605 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9606 
   9607 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9608 
   9609 		/*
   9610 		 * Store a pointer to the packet so that we can free it
   9611 		 * later.
   9612 		 *
    9613 		 * Initially, we consider the number of descriptors the
    9614 		 * packet uses to be the number of DMA segments.  This may
    9615 		 * be incremented by 1 if we do checksum offload (a
    9616 		 * descriptor is used to set the checksum context).
   9617 		 */
   9618 		txs->txs_mbuf = m0;
   9619 		txs->txs_firstdesc = txq->txq_next;
   9620 		txs->txs_ndesc = segs_needed;
   9621 
   9622 		/* Set up offload parameters for this packet. */
   9623 		uint32_t cmdlen, fields, dcmdlen;
   9624 		if (m0->m_pkthdr.csum_flags &
   9625 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9626 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9627 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9628 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9629 			    &do_csum);
   9630 		} else {
   9631 			do_csum = false;
   9632 			cmdlen = 0;
   9633 			fields = 0;
   9634 		}
   9635 
   9636 		/* Sync the DMA map. */
   9637 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9638 		    BUS_DMASYNC_PREWRITE);
   9639 
   9640 		/* Initialize the first transmit descriptor. */
   9641 		nexttx = txq->txq_next;
   9642 		if (!do_csum) {
   9643 			/* Set up a legacy descriptor */
   9644 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9645 			    dmamap->dm_segs[0].ds_addr);
   9646 			txq->txq_descs[nexttx].wtx_cmdlen =
   9647 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9648 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9649 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9650 			if (vlan_has_tag(m0)) {
   9651 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9652 				    htole32(WTX_CMD_VLE);
   9653 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9654 				    htole16(vlan_get_tag(m0));
   9655 			} else
    9656 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9657 
   9658 			dcmdlen = 0;
   9659 		} else {
   9660 			/* Set up an advanced data descriptor */
   9661 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9662 			    htole64(dmamap->dm_segs[0].ds_addr);
   9663 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9664 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9665 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9666 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9667 			    htole32(fields);
   9668 			DPRINTF(sc, WM_DEBUG_TX,
   9669 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9670 				device_xname(sc->sc_dev), nexttx,
   9671 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9672 			DPRINTF(sc, WM_DEBUG_TX,
   9673 			    ("\t 0x%08x%08x\n", fields,
   9674 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9675 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9676 		}
   9677 
   9678 		lasttx = nexttx;
   9679 		nexttx = WM_NEXTTX(txq, nexttx);
   9680 		/*
   9681 		 * Fill in the next descriptors. Legacy or advanced format
   9682 		 * is the same here.
   9683 		 */
   9684 		for (seg = 1; seg < dmamap->dm_nsegs;
   9685 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9686 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9687 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9688 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9689 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9690 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9691 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9692 			lasttx = nexttx;
   9693 
   9694 			DPRINTF(sc, WM_DEBUG_TX,
   9695 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9696 				device_xname(sc->sc_dev), nexttx,
   9697 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9698 				dmamap->dm_segs[seg].ds_len));
   9699 		}
   9700 
   9701 		KASSERT(lasttx != -1);
   9702 
   9703 		/*
   9704 		 * Set up the command byte on the last descriptor of
   9705 		 * the packet. If we're in the interrupt delay window,
   9706 		 * delay the interrupt.
   9707 		 */
   9708 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9709 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9710 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9711 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9712 
   9713 		txs->txs_lastdesc = lasttx;
   9714 
   9715 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9716 		    device_xname(sc->sc_dev),
   9717 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9718 
   9719 		/* Sync the descriptors we're using. */
   9720 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9721 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9722 
   9723 		/* Give the packet to the chip. */
   9724 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9725 		sent = true;
   9726 
   9727 		DPRINTF(sc, WM_DEBUG_TX,
   9728 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9729 
   9730 		DPRINTF(sc, WM_DEBUG_TX,
   9731 		    ("%s: TX: finished transmitting packet, job %d\n",
   9732 			device_xname(sc->sc_dev), txq->txq_snext));
   9733 
   9734 		/* Advance the tx pointer. */
   9735 		txq->txq_free -= txs->txs_ndesc;
   9736 		txq->txq_next = nexttx;
   9737 
   9738 		txq->txq_sfree--;
   9739 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9740 
   9741 		/* Pass the packet to any BPF listeners. */
   9742 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9743 	}
   9744 
   9745 	if (m0 != NULL) {
   9746 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9747 		WM_Q_EVCNT_INCR(txq, descdrop);
   9748 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9749 			__func__));
   9750 		m_freem(m0);
   9751 	}
   9752 
   9753 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9754 		/* No more slots; notify upper layer. */
   9755 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9756 	}
   9757 
   9758 	if (sent) {
   9759 		/* Set a watchdog timer in case the chip flakes out. */
   9760 		txq->txq_lastsent = time_uptime;
   9761 		txq->txq_sending = true;
   9762 	}
   9763 }
   9764 
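         /*
          * wm_deferred_start_locked:
          *
          *	Helper; restart transmission deferred from interrupt context,
          *	dispatching to the NEWQUEUE or legacy start routines.  Called
          *	with the Tx queue lock held.
          */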
   9765 static void
   9766 wm_deferred_start_locked(struct wm_txqueue *txq)
   9767 {
   9768 	struct wm_softc *sc = txq->txq_sc;
   9769 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9770 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9771 	int qid = wmq->wmq_id;
   9772 
   9773 	KASSERT(mutex_owned(txq->txq_lock));
   9774 	KASSERT(!txq->txq_stopping);
   9775 
   9776 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9777 		/* XXX needed for ALTQ or single-CPU systems */
   9778 		if (qid == 0)
   9779 			wm_nq_start_locked(ifp);
   9780 		wm_nq_transmit_locked(ifp, txq);
   9781 	} else {
    9782 		/* XXX needed for ALTQ or single-CPU systems */
   9783 		if (qid == 0)
   9784 			wm_start_locked(ifp);
   9785 		wm_transmit_locked(ifp, txq);
   9786 	}
   9787 }
   9788 
   9789 /* Interrupt */
   9790 
   9791 /*
   9792  * wm_txeof:
   9793  *
   9794  *	Helper; handle transmit interrupts.
   9795  */
   9796 static bool
   9797 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9798 {
   9799 	struct wm_softc *sc = txq->txq_sc;
   9800 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9801 	struct wm_txsoft *txs;
   9802 	int count = 0;
   9803 	int i;
   9804 	uint8_t status;
   9805 	bool more = false;
   9806 
   9807 	KASSERT(mutex_owned(txq->txq_lock));
   9808 
   9809 	if (txq->txq_stopping)
   9810 		return false;
   9811 
   9812 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9813 
   9814 	/*
   9815 	 * Go through the Tx list and free mbufs for those
   9816 	 * frames which have been transmitted.
   9817 	 */
   9818 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9819 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9820 		txs = &txq->txq_soft[i];
   9821 
   9822 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9823 			device_xname(sc->sc_dev), i));
   9824 
   9825 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9826 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9827 
   9828 		status =
   9829 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9830 		if ((status & WTX_ST_DD) == 0) {
   9831 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9832 			    BUS_DMASYNC_PREREAD);
   9833 			break;
   9834 		}
   9835 
   9836 		if (limit-- == 0) {
   9837 			more = true;
   9838 			DPRINTF(sc, WM_DEBUG_TX,
   9839 			    ("%s: TX: loop limited, job %d is not processed\n",
   9840 				device_xname(sc->sc_dev), i));
   9841 			break;
   9842 		}
   9843 
   9844 		count++;
   9845 		DPRINTF(sc, WM_DEBUG_TX,
   9846 		    ("%s: TX: job %d done: descs %d..%d\n",
   9847 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9848 		    txs->txs_lastdesc));
   9849 
   9850 #ifdef WM_EVENT_COUNTERS
   9851 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9852 			WM_Q_EVCNT_INCR(txq, underrun);
   9853 #endif /* WM_EVENT_COUNTERS */
   9854 
   9855 		/*
    9856 		 * The documents for 82574 and newer say that the status field
    9857 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
    9858 		 * Collision) bit (both are reserved); see the "PCIe GbE
    9859 		 * Controller Open Source Software Developer's Manual" and the
    9860 		 * 82574 (and newer) datasheets.
    9861 		 *
    9862 		 * XXX The LC bit was observed set on I218 even though the media
    9863 		 * was full duplex, so the bit might mean something else (undocumented).
   9864 		 */
   9865 
   9866 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9867 		    && ((sc->sc_type < WM_T_82574)
   9868 			|| (sc->sc_type == WM_T_80003))) {
   9869 			if_statinc(ifp, if_oerrors);
   9870 			if (status & WTX_ST_LC)
   9871 				log(LOG_WARNING, "%s: late collision\n",
   9872 				    device_xname(sc->sc_dev));
   9873 			else if (status & WTX_ST_EC) {
   9874 				if_statadd(ifp, if_collisions,
   9875 				    TX_COLLISION_THRESHOLD + 1);
   9876 				log(LOG_WARNING, "%s: excessive collisions\n",
   9877 				    device_xname(sc->sc_dev));
   9878 			}
   9879 		} else
   9880 			if_statinc(ifp, if_opackets);
   9881 
   9882 		txq->txq_packets++;
   9883 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9884 
   9885 		txq->txq_free += txs->txs_ndesc;
   9886 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9887 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9888 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9889 		m_freem(txs->txs_mbuf);
   9890 		txs->txs_mbuf = NULL;
   9891 	}
   9892 
   9893 	/* Update the dirty transmit buffer pointer. */
   9894 	txq->txq_sdirty = i;
   9895 	DPRINTF(sc, WM_DEBUG_TX,
   9896 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9897 
   9898 	if (count != 0)
   9899 		rnd_add_uint32(&sc->rnd_source, count);
   9900 
   9901 	/*
   9902 	 * If there are no more pending transmissions, cancel the watchdog
   9903 	 * timer.
   9904 	 */
   9905 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9906 		txq->txq_sending = false;
   9907 
   9908 	return more;
   9909 }
   9910 
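         /*
          * The wm_rxdesc_get_*() accessors below hide the three Rx
          * descriptor formats from the rest of the driver: the legacy
          * format, the 82574 extended format and the NEWQUEUE (advanced)
          * format.  Likewise, the wm_rxdesc_is_set_*() helpers take the
          * bit corresponding to each of the three formats.
          */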
   9911 static inline uint32_t
   9912 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9913 {
   9914 	struct wm_softc *sc = rxq->rxq_sc;
   9915 
   9916 	if (sc->sc_type == WM_T_82574)
   9917 		return EXTRXC_STATUS(
   9918 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9919 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9920 		return NQRXC_STATUS(
   9921 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9922 	else
   9923 		return rxq->rxq_descs[idx].wrx_status;
   9924 }
   9925 
   9926 static inline uint32_t
   9927 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9928 {
   9929 	struct wm_softc *sc = rxq->rxq_sc;
   9930 
   9931 	if (sc->sc_type == WM_T_82574)
   9932 		return EXTRXC_ERROR(
   9933 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9934 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9935 		return NQRXC_ERROR(
   9936 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9937 	else
   9938 		return rxq->rxq_descs[idx].wrx_errors;
   9939 }
   9940 
   9941 static inline uint16_t
   9942 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9943 {
   9944 	struct wm_softc *sc = rxq->rxq_sc;
   9945 
   9946 	if (sc->sc_type == WM_T_82574)
   9947 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9948 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9949 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9950 	else
   9951 		return rxq->rxq_descs[idx].wrx_special;
   9952 }
   9953 
   9954 static inline int
   9955 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9956 {
   9957 	struct wm_softc *sc = rxq->rxq_sc;
   9958 
   9959 	if (sc->sc_type == WM_T_82574)
   9960 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9961 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9962 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9963 	else
   9964 		return rxq->rxq_descs[idx].wrx_len;
   9965 }
   9966 
   9967 #ifdef WM_DEBUG
   9968 static inline uint32_t
   9969 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9970 {
   9971 	struct wm_softc *sc = rxq->rxq_sc;
   9972 
   9973 	if (sc->sc_type == WM_T_82574)
   9974 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9975 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9976 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9977 	else
   9978 		return 0;
   9979 }
   9980 
   9981 static inline uint8_t
   9982 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9983 {
   9984 	struct wm_softc *sc = rxq->rxq_sc;
   9985 
   9986 	if (sc->sc_type == WM_T_82574)
   9987 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9988 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9989 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9990 	else
   9991 		return 0;
   9992 }
   9993 #endif /* WM_DEBUG */
   9994 
   9995 static inline bool
   9996 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9997     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9998 {
   9999 
   10000 	if (sc->sc_type == WM_T_82574)
   10001 		return (status & ext_bit) != 0;
   10002 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10003 		return (status & nq_bit) != 0;
   10004 	else
   10005 		return (status & legacy_bit) != 0;
   10006 }
   10007 
   10008 static inline bool
   10009 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10010     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10011 {
   10012 
   10013 	if (sc->sc_type == WM_T_82574)
   10014 		return (error & ext_bit) != 0;
   10015 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10016 		return (error & nq_bit) != 0;
   10017 	else
   10018 		return (error & legacy_bit) != 0;
   10019 }
   10020 
   10021 static inline bool
   10022 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10023 {
   10024 
   10025 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10026 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10027 		return true;
   10028 	else
   10029 		return false;
   10030 }
   10031 
   10032 static inline bool
   10033 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10034 {
   10035 	struct wm_softc *sc = rxq->rxq_sc;
   10036 
   10037 	/* XXX missing error bit for newqueue? */
   10038 	if (wm_rxdesc_is_set_error(sc, errors,
   10039 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10040 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10041 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10042 		NQRXC_ERROR_RXE)) {
   10043 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10044 		    EXTRXC_ERROR_SE, 0))
   10045 			log(LOG_WARNING, "%s: symbol error\n",
   10046 			    device_xname(sc->sc_dev));
   10047 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10048 		    EXTRXC_ERROR_SEQ, 0))
   10049 			log(LOG_WARNING, "%s: receive sequence error\n",
   10050 			    device_xname(sc->sc_dev));
   10051 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10052 		    EXTRXC_ERROR_CE, 0))
   10053 			log(LOG_WARNING, "%s: CRC error\n",
   10054 			    device_xname(sc->sc_dev));
   10055 		return true;
   10056 	}
   10057 
   10058 	return false;
   10059 }
   10060 
   10061 static inline bool
   10062 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10063 {
   10064 	struct wm_softc *sc = rxq->rxq_sc;
   10065 
   10066 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10067 		NQRXC_STATUS_DD)) {
   10068 		/* We have processed all of the receive descriptors. */
   10069 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10070 		return false;
   10071 	}
   10072 
   10073 	return true;
   10074 }
   10075 
   10076 static inline bool
   10077 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10078     uint16_t vlantag, struct mbuf *m)
   10079 {
   10080 
   10081 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10082 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10083 		vlan_set_tag(m, le16toh(vlantag));
   10084 	}
   10085 
   10086 	return true;
   10087 }
   10088 
   10089 static inline void
   10090 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10091     uint32_t errors, struct mbuf *m)
   10092 {
   10093 	struct wm_softc *sc = rxq->rxq_sc;
   10094 
   10095 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10096 		if (wm_rxdesc_is_set_status(sc, status,
   10097 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10098 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10099 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10100 			if (wm_rxdesc_is_set_error(sc, errors,
   10101 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10102 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10103 		}
   10104 		if (wm_rxdesc_is_set_status(sc, status,
   10105 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10106 			/*
   10107 			 * Note: we don't know if this was TCP or UDP,
   10108 			 * so we just set both bits, and expect the
   10109 			 * upper layers to deal.
   10110 			 */
   10111 			WM_Q_EVCNT_INCR(rxq, tusum);
   10112 			m->m_pkthdr.csum_flags |=
   10113 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10114 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10115 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10116 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10117 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10118 		}
   10119 	}
   10120 }
   10121 
   10122 /*
   10123  * wm_rxeof:
   10124  *
   10125  *	Helper; handle receive interrupts.
   10126  */
   10127 static bool
   10128 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10129 {
   10130 	struct wm_softc *sc = rxq->rxq_sc;
   10131 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10132 	struct wm_rxsoft *rxs;
   10133 	struct mbuf *m;
   10134 	int i, len;
   10135 	int count = 0;
   10136 	uint32_t status, errors;
   10137 	uint16_t vlantag;
   10138 	bool more = false;
   10139 
   10140 	KASSERT(mutex_owned(rxq->rxq_lock));
   10141 
   10142 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10143 		rxs = &rxq->rxq_soft[i];
   10144 
   10145 		DPRINTF(sc, WM_DEBUG_RX,
   10146 		    ("%s: RX: checking descriptor %d\n",
   10147 			device_xname(sc->sc_dev), i));
   10148 		wm_cdrxsync(rxq, i,
   10149 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10150 
   10151 		status = wm_rxdesc_get_status(rxq, i);
   10152 		errors = wm_rxdesc_get_errors(rxq, i);
   10153 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10154 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10155 #ifdef WM_DEBUG
   10156 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10157 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10158 #endif
   10159 
   10160 		if (!wm_rxdesc_dd(rxq, i, status))
   10161 			break;
   10162 
   10163 		if (limit-- == 0) {
   10164 			more = true;
   10165 			DPRINTF(sc, WM_DEBUG_RX,
   10166 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10167 				device_xname(sc->sc_dev), i));
   10168 			break;
   10169 		}
   10170 
   10171 		count++;
   10172 		if (__predict_false(rxq->rxq_discard)) {
   10173 			DPRINTF(sc, WM_DEBUG_RX,
   10174 			    ("%s: RX: discarding contents of descriptor %d\n",
   10175 				device_xname(sc->sc_dev), i));
   10176 			wm_init_rxdesc(rxq, i);
   10177 			if (wm_rxdesc_is_eop(rxq, status)) {
   10178 				/* Reset our state. */
   10179 				DPRINTF(sc, WM_DEBUG_RX,
   10180 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10181 					device_xname(sc->sc_dev)));
   10182 				rxq->rxq_discard = 0;
   10183 			}
   10184 			continue;
   10185 		}
   10186 
   10187 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10188 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10189 
   10190 		m = rxs->rxs_mbuf;
   10191 
   10192 		/*
   10193 		 * Add a new receive buffer to the ring, unless of
   10194 		 * course the length is zero. Treat the latter as a
   10195 		 * failed mapping.
   10196 		 */
   10197 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10198 			/*
   10199 			 * Failed, throw away what we've done so
   10200 			 * far, and discard the rest of the packet.
   10201 			 */
   10202 			if_statinc(ifp, if_ierrors);
   10203 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10204 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10205 			wm_init_rxdesc(rxq, i);
   10206 			if (!wm_rxdesc_is_eop(rxq, status))
   10207 				rxq->rxq_discard = 1;
   10208 			if (rxq->rxq_head != NULL)
   10209 				m_freem(rxq->rxq_head);
   10210 			WM_RXCHAIN_RESET(rxq);
   10211 			DPRINTF(sc, WM_DEBUG_RX,
   10212 			    ("%s: RX: Rx buffer allocation failed, "
   10213 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10214 				rxq->rxq_discard ? " (discard)" : ""));
   10215 			continue;
   10216 		}
   10217 
   10218 		m->m_len = len;
   10219 		rxq->rxq_len += len;
   10220 		DPRINTF(sc, WM_DEBUG_RX,
   10221 		    ("%s: RX: buffer at %p len %d\n",
   10222 			device_xname(sc->sc_dev), m->m_data, len));
   10223 
   10224 		/* If this is not the end of the packet, keep looking. */
   10225 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10226 			WM_RXCHAIN_LINK(rxq, m);
   10227 			DPRINTF(sc, WM_DEBUG_RX,
   10228 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10229 				device_xname(sc->sc_dev), rxq->rxq_len));
   10230 			continue;
   10231 		}
   10232 
   10233 		/*
    10234 		 * Okay, we have the entire packet now.  The chip is
    10235 		 * configured to include the FCS except on I35[04] and I21[01]
    10236 		 * (not all chips can be configured to strip it), so we need
    10237 		 * to trim it.  Those chips have an errata: the RCTL_SECRC bit
    10238 		 * in the RCTL register is always set, so we don't trim it on
    10239 		 * them.  PCH2 and newer chips also don't include the FCS when
    10240 		 * jumbo frames are used, to work around an errata.
    10241 		 * We may need to adjust the length of the previous mbuf in
    10242 		 * the chain if the current mbuf is too short.
   10243 		 */
   10244 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10245 			if (m->m_len < ETHER_CRC_LEN) {
   10246 				rxq->rxq_tail->m_len
   10247 				    -= (ETHER_CRC_LEN - m->m_len);
   10248 				m->m_len = 0;
   10249 			} else
   10250 				m->m_len -= ETHER_CRC_LEN;
   10251 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10252 		} else
   10253 			len = rxq->rxq_len;
   10254 
   10255 		WM_RXCHAIN_LINK(rxq, m);
   10256 
   10257 		*rxq->rxq_tailp = NULL;
   10258 		m = rxq->rxq_head;
   10259 
   10260 		WM_RXCHAIN_RESET(rxq);
   10261 
   10262 		DPRINTF(sc, WM_DEBUG_RX,
   10263 		    ("%s: RX: have entire packet, len -> %d\n",
   10264 			device_xname(sc->sc_dev), len));
   10265 
   10266 		/* If an error occurred, update stats and drop the packet. */
   10267 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10268 			m_freem(m);
   10269 			continue;
   10270 		}
   10271 
   10272 		/* No errors.  Receive the packet. */
   10273 		m_set_rcvif(m, ifp);
   10274 		m->m_pkthdr.len = len;
   10275 		/*
   10276 		 * TODO
    10277 		 * rsshash and rsstype should be saved to this mbuf.
   10278 		 */
   10279 		DPRINTF(sc, WM_DEBUG_RX,
   10280 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10281 			device_xname(sc->sc_dev), rsstype, rsshash));
   10282 
   10283 		/*
   10284 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10285 		 * for us.  Associate the tag with the packet.
   10286 		 */
   10287 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10288 			continue;
   10289 
   10290 		/* Set up checksum info for this packet. */
   10291 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10292 
   10293 		rxq->rxq_packets++;
   10294 		rxq->rxq_bytes += len;
   10295 		/* Pass it on. */
   10296 		if_percpuq_enqueue(sc->sc_ipq, m);
   10297 
   10298 		if (rxq->rxq_stopping)
   10299 			break;
   10300 	}
   10301 	rxq->rxq_ptr = i;
   10302 
   10303 	if (count != 0)
   10304 		rnd_add_uint32(&sc->rnd_source, count);
   10305 
   10306 	DPRINTF(sc, WM_DEBUG_RX,
   10307 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10308 
   10309 	return more;
   10310 }
   10311 
   10312 /*
   10313  * wm_linkintr_gmii:
   10314  *
   10315  *	Helper; handle link interrupts for GMII.
   10316  */
   10317 static void
   10318 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10319 {
   10320 	device_t dev = sc->sc_dev;
   10321 	uint32_t status, reg;
   10322 	bool link;
   10323 	bool dopoll = true;
   10324 	int rv;
   10325 
   10326 	KASSERT(mutex_owned(sc->sc_core_lock));
   10327 
   10328 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10329 		__func__));
   10330 
   10331 	if ((icr & ICR_LSC) == 0) {
   10332 		if (icr & ICR_RXSEQ)
   10333 			DPRINTF(sc, WM_DEBUG_LINK,
   10334 			    ("%s: LINK Receive sequence error\n",
   10335 				device_xname(dev)));
   10336 		return;
   10337 	}
   10338 
   10339 	/* Link status changed */
   10340 	status = CSR_READ(sc, WMREG_STATUS);
   10341 	link = status & STATUS_LU;
   10342 	if (link) {
   10343 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10344 			device_xname(dev),
   10345 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10346 		if (wm_phy_need_linkdown_discard(sc)) {
   10347 			DPRINTF(sc, WM_DEBUG_LINK,
   10348 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10349 				device_xname(dev)));
   10350 			wm_clear_linkdown_discard(sc);
   10351 		}
   10352 	} else {
   10353 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10354 			device_xname(dev)));
   10355 		if (wm_phy_need_linkdown_discard(sc)) {
   10356 			DPRINTF(sc, WM_DEBUG_LINK,
   10357 			    ("%s: linkintr: Set linkdown discard flag\n",
   10358 				device_xname(dev)));
   10359 			wm_set_linkdown_discard(sc);
   10360 		}
   10361 	}
   10362 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10363 		wm_gig_downshift_workaround_ich8lan(sc);
   10364 
   10365 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10366 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10367 
   10368 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10369 		device_xname(dev)));
   10370 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10371 		if (link) {
   10372 			/*
    10373 			 * To work around the problem, it's required to wait
    10374 			 * several hundred milliseconds.  The time depends
    10375 			 * on the environment.  Wait 1 second to be safe.
   10376 			 */
   10377 			dopoll = false;
   10378 			getmicrotime(&sc->sc_linkup_delay_time);
   10379 			sc->sc_linkup_delay_time.tv_sec += 1;
   10380 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   10381 			/*
   10382 			 * Simplify by checking tv_sec only. It's enough.
   10383 			 *
   10384 			 * Currently, it's not required to clear the time.
   10385 			 * It's just to know the timer is stopped
   10386 			 * (for debugging).
   10387 			 */
   10388 
   10389 			sc->sc_linkup_delay_time.tv_sec = 0;
   10390 			sc->sc_linkup_delay_time.tv_usec = 0;
   10391 		}
   10392 	}
   10393 
   10394 	/*
   10395 	 * Call mii_pollstat().
   10396 	 *
    10397 	 * On some (not all) systems using I35[04] or I21[01], packets are
    10398 	 * not sent soon after link-up: the MAC sends a packet to the PHY and
    10399 	 * no error is observed.  This behavior causes gratuitous ARP and/or
    10400 	 * IPv6 DAD packets to be silently dropped.  To avoid this problem,
    10401 	 * don't call mii_pollstat() here, which would send a LINK_STATE_UP
    10402 	 * notification to the upper layer.  Instead, mii_pollstat() will be
    10403 	 * called in wm_gmii_mediastatus(), or mii_tick() in wm_tick().
   10404 	 */
   10405 	if (dopoll)
   10406 		mii_pollstat(&sc->sc_mii);
   10407 
   10408 	/* Do some workarounds soon after link status is changed. */
   10409 
   10410 	if (sc->sc_type == WM_T_82543) {
   10411 		int miistatus, active;
   10412 
   10413 		/*
   10414 		 * With 82543, we need to force speed and
   10415 		 * duplex on the MAC equal to what the PHY
   10416 		 * speed and duplex configuration is.
   10417 		 */
   10418 		miistatus = sc->sc_mii.mii_media_status;
   10419 
   10420 		if (miistatus & IFM_ACTIVE) {
   10421 			active = sc->sc_mii.mii_media_active;
   10422 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10423 			switch (IFM_SUBTYPE(active)) {
   10424 			case IFM_10_T:
   10425 				sc->sc_ctrl |= CTRL_SPEED_10;
   10426 				break;
   10427 			case IFM_100_TX:
   10428 				sc->sc_ctrl |= CTRL_SPEED_100;
   10429 				break;
   10430 			case IFM_1000_T:
   10431 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10432 				break;
   10433 			default:
   10434 				/*
   10435 				 * Fiber?
    10436 				 * Should not enter here.
   10437 				 */
   10438 				device_printf(dev, "unknown media (%x)\n",
   10439 				    active);
   10440 				break;
   10441 			}
   10442 			if (active & IFM_FDX)
   10443 				sc->sc_ctrl |= CTRL_FD;
   10444 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10445 		}
   10446 	} else if (sc->sc_type == WM_T_PCH) {
   10447 		wm_k1_gig_workaround_hv(sc,
   10448 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10449 	}
   10450 
   10451 	/*
   10452 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10453 	 * aggressive resulting in many collisions. To avoid this, increase
   10454 	 * the IPG and reduce Rx latency in the PHY.
   10455 	 */
   10456 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10457 	    && link) {
   10458 		uint32_t tipg_reg;
   10459 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10460 		bool fdx;
   10461 		uint16_t emi_addr, emi_val;
   10462 
   10463 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10464 		tipg_reg &= ~TIPG_IPGT_MASK;
   10465 		fdx = status & STATUS_FD;
   10466 
   10467 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10468 			tipg_reg |= 0xff;
   10469 			/* Reduce Rx latency in analog PHY */
   10470 			emi_val = 0;
   10471 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10472 		    fdx && speed != STATUS_SPEED_1000) {
   10473 			tipg_reg |= 0xc;
   10474 			emi_val = 1;
   10475 		} else {
   10476 			/* Roll back the default values */
   10477 			tipg_reg |= 0x08;
   10478 			emi_val = 1;
   10479 		}
   10480 
   10481 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10482 
   10483 		rv = sc->phy.acquire(sc);
   10484 		if (rv)
   10485 			return;
   10486 
   10487 		if (sc->sc_type == WM_T_PCH2)
   10488 			emi_addr = I82579_RX_CONFIG;
   10489 		else
   10490 			emi_addr = I217_RX_CONFIG;
   10491 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10492 
   10493 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10494 			uint16_t phy_reg;
   10495 
   10496 			sc->phy.readreg_locked(dev, 2,
   10497 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10498 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10499 			if (speed == STATUS_SPEED_100
   10500 			    || speed == STATUS_SPEED_10)
   10501 				phy_reg |= 0x3e8;
   10502 			else
   10503 				phy_reg |= 0xfa;
   10504 			sc->phy.writereg_locked(dev, 2,
   10505 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10506 
   10507 			if (speed == STATUS_SPEED_1000) {
   10508 				sc->phy.readreg_locked(dev, 2,
   10509 				    HV_PM_CTRL, &phy_reg);
   10510 
   10511 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10512 
   10513 				sc->phy.writereg_locked(dev, 2,
   10514 				    HV_PM_CTRL, phy_reg);
   10515 			}
   10516 		}
   10517 		sc->phy.release(sc);
   10518 
   10519 		if (rv)
   10520 			return;
   10521 
   10522 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10523 			uint16_t data, ptr_gap;
   10524 
   10525 			if (speed == STATUS_SPEED_1000) {
   10526 				rv = sc->phy.acquire(sc);
   10527 				if (rv)
   10528 					return;
   10529 
   10530 				rv = sc->phy.readreg_locked(dev, 2,
   10531 				    I82579_UNKNOWN1, &data);
   10532 				if (rv) {
   10533 					sc->phy.release(sc);
   10534 					return;
   10535 				}
   10536 
   10537 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10538 				if (ptr_gap < 0x18) {
   10539 					data &= ~(0x3ff << 2);
   10540 					data |= (0x18 << 2);
   10541 					rv = sc->phy.writereg_locked(dev,
   10542 					    2, I82579_UNKNOWN1, data);
   10543 				}
   10544 				sc->phy.release(sc);
   10545 				if (rv)
   10546 					return;
   10547 			} else {
   10548 				rv = sc->phy.acquire(sc);
   10549 				if (rv)
   10550 					return;
   10551 
   10552 				rv = sc->phy.writereg_locked(dev, 2,
   10553 				    I82579_UNKNOWN1, 0xc023);
   10554 				sc->phy.release(sc);
   10555 				if (rv)
   10556 					return;
   10557 
   10558 			}
   10559 		}
   10560 	}
   10561 
   10562 	/*
   10563 	 * I217 Packet Loss issue:
   10564 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10565 	 * on power up.
   10566 	 * Set the Beacon Duration for I217 to 8 usec
   10567 	 */
   10568 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10569 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10570 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10571 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10572 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10573 	}
   10574 
    10575 	/* Work around the I218 hang issue */
   10576 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10577 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10578 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10579 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10580 		wm_k1_workaround_lpt_lp(sc, link);
   10581 
   10582 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10583 		/*
   10584 		 * Set platform power management values for Latency
   10585 		 * Tolerance Reporting (LTR)
   10586 		 */
   10587 		wm_platform_pm_pch_lpt(sc,
   10588 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10589 	}
   10590 
   10591 	/* Clear link partner's EEE ability */
   10592 	sc->eee_lp_ability = 0;
   10593 
   10594 	/* FEXTNVM6 K1-off workaround */
   10595 	if (sc->sc_type == WM_T_PCH_SPT) {
   10596 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10597 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10598 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10599 		else
   10600 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10601 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10602 	}
   10603 
   10604 	if (!link)
   10605 		return;
   10606 
   10607 	switch (sc->sc_type) {
   10608 	case WM_T_PCH2:
   10609 		wm_k1_workaround_lv(sc);
   10610 		/* FALLTHROUGH */
   10611 	case WM_T_PCH:
   10612 		if (sc->sc_phytype == WMPHY_82578)
   10613 			wm_link_stall_workaround_hv(sc);
   10614 		break;
   10615 	default:
   10616 		break;
   10617 	}
   10618 
   10619 	/* Enable/Disable EEE after link up */
   10620 	if (sc->sc_phytype > WMPHY_82579)
   10621 		wm_set_eee_pchlan(sc);
   10622 }
   10623 
   10624 /*
   10625  * wm_linkintr_tbi:
   10626  *
   10627  *	Helper; handle link interrupts for TBI mode.
   10628  */
   10629 static void
   10630 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10631 {
   10632 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10633 	uint32_t status;
   10634 
   10635 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10636 		__func__));
   10637 
   10638 	status = CSR_READ(sc, WMREG_STATUS);
   10639 	if (icr & ICR_LSC) {
   10640 		wm_check_for_link(sc);
   10641 		if (status & STATUS_LU) {
   10642 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10643 				device_xname(sc->sc_dev),
   10644 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10645 			/*
   10646 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10647 			 * so we should update sc->sc_ctrl
   10648 			 */
   10649 
   10650 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10651 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10652 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10653 			if (status & STATUS_FD)
   10654 				sc->sc_tctl |=
   10655 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10656 			else
   10657 				sc->sc_tctl |=
   10658 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10659 			if (sc->sc_ctrl & CTRL_TFCE)
   10660 				sc->sc_fcrtl |= FCRTL_XONE;
   10661 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10662 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10663 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10664 			sc->sc_tbi_linkup = 1;
   10665 			if_link_state_change(ifp, LINK_STATE_UP);
   10666 		} else {
   10667 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10668 				device_xname(sc->sc_dev)));
   10669 			sc->sc_tbi_linkup = 0;
   10670 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10671 		}
   10672 		/* Update LED */
   10673 		wm_tbi_serdes_set_linkled(sc);
   10674 	} else if (icr & ICR_RXSEQ)
   10675 		DPRINTF(sc, WM_DEBUG_LINK,
   10676 		    ("%s: LINK: Receive sequence error\n",
   10677 			device_xname(sc->sc_dev)));
   10678 }
   10679 
   10680 /*
   10681  * wm_linkintr_serdes:
   10682  *
    10683  *	Helper; handle link interrupts for SERDES mode.
   10684  */
   10685 static void
   10686 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10687 {
   10688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10689 	struct mii_data *mii = &sc->sc_mii;
   10690 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10691 	uint32_t pcs_adv, pcs_lpab, reg;
   10692 
   10693 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10694 		__func__));
   10695 
   10696 	if (icr & ICR_LSC) {
   10697 		/* Check PCS */
   10698 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10699 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10700 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10701 				device_xname(sc->sc_dev)));
   10702 			mii->mii_media_status |= IFM_ACTIVE;
   10703 			sc->sc_tbi_linkup = 1;
   10704 			if_link_state_change(ifp, LINK_STATE_UP);
   10705 		} else {
   10706 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10707 				device_xname(sc->sc_dev)));
   10708 			mii->mii_media_status |= IFM_NONE;
   10709 			sc->sc_tbi_linkup = 0;
   10710 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10711 			wm_tbi_serdes_set_linkled(sc);
   10712 			return;
   10713 		}
   10714 		mii->mii_media_active |= IFM_1000_SX;
   10715 		if ((reg & PCS_LSTS_FDX) != 0)
   10716 			mii->mii_media_active |= IFM_FDX;
   10717 		else
   10718 			mii->mii_media_active |= IFM_HDX;
   10719 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10720 			/* Check flow */
   10721 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10722 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10723 				DPRINTF(sc, WM_DEBUG_LINK,
   10724 				    ("XXX LINKOK but not ACOMP\n"));
   10725 				return;
   10726 			}
   10727 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10728 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10729 			DPRINTF(sc, WM_DEBUG_LINK,
   10730 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10731 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10732 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10733 				mii->mii_media_active |= IFM_FLOW
   10734 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10735 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10736 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10737 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10738 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10739 				mii->mii_media_active |= IFM_FLOW
   10740 				    | IFM_ETH_TXPAUSE;
   10741 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10742 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10743 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10744 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10745 				mii->mii_media_active |= IFM_FLOW
   10746 				    | IFM_ETH_RXPAUSE;
   10747 		}
   10748 		/* Update LED */
   10749 		wm_tbi_serdes_set_linkled(sc);
   10750 	} else
   10751 		DPRINTF(sc, WM_DEBUG_LINK,
   10752 		    ("%s: LINK: Receive sequence error\n",
   10753 		    device_xname(sc->sc_dev)));
   10754 }
   10755 
   10756 /*
   10757  * wm_linkintr:
   10758  *
   10759  *	Helper; handle link interrupts.
   10760  */
   10761 static void
   10762 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10763 {
   10764 
   10765 	KASSERT(mutex_owned(sc->sc_core_lock));
   10766 
   10767 	if (sc->sc_flags & WM_F_HAS_MII)
   10768 		wm_linkintr_gmii(sc, icr);
   10769 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10770 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10771 		wm_linkintr_serdes(sc, icr);
   10772 	else
   10773 		wm_linkintr_tbi(sc, icr);
   10774 }
   10775 
   10776 
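          /*
           * wm_sched_handle_queue:
           *
           *	Schedule deferred Tx/Rx processing for the given queue on
           *	either the per-queue workqueue or a softint, depending on
           *	the current mode. The wmq_wq_enqueued flag keeps the same
           *	work item from being enqueued twice.
           */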
   10777 static inline void
   10778 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10779 {
   10780 
   10781 	if (wmq->wmq_txrx_use_workqueue) {
   10782 		if (!wmq->wmq_wq_enqueued) {
   10783 			wmq->wmq_wq_enqueued = true;
   10784 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10785 			    curcpu());
   10786 		}
   10787 	} else
   10788 		softint_schedule(wmq->wmq_si);
   10789 }
   10790 
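          /*
           * wm_legacy_intr_disable/enable:
           *
           *	Mask or unmask all interrupts used by the driver when it
           *	runs with INTx or MSI (non-MSI-X) interrupts.
           */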
   10791 static inline void
   10792 wm_legacy_intr_disable(struct wm_softc *sc)
   10793 {
   10794 
   10795 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10796 }
   10797 
   10798 static inline void
   10799 wm_legacy_intr_enable(struct wm_softc *sc)
   10800 {
   10801 
   10802 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10803 }
   10804 
   10805 /*
   10806  * wm_intr_legacy:
   10807  *
   10808  *	Interrupt service routine for INTx and MSI.
   10809  */
   10810 static int
   10811 wm_intr_legacy(void *arg)
   10812 {
   10813 	struct wm_softc *sc = arg;
   10814 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10815 	struct wm_queue *wmq = &sc->sc_queue[0];
   10816 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10817 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10818 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10819 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10820 	uint32_t icr, rndval = 0;
   10821 	bool more = false;
   10822 
   10823 	icr = CSR_READ(sc, WMREG_ICR);
   10824 	if ((icr & sc->sc_icr) == 0)
   10825 		return 0;
   10826 
   10827 	DPRINTF(sc, WM_DEBUG_TX,
    10828 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
    10829 	rndval = icr;
   10831 
   10832 	mutex_enter(txq->txq_lock);
   10833 
   10834 	if (txq->txq_stopping) {
   10835 		mutex_exit(txq->txq_lock);
   10836 		return 1;
   10837 	}
   10838 
   10839 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10840 	if (icr & ICR_TXDW) {
   10841 		DPRINTF(sc, WM_DEBUG_TX,
   10842 		    ("%s: TX: got TXDW interrupt\n",
   10843 			device_xname(sc->sc_dev)));
   10844 		WM_Q_EVCNT_INCR(txq, txdw);
   10845 	}
   10846 #endif
   10847 	if (txlimit > 0) {
   10848 		more |= wm_txeof(txq, txlimit);
   10849 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10850 			more = true;
   10851 	} else
   10852 		more = true;
   10853 	mutex_exit(txq->txq_lock);
   10854 
   10855 	mutex_enter(rxq->rxq_lock);
   10856 
   10857 	if (rxq->rxq_stopping) {
   10858 		mutex_exit(rxq->rxq_lock);
   10859 		return 1;
   10860 	}
   10861 
   10862 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10863 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10864 		DPRINTF(sc, WM_DEBUG_RX,
   10865 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10866 			device_xname(sc->sc_dev),
   10867 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10868 		WM_Q_EVCNT_INCR(rxq, intr);
   10869 	}
   10870 #endif
   10871 	if (rxlimit > 0) {
    10872 		/*
    10873 		 * wm_rxeof() does *not* call upper layer functions directly,
    10874 		 * as if_percpuq_enqueue() just calls softint_schedule().
    10875 		 * So, it is safe to call wm_rxeof() in interrupt context.
    10876 		 */
    10877 		more |= wm_rxeof(rxq, rxlimit);
   10878 	} else
   10879 		more = true;
   10880 
   10881 	mutex_exit(rxq->rxq_lock);
   10882 
   10883 	mutex_enter(sc->sc_core_lock);
   10884 
   10885 	if (sc->sc_core_stopping) {
   10886 		mutex_exit(sc->sc_core_lock);
   10887 		return 1;
   10888 	}
   10889 
   10890 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10891 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10892 		wm_linkintr(sc, icr);
   10893 	}
   10894 	if ((icr & ICR_GPI(0)) != 0)
   10895 		device_printf(sc->sc_dev, "got module interrupt\n");
   10896 
   10897 	mutex_exit(sc->sc_core_lock);
   10898 
   10899 	if (icr & ICR_RXO) {
   10900 #if defined(WM_DEBUG)
   10901 		log(LOG_WARNING, "%s: Receive overrun\n",
   10902 		    device_xname(sc->sc_dev));
   10903 #endif /* defined(WM_DEBUG) */
   10904 	}
   10905 
   10906 	rnd_add_uint32(&sc->rnd_source, rndval);
   10907 
   10908 	if (more) {
   10909 		/* Try to get more packets going. */
   10910 		wm_legacy_intr_disable(sc);
   10911 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10912 		wm_sched_handle_queue(sc, wmq);
   10913 	}
   10914 
   10915 	return 1;
   10916 }
   10917 
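          /*
           * wm_txrxintr_disable:
           *
           *	Mask the Tx/Rx interrupts of the given queue: the 82574 uses
           *	the legacy IMC register, the 82575 has per-queue EIMC bits,
           *	and newer MSI-X devices use one EIMC bit per vector.
           */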
   10918 static inline void
   10919 wm_txrxintr_disable(struct wm_queue *wmq)
   10920 {
   10921 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10922 
   10923 	if (__predict_false(!wm_is_using_msix(sc))) {
   10924 		wm_legacy_intr_disable(sc);
   10925 		return;
   10926 	}
   10927 
   10928 	if (sc->sc_type == WM_T_82574)
   10929 		CSR_WRITE(sc, WMREG_IMC,
   10930 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10931 	else if (sc->sc_type == WM_T_82575)
   10932 		CSR_WRITE(sc, WMREG_EIMC,
   10933 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10934 	else
   10935 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10936 }
   10937 
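          /*
           * wm_txrxintr_enable:
           *
           *	Recalculate the interrupt throttling rate, then unmask the
           *	Tx/Rx interrupts of the given queue (and, on the 82574,
           *	ICR_OTHER as well).
           */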
   10938 static inline void
   10939 wm_txrxintr_enable(struct wm_queue *wmq)
   10940 {
   10941 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10942 
   10943 	wm_itrs_calculate(sc, wmq);
   10944 
   10945 	if (__predict_false(!wm_is_using_msix(sc))) {
   10946 		wm_legacy_intr_enable(sc);
   10947 		return;
   10948 	}
   10949 
    10950 	/*
    10951 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    10952 	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
    10953 	 * ICR_OTHER first, because each RXQ/TXQ interrupt remains disabled
    10954 	 * while its wm_handle_queue(wmq) is running.
    10955 	 */
   10956 	if (sc->sc_type == WM_T_82574)
   10957 		CSR_WRITE(sc, WMREG_IMS,
   10958 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10959 	else if (sc->sc_type == WM_T_82575)
   10960 		CSR_WRITE(sc, WMREG_EIMS,
   10961 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10962 	else
   10963 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10964 }
   10965 
   10966 static int
   10967 wm_txrxintr_msix(void *arg)
   10968 {
   10969 	struct wm_queue *wmq = arg;
   10970 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10971 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10972 	struct wm_softc *sc = txq->txq_sc;
   10973 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10974 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10975 	bool txmore;
   10976 	bool rxmore;
   10977 
   10978 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10979 
   10980 	DPRINTF(sc, WM_DEBUG_TX,
   10981 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10982 
   10983 	wm_txrxintr_disable(wmq);
   10984 
   10985 	mutex_enter(txq->txq_lock);
   10986 
   10987 	if (txq->txq_stopping) {
   10988 		mutex_exit(txq->txq_lock);
   10989 		return 1;
   10990 	}
   10991 
   10992 	WM_Q_EVCNT_INCR(txq, txdw);
   10993 	if (txlimit > 0) {
   10994 		txmore = wm_txeof(txq, txlimit);
    10995 		/* The deferred start is done in wm_handle_queue(). */
   10996 	} else
   10997 		txmore = true;
   10998 	mutex_exit(txq->txq_lock);
   10999 
   11000 	DPRINTF(sc, WM_DEBUG_RX,
   11001 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   11002 	mutex_enter(rxq->rxq_lock);
   11003 
   11004 	if (rxq->rxq_stopping) {
   11005 		mutex_exit(rxq->rxq_lock);
   11006 		return 1;
   11007 	}
   11008 
   11009 	WM_Q_EVCNT_INCR(rxq, intr);
   11010 	if (rxlimit > 0) {
   11011 		rxmore = wm_rxeof(rxq, rxlimit);
   11012 	} else
   11013 		rxmore = true;
   11014 	mutex_exit(rxq->rxq_lock);
   11015 
   11016 	wm_itrs_writereg(sc, wmq);
   11017 
   11018 	if (txmore || rxmore) {
   11019 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11020 		wm_sched_handle_queue(sc, wmq);
   11021 	} else
   11022 		wm_txrxintr_enable(wmq);
   11023 
   11024 	return 1;
   11025 }
   11026 
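          /*
           * wm_handle_queue:
           *
           *	Softint/workqueue handler that continues the Tx/Rx processing
           *	the interrupt handler deferred, using the larger non-interrupt
           *	process limits. It reschedules itself while work remains and
           *	re-enables the queue interrupts when done.
           */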
   11027 static void
   11028 wm_handle_queue(void *arg)
   11029 {
   11030 	struct wm_queue *wmq = arg;
   11031 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11032 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11033 	struct wm_softc *sc = txq->txq_sc;
   11034 	u_int txlimit = sc->sc_tx_process_limit;
   11035 	u_int rxlimit = sc->sc_rx_process_limit;
   11036 	bool txmore;
   11037 	bool rxmore;
   11038 
   11039 	mutex_enter(txq->txq_lock);
   11040 	if (txq->txq_stopping) {
   11041 		mutex_exit(txq->txq_lock);
   11042 		return;
   11043 	}
   11044 	txmore = wm_txeof(txq, txlimit);
   11045 	wm_deferred_start_locked(txq);
   11046 	mutex_exit(txq->txq_lock);
   11047 
   11048 	mutex_enter(rxq->rxq_lock);
   11049 	if (rxq->rxq_stopping) {
   11050 		mutex_exit(rxq->rxq_lock);
   11051 		return;
   11052 	}
   11053 	WM_Q_EVCNT_INCR(rxq, defer);
   11054 	rxmore = wm_rxeof(rxq, rxlimit);
   11055 	mutex_exit(rxq->rxq_lock);
   11056 
   11057 	if (txmore || rxmore) {
   11058 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11059 		wm_sched_handle_queue(sc, wmq);
   11060 	} else
   11061 		wm_txrxintr_enable(wmq);
   11062 }
   11063 
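          /*
           * wm_handle_queue_work:
           *
           *	Workqueue wrapper around wm_handle_queue().
           */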
   11064 static void
   11065 wm_handle_queue_work(struct work *wk, void *context)
   11066 {
   11067 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11068 
    11069 	/*
    11070 	 * Workaround for some qemu environments, which do not stop
    11071 	 * interrupts immediately.
    11072 	 */
   11073 	wmq->wmq_wq_enqueued = false;
   11074 	wm_handle_queue(wmq);
   11075 }
   11076 
   11077 /*
   11078  * wm_linkintr_msix:
   11079  *
   11080  *	Interrupt service routine for link status change for MSI-X.
   11081  */
   11082 static int
   11083 wm_linkintr_msix(void *arg)
   11084 {
   11085 	struct wm_softc *sc = arg;
   11086 	uint32_t reg;
    11087 	bool has_rxo = false;
   11088 
   11089 	reg = CSR_READ(sc, WMREG_ICR);
   11090 	mutex_enter(sc->sc_core_lock);
   11091 	DPRINTF(sc, WM_DEBUG_LINK,
   11092 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11093 		device_xname(sc->sc_dev), reg));
   11094 
   11095 	if (sc->sc_core_stopping)
   11096 		goto out;
   11097 
   11098 	if ((reg & ICR_LSC) != 0) {
   11099 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11100 		wm_linkintr(sc, ICR_LSC);
   11101 	}
   11102 	if ((reg & ICR_GPI(0)) != 0)
   11103 		device_printf(sc->sc_dev, "got module interrupt\n");
   11104 
    11105 	/*
    11106 	 * XXX 82574 MSI-X mode workaround
    11107 	 *
    11108 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    11109 	 * MSI-X vector and raises neither the ICR_RXQ(0) nor the ICR_RXQ(1)
    11110 	 * vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by
    11111 	 * writing WMREG_ICS to process receive packets.
    11112 	 */
   11113 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11114 #if defined(WM_DEBUG)
   11115 		log(LOG_WARNING, "%s: Receive overrun\n",
   11116 		    device_xname(sc->sc_dev));
   11117 #endif /* defined(WM_DEBUG) */
   11118 
   11119 		has_rxo = true;
    11120 		/*
    11121 		 * The RXO interrupt fires at a very high rate under heavy
    11122 		 * receive traffic, so handle ICR_OTHER in polling mode like
    11123 		 * the Tx/Rx interrupts. ICR_OTHER will be re-enabled at the
    11124 		 * end of wm_txrxintr_msix(), which is kicked by both the
    11125 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    11126 		 */
   11127 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11128 
   11129 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11130 	}
    11131 
   11134 out:
   11135 	mutex_exit(sc->sc_core_lock);
   11136 
   11137 	if (sc->sc_type == WM_T_82574) {
   11138 		if (!has_rxo)
   11139 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11140 		else
   11141 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11142 	} else if (sc->sc_type == WM_T_82575)
   11143 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11144 	else
   11145 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11146 
   11147 	return 1;
   11148 }
   11149 
   11150 /*
   11151  * Media related.
   11152  * GMII, SGMII, TBI (and SERDES)
   11153  */
   11154 
   11155 /* Common */
   11156 
   11157 /*
   11158  * wm_tbi_serdes_set_linkled:
   11159  *
   11160  *	Update the link LED on TBI and SERDES devices.
   11161  */
   11162 static void
   11163 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11164 {
   11165 
   11166 	if (sc->sc_tbi_linkup)
   11167 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11168 	else
   11169 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11170 
    11171 	/* On 82540 and newer devices, the link LED pin is active-low */
   11172 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11173 
   11174 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11175 }
   11176 
   11177 /* GMII related */
   11178 
   11179 /*
   11180  * wm_gmii_reset:
   11181  *
   11182  *	Reset the PHY.
   11183  */
   11184 static void
   11185 wm_gmii_reset(struct wm_softc *sc)
   11186 {
   11187 	uint32_t reg;
   11188 	int rv;
   11189 
   11190 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11191 		device_xname(sc->sc_dev), __func__));
   11192 
   11193 	rv = sc->phy.acquire(sc);
   11194 	if (rv != 0) {
   11195 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11196 		    __func__);
   11197 		return;
   11198 	}
   11199 
   11200 	switch (sc->sc_type) {
   11201 	case WM_T_82542_2_0:
   11202 	case WM_T_82542_2_1:
   11203 		/* null */
   11204 		break;
   11205 	case WM_T_82543:
    11206 		/*
    11207 		 * With the 82543, we need to force the MAC's speed and
    11208 		 * duplex to match the PHY's speed and duplex configuration.
    11209 		 * In addition, we need to pulse the PHY's hardware reset
    11210 		 * pin to take it out of reset.
    11211 		 */
   11212 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11213 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11214 
   11215 		/* The PHY reset pin is active-low. */
   11216 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11217 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11218 		    CTRL_EXT_SWDPIN(4));
   11219 		reg |= CTRL_EXT_SWDPIO(4);
   11220 
   11221 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11222 		CSR_WRITE_FLUSH(sc);
   11223 		delay(10*1000);
   11224 
   11225 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11226 		CSR_WRITE_FLUSH(sc);
   11227 		delay(150);
   11228 #if 0
   11229 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11230 #endif
   11231 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11232 		break;
   11233 	case WM_T_82544:	/* Reset 10000us */
   11234 	case WM_T_82540:
   11235 	case WM_T_82545:
   11236 	case WM_T_82545_3:
   11237 	case WM_T_82546:
   11238 	case WM_T_82546_3:
   11239 	case WM_T_82541:
   11240 	case WM_T_82541_2:
   11241 	case WM_T_82547:
   11242 	case WM_T_82547_2:
   11243 	case WM_T_82571:	/* Reset 100us */
   11244 	case WM_T_82572:
   11245 	case WM_T_82573:
   11246 	case WM_T_82574:
   11247 	case WM_T_82575:
   11248 	case WM_T_82576:
   11249 	case WM_T_82580:
   11250 	case WM_T_I350:
   11251 	case WM_T_I354:
   11252 	case WM_T_I210:
   11253 	case WM_T_I211:
   11254 	case WM_T_82583:
   11255 	case WM_T_80003:
   11256 		/* Generic reset */
   11257 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11258 		CSR_WRITE_FLUSH(sc);
   11259 		delay(20000);
   11260 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11261 		CSR_WRITE_FLUSH(sc);
   11262 		delay(20000);
   11263 
   11264 		if ((sc->sc_type == WM_T_82541)
   11265 		    || (sc->sc_type == WM_T_82541_2)
   11266 		    || (sc->sc_type == WM_T_82547)
   11267 		    || (sc->sc_type == WM_T_82547_2)) {
    11268 			/* Workarounds for IGP are done in igp_reset(). */
    11269 			/* XXX Add code to set the LED after PHY reset */
   11270 		}
   11271 		break;
   11272 	case WM_T_ICH8:
   11273 	case WM_T_ICH9:
   11274 	case WM_T_ICH10:
   11275 	case WM_T_PCH:
   11276 	case WM_T_PCH2:
   11277 	case WM_T_PCH_LPT:
   11278 	case WM_T_PCH_SPT:
   11279 	case WM_T_PCH_CNP:
   11280 		/* Generic reset */
   11281 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11282 		CSR_WRITE_FLUSH(sc);
   11283 		delay(100);
   11284 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11285 		CSR_WRITE_FLUSH(sc);
   11286 		delay(150);
   11287 		break;
   11288 	default:
   11289 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11290 		    __func__);
   11291 		break;
   11292 	}
   11293 
   11294 	sc->phy.release(sc);
   11295 
   11296 	/* get_cfg_done */
   11297 	wm_get_cfg_done(sc);
   11298 
   11299 	/* Extra setup */
   11300 	switch (sc->sc_type) {
   11301 	case WM_T_82542_2_0:
   11302 	case WM_T_82542_2_1:
   11303 	case WM_T_82543:
   11304 	case WM_T_82544:
   11305 	case WM_T_82540:
   11306 	case WM_T_82545:
   11307 	case WM_T_82545_3:
   11308 	case WM_T_82546:
   11309 	case WM_T_82546_3:
   11310 	case WM_T_82541_2:
   11311 	case WM_T_82547_2:
   11312 	case WM_T_82571:
   11313 	case WM_T_82572:
   11314 	case WM_T_82573:
   11315 	case WM_T_82574:
   11316 	case WM_T_82583:
   11317 	case WM_T_82575:
   11318 	case WM_T_82576:
   11319 	case WM_T_82580:
   11320 	case WM_T_I350:
   11321 	case WM_T_I354:
   11322 	case WM_T_I210:
   11323 	case WM_T_I211:
   11324 	case WM_T_80003:
   11325 		/* Null */
   11326 		break;
   11327 	case WM_T_82541:
   11328 	case WM_T_82547:
    11329 		/* XXX Actively configure the LED after PHY reset */
   11330 		break;
   11331 	case WM_T_ICH8:
   11332 	case WM_T_ICH9:
   11333 	case WM_T_ICH10:
   11334 	case WM_T_PCH:
   11335 	case WM_T_PCH2:
   11336 	case WM_T_PCH_LPT:
   11337 	case WM_T_PCH_SPT:
   11338 	case WM_T_PCH_CNP:
   11339 		wm_phy_post_reset(sc);
   11340 		break;
   11341 	default:
   11342 		panic("%s: unknown type\n", __func__);
   11343 		break;
   11344 	}
   11345 }
   11346 
    11347 /*
    11348  * Set up sc_phytype and mii_{read|write}reg.
    11349  *
    11350  *  To identify the PHY type, the correct read/write functions must be
    11351  * selected, and selecting them requires the PCI ID or MAC type, since
    11352  * the PHY registers cannot be accessed yet.
    11353  *
    11354  *  On the first call of this function, the PHY ID is not known yet, so
    11355  * check the PCI ID or MAC type. The list of PCI IDs may not be
    11356  * complete, so the result might be incorrect.
    11357  *
    11358  *  On the second call, the PHY OUI and model are used to identify the
    11359  * PHY type. This may still be imperfect because some entries are missing
    11360  * from the comparison tables, but it is better than the first call.
    11361  *
    11362  *  If the newly detected result differs from the previous assumption,
    11363  * a diagnostic message is printed.
    11364  */
   11365 static void
   11366 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11367     uint16_t phy_model)
   11368 {
   11369 	device_t dev = sc->sc_dev;
   11370 	struct mii_data *mii = &sc->sc_mii;
   11371 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11372 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11373 	mii_readreg_t new_readreg;
   11374 	mii_writereg_t new_writereg;
   11375 	bool dodiag = true;
   11376 
   11377 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11378 		device_xname(sc->sc_dev), __func__));
   11379 
    11380 	/*
    11381 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    11382 	 * always incorrect. Don't print diagnostic output on the 2nd call.
    11383 	 */
   11384 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11385 		dodiag = false;
   11386 
   11387 	if (mii->mii_readreg == NULL) {
   11388 		/*
   11389 		 *  This is the first call of this function. For ICH and PCH
   11390 		 * variants, it's difficult to determine the PHY access method
   11391 		 * by sc_type, so use the PCI product ID for some devices.
   11392 		 */
   11393 
   11394 		switch (sc->sc_pcidevid) {
   11395 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11396 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11397 			/* 82577 */
   11398 			new_phytype = WMPHY_82577;
   11399 			break;
   11400 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11401 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11402 			/* 82578 */
   11403 			new_phytype = WMPHY_82578;
   11404 			break;
   11405 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11406 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11407 			/* 82579 */
   11408 			new_phytype = WMPHY_82579;
   11409 			break;
   11410 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11411 		case PCI_PRODUCT_INTEL_82801I_BM:
   11412 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11413 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11414 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11415 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11416 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11417 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11418 			/* ICH8, 9, 10 with 82567 */
   11419 			new_phytype = WMPHY_BM;
   11420 			break;
   11421 		default:
   11422 			break;
   11423 		}
   11424 	} else {
   11425 		/* It's not the first call. Use PHY OUI and model */
   11426 		switch (phy_oui) {
   11427 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11428 			switch (phy_model) {
   11429 			case MII_MODEL_ATTANSIC_AR8021:
   11430 				new_phytype = WMPHY_82578;
   11431 				break;
   11432 			default:
   11433 				break;
   11434 			}
   11435 			break;
   11436 		case MII_OUI_xxMARVELL:
   11437 			switch (phy_model) {
   11438 			case MII_MODEL_xxMARVELL_I210:
   11439 				new_phytype = WMPHY_I210;
   11440 				break;
   11441 			case MII_MODEL_xxMARVELL_E1011:
   11442 			case MII_MODEL_xxMARVELL_E1000_3:
   11443 			case MII_MODEL_xxMARVELL_E1000_5:
   11444 			case MII_MODEL_xxMARVELL_E1112:
   11445 				new_phytype = WMPHY_M88;
   11446 				break;
   11447 			case MII_MODEL_xxMARVELL_E1149:
   11448 				new_phytype = WMPHY_BM;
   11449 				break;
   11450 			case MII_MODEL_xxMARVELL_E1111:
   11451 			case MII_MODEL_xxMARVELL_I347:
   11452 			case MII_MODEL_xxMARVELL_E1512:
   11453 			case MII_MODEL_xxMARVELL_E1340M:
   11454 			case MII_MODEL_xxMARVELL_E1543:
   11455 				new_phytype = WMPHY_M88;
   11456 				break;
   11457 			case MII_MODEL_xxMARVELL_I82563:
   11458 				new_phytype = WMPHY_GG82563;
   11459 				break;
   11460 			default:
   11461 				break;
   11462 			}
   11463 			break;
   11464 		case MII_OUI_INTEL:
   11465 			switch (phy_model) {
   11466 			case MII_MODEL_INTEL_I82577:
   11467 				new_phytype = WMPHY_82577;
   11468 				break;
   11469 			case MII_MODEL_INTEL_I82579:
   11470 				new_phytype = WMPHY_82579;
   11471 				break;
   11472 			case MII_MODEL_INTEL_I217:
   11473 				new_phytype = WMPHY_I217;
   11474 				break;
   11475 			case MII_MODEL_INTEL_I82580:
   11476 				new_phytype = WMPHY_82580;
   11477 				break;
   11478 			case MII_MODEL_INTEL_I350:
   11479 				new_phytype = WMPHY_I350;
   11480 				break;
   11481 			default:
   11482 				break;
   11483 			}
   11484 			break;
   11485 		case MII_OUI_yyINTEL:
   11486 			switch (phy_model) {
   11487 			case MII_MODEL_yyINTEL_I82562G:
   11488 			case MII_MODEL_yyINTEL_I82562EM:
   11489 			case MII_MODEL_yyINTEL_I82562ET:
   11490 				new_phytype = WMPHY_IFE;
   11491 				break;
   11492 			case MII_MODEL_yyINTEL_IGP01E1000:
   11493 				new_phytype = WMPHY_IGP;
   11494 				break;
   11495 			case MII_MODEL_yyINTEL_I82566:
   11496 				new_phytype = WMPHY_IGP_3;
   11497 				break;
   11498 			default:
   11499 				break;
   11500 			}
   11501 			break;
   11502 		default:
   11503 			break;
   11504 		}
   11505 
   11506 		if (dodiag) {
   11507 			if (new_phytype == WMPHY_UNKNOWN)
   11508 				aprint_verbose_dev(dev,
   11509 				    "%s: Unknown PHY model. OUI=%06x, "
   11510 				    "model=%04x\n", __func__, phy_oui,
   11511 				    phy_model);
   11512 
   11513 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11514 			    && (sc->sc_phytype != new_phytype)) {
    11515 				aprint_error_dev(dev, "Previously assumed PHY "
    11516 				    "type (%u) was incorrect. PHY type from "
    11517 				    "PHY ID = %u\n", sc->sc_phytype,
          				    new_phytype);
   11518 			}
   11519 		}
   11520 	}
   11521 
   11522 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11523 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11524 		/* SGMII */
   11525 		new_readreg = wm_sgmii_readreg;
   11526 		new_writereg = wm_sgmii_writereg;
   11527 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11528 		/* BM2 (phyaddr == 1) */
   11529 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11530 		    && (new_phytype != WMPHY_BM)
   11531 		    && (new_phytype != WMPHY_UNKNOWN))
   11532 			doubt_phytype = new_phytype;
   11533 		new_phytype = WMPHY_BM;
   11534 		new_readreg = wm_gmii_bm_readreg;
   11535 		new_writereg = wm_gmii_bm_writereg;
   11536 	} else if (sc->sc_type >= WM_T_PCH) {
   11537 		/* All PCH* use _hv_ */
   11538 		new_readreg = wm_gmii_hv_readreg;
   11539 		new_writereg = wm_gmii_hv_writereg;
   11540 	} else if (sc->sc_type >= WM_T_ICH8) {
   11541 		/* non-82567 ICH8, 9 and 10 */
   11542 		new_readreg = wm_gmii_i82544_readreg;
   11543 		new_writereg = wm_gmii_i82544_writereg;
   11544 	} else if (sc->sc_type >= WM_T_80003) {
   11545 		/* 80003 */
   11546 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11547 		    && (new_phytype != WMPHY_GG82563)
   11548 		    && (new_phytype != WMPHY_UNKNOWN))
   11549 			doubt_phytype = new_phytype;
   11550 		new_phytype = WMPHY_GG82563;
   11551 		new_readreg = wm_gmii_i80003_readreg;
   11552 		new_writereg = wm_gmii_i80003_writereg;
   11553 	} else if (sc->sc_type >= WM_T_I210) {
   11554 		/* I210 and I211 */
   11555 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11556 		    && (new_phytype != WMPHY_I210)
   11557 		    && (new_phytype != WMPHY_UNKNOWN))
   11558 			doubt_phytype = new_phytype;
   11559 		new_phytype = WMPHY_I210;
   11560 		new_readreg = wm_gmii_gs40g_readreg;
   11561 		new_writereg = wm_gmii_gs40g_writereg;
   11562 	} else if (sc->sc_type >= WM_T_82580) {
   11563 		/* 82580, I350 and I354 */
   11564 		new_readreg = wm_gmii_82580_readreg;
   11565 		new_writereg = wm_gmii_82580_writereg;
   11566 	} else if (sc->sc_type >= WM_T_82544) {
    11567 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11568 		new_readreg = wm_gmii_i82544_readreg;
   11569 		new_writereg = wm_gmii_i82544_writereg;
   11570 	} else {
   11571 		new_readreg = wm_gmii_i82543_readreg;
   11572 		new_writereg = wm_gmii_i82543_writereg;
   11573 	}
   11574 
   11575 	if (new_phytype == WMPHY_BM) {
   11576 		/* All BM use _bm_ */
   11577 		new_readreg = wm_gmii_bm_readreg;
   11578 		new_writereg = wm_gmii_bm_writereg;
   11579 	}
   11580 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11581 		/* All PCH* use _hv_ */
   11582 		new_readreg = wm_gmii_hv_readreg;
   11583 		new_writereg = wm_gmii_hv_writereg;
   11584 	}
   11585 
   11586 	/* Diag output */
   11587 	if (dodiag) {
   11588 		if (doubt_phytype != WMPHY_UNKNOWN)
   11589 			aprint_error_dev(dev, "Assumed new PHY type was "
   11590 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11591 			    new_phytype);
   11592 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11593 		    && (sc->sc_phytype != new_phytype))
    11594 			aprint_error_dev(dev, "Previously assumed PHY type "
    11595 			    "(%u) was incorrect. New PHY type = %u\n",
   11596 			    sc->sc_phytype, new_phytype);
   11597 
   11598 		if ((mii->mii_readreg != NULL) &&
   11599 		    (new_phytype == WMPHY_UNKNOWN))
   11600 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11601 
   11602 		if ((mii->mii_readreg != NULL) &&
   11603 		    (mii->mii_readreg != new_readreg))
   11604 			aprint_error_dev(dev, "Previously assumed PHY "
   11605 			    "read/write function was incorrect.\n");
   11606 	}
   11607 
   11608 	/* Update now */
   11609 	sc->sc_phytype = new_phytype;
   11610 	mii->mii_readreg = new_readreg;
   11611 	mii->mii_writereg = new_writereg;
   11612 	if (new_readreg == wm_gmii_hv_readreg) {
   11613 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11614 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11615 	} else if (new_readreg == wm_sgmii_readreg) {
   11616 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11617 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11618 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11619 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11620 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11621 	}
   11622 }
   11623 
   11624 /*
   11625  * wm_get_phy_id_82575:
   11626  *
   11627  * Return PHY ID. Return -1 if it failed.
   11628  */
   11629 static int
   11630 wm_get_phy_id_82575(struct wm_softc *sc)
   11631 {
   11632 	uint32_t reg;
   11633 	int phyid = -1;
   11634 
   11635 	/* XXX */
   11636 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11637 		return -1;
   11638 
   11639 	if (wm_sgmii_uses_mdio(sc)) {
   11640 		switch (sc->sc_type) {
   11641 		case WM_T_82575:
   11642 		case WM_T_82576:
   11643 			reg = CSR_READ(sc, WMREG_MDIC);
   11644 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11645 			break;
   11646 		case WM_T_82580:
   11647 		case WM_T_I350:
   11648 		case WM_T_I354:
   11649 		case WM_T_I210:
   11650 		case WM_T_I211:
   11651 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11652 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11653 			break;
   11654 		default:
   11655 			return -1;
   11656 		}
   11657 	}
   11658 
   11659 	return phyid;
   11660 }
   11661 
   11662 /*
   11663  * wm_gmii_mediainit:
   11664  *
   11665  *	Initialize media for use on 1000BASE-T devices.
   11666  */
   11667 static void
   11668 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11669 {
   11670 	device_t dev = sc->sc_dev;
   11671 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11672 	struct mii_data *mii = &sc->sc_mii;
   11673 
   11674 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11675 		device_xname(sc->sc_dev), __func__));
   11676 
   11677 	/* We have GMII. */
   11678 	sc->sc_flags |= WM_F_HAS_MII;
   11679 
   11680 	if (sc->sc_type == WM_T_80003)
    11681 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11682 	else
   11683 		sc->sc_tipg = TIPG_1000T_DFLT;
   11684 
   11685 	/*
   11686 	 * Let the chip set speed/duplex on its own based on
   11687 	 * signals from the PHY.
   11688 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11689 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11690 	 */
   11691 	sc->sc_ctrl |= CTRL_SLU;
   11692 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11693 
   11694 	/* Initialize our media structures and probe the GMII. */
   11695 	mii->mii_ifp = ifp;
   11696 
   11697 	mii->mii_statchg = wm_gmii_statchg;
   11698 
    11699 	/* Get PHY control from SMBus to PCIe */
   11700 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11701 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11702 	    || (sc->sc_type == WM_T_PCH_CNP))
   11703 		wm_init_phy_workarounds_pchlan(sc);
   11704 
   11705 	wm_gmii_reset(sc);
   11706 
   11707 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11708 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11709 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11710 
    11711 	/* Set up the internal SGMII PHY for SFP */
   11712 	wm_sgmii_sfp_preconfig(sc);
   11713 
   11714 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11715 	    || (sc->sc_type == WM_T_82580)
   11716 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11717 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11718 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11719 			/* Attach only one port */
   11720 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11721 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11722 		} else {
   11723 			int i, id;
   11724 			uint32_t ctrl_ext;
   11725 
   11726 			id = wm_get_phy_id_82575(sc);
   11727 			if (id != -1) {
   11728 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11729 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11730 			}
   11731 			if ((id == -1)
   11732 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    11733 				/* Power on the SGMII PHY if it is disabled */
   11734 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11735 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11736 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11737 				CSR_WRITE_FLUSH(sc);
   11738 				delay(300*1000); /* XXX too long */
   11739 
    11740 				/*
    11741 				 * Scan PHY addresses 1 through 7.
    11742 				 *
    11743 				 * I2C access can fail with the I2C register's
    11744 				 * ERROR bit set, so suppress error messages
    11745 				 * while scanning.
    11746 				 */
   11747 				sc->phy.no_errprint = true;
   11748 				for (i = 1; i < 8; i++)
   11749 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11750 					    0xffffffff, i, MII_OFFSET_ANY,
   11751 					    MIIF_DOPAUSE);
   11752 				sc->phy.no_errprint = false;
   11753 
    11754 				/* Restore the previous SFP cage power state */
   11755 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11756 			}
   11757 		}
   11758 	} else
   11759 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11760 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11761 
    11762 	/*
    11763 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    11764 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
    11765 	 */
   11766 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11767 		|| (sc->sc_type == WM_T_PCH_SPT)
   11768 		|| (sc->sc_type == WM_T_PCH_CNP))
   11769 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11770 		wm_set_mdio_slow_mode_hv(sc);
   11771 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11772 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11773 	}
   11774 
   11775 	/*
   11776 	 * (For ICH8 variants)
   11777 	 * If PHY detection failed, use BM's r/w function and retry.
   11778 	 */
   11779 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11780 		/* if failed, retry with *_bm_* */
   11781 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11782 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11783 		    sc->sc_phytype);
   11784 		sc->sc_phytype = WMPHY_BM;
   11785 		mii->mii_readreg = wm_gmii_bm_readreg;
   11786 		mii->mii_writereg = wm_gmii_bm_writereg;
   11787 
   11788 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11789 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11790 	}
   11791 
   11792 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11793 		/* No PHY was found */
   11794 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11795 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11796 		sc->sc_phytype = WMPHY_NONE;
   11797 	} else {
   11798 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11799 
    11800 		/*
    11801 		 * PHY found! Check the PHY type again with the second call
    11802 		 * of wm_gmii_setup_phytype().
    11803 		 */
   11804 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11805 		    child->mii_mpd_model);
   11806 
   11807 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11808 	}
   11809 }
   11810 
   11811 /*
   11812  * wm_gmii_mediachange:	[ifmedia interface function]
   11813  *
   11814  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11815  */
   11816 static int
   11817 wm_gmii_mediachange(struct ifnet *ifp)
   11818 {
   11819 	struct wm_softc *sc = ifp->if_softc;
   11820 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11821 	uint32_t reg;
   11822 	int rc;
   11823 
   11824 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11825 		device_xname(sc->sc_dev), __func__));
   11826 
   11827 	KASSERT(mutex_owned(sc->sc_core_lock));
   11828 
   11829 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11830 		return 0;
   11831 
   11832 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11833 	if ((sc->sc_type == WM_T_82580)
   11834 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11835 	    || (sc->sc_type == WM_T_I211)) {
   11836 		reg = CSR_READ(sc, WMREG_PHPM);
   11837 		reg &= ~PHPM_GO_LINK_D;
   11838 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11839 	}
   11840 
   11841 	/* Disable D0 LPLU. */
   11842 	wm_lplu_d0_disable(sc);
   11843 
   11844 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11845 	sc->sc_ctrl |= CTRL_SLU;
   11846 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11847 	    || (sc->sc_type > WM_T_82543)) {
   11848 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11849 	} else {
   11850 		sc->sc_ctrl &= ~CTRL_ASDE;
   11851 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11852 		if (ife->ifm_media & IFM_FDX)
   11853 			sc->sc_ctrl |= CTRL_FD;
   11854 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11855 		case IFM_10_T:
   11856 			sc->sc_ctrl |= CTRL_SPEED_10;
   11857 			break;
   11858 		case IFM_100_TX:
   11859 			sc->sc_ctrl |= CTRL_SPEED_100;
   11860 			break;
   11861 		case IFM_1000_T:
   11862 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11863 			break;
   11864 		case IFM_NONE:
   11865 			/* There is no specific setting for IFM_NONE */
   11866 			break;
   11867 		default:
   11868 			panic("wm_gmii_mediachange: bad media 0x%x",
   11869 			    ife->ifm_media);
   11870 		}
   11871 	}
   11872 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11873 	CSR_WRITE_FLUSH(sc);
   11874 
   11875 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11876 		wm_serdes_mediachange(ifp);
   11877 
   11878 	if (sc->sc_type <= WM_T_82543)
   11879 		wm_gmii_reset(sc);
   11880 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11881 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11882 		/* Allow time for the SFP cage to power up the PHY */
   11883 		delay(300 * 1000);
   11884 		wm_gmii_reset(sc);
   11885 	}
   11886 
   11887 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11888 		return 0;
   11889 	return rc;
   11890 }
   11891 
   11892 /*
   11893  * wm_gmii_mediastatus:	[ifmedia interface function]
   11894  *
   11895  *	Get the current interface media status on a 1000BASE-T device.
   11896  */
   11897 static void
   11898 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11899 {
   11900 	struct wm_softc *sc = ifp->if_softc;
   11901 	struct ethercom *ec = &sc->sc_ethercom;
   11902 	struct mii_data *mii;
   11903 	bool dopoll = true;
   11904 
    11905 	/*
    11906 	 * Normal drivers would call ether_mediastatus() here. It is open
    11907 	 * coded here so that the mii_pollstat() call can be skipped.
    11908 	 */
   11909 	KASSERT(mutex_owned(sc->sc_core_lock));
   11910 	KASSERT(ec->ec_mii != NULL);
   11911 	KASSERT(mii_locked(ec->ec_mii));
   11912 
   11913 	mii = ec->ec_mii;
   11914 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   11915 		struct timeval now;
   11916 
   11917 		getmicrotime(&now);
   11918 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   11919 			dopoll = false;
   11920 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
    11921 			/* Checking tv_sec only is sufficient here. */
   11922 
   11923 			sc->sc_linkup_delay_time.tv_sec = 0;
   11924 			sc->sc_linkup_delay_time.tv_usec = 0;
   11925 		}
   11926 	}
   11927 
   11928 	/*
   11929 	 * Don't call mii_pollstat() while doing workaround.
   11930 	 * See also wm_linkintr_gmii() and wm_tick().
   11931 	 */
   11932 	if (dopoll)
   11933 		mii_pollstat(mii);
   11934 	ifmr->ifm_active = mii->mii_media_active;
   11935 	ifmr->ifm_status = mii->mii_media_status;
   11936 
   11937 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11938 	    | sc->sc_flowflags;
   11939 }
   11940 
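          /*
           * Bit-banged MDIO support for the 82543: MDI_IO and MDI_CLK map the
           * MDIO data and clock lines onto software-controllable pins in CTRL,
           * and MDI_DIR drives the data pin in the host -> PHY direction.
           */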
   11941 #define	MDI_IO		CTRL_SWDPIN(2)
   11942 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11943 #define	MDI_CLK		CTRL_SWDPIN(3)
   11944 
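          /*
           * wm_i82543_mii_sendbits:
           *
           *	Clock the low `nbits' bits of `data' out the MDIO pin,
           *	MSB first.
           */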
   11945 static void
   11946 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11947 {
   11948 	uint32_t i, v;
   11949 
   11950 	v = CSR_READ(sc, WMREG_CTRL);
   11951 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11952 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11953 
   11954 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11955 		if (data & i)
   11956 			v |= MDI_IO;
   11957 		else
   11958 			v &= ~MDI_IO;
   11959 		CSR_WRITE(sc, WMREG_CTRL, v);
   11960 		CSR_WRITE_FLUSH(sc);
   11961 		delay(10);
   11962 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11963 		CSR_WRITE_FLUSH(sc);
   11964 		delay(10);
   11965 		CSR_WRITE(sc, WMREG_CTRL, v);
   11966 		CSR_WRITE_FLUSH(sc);
   11967 		delay(10);
   11968 	}
   11969 }
   11970 
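          /*
           * wm_i82543_mii_recvbits:
           *
           *	Turn the MDIO pin around and clock in a 16-bit value,
           *	MSB first.
           */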
   11971 static uint16_t
   11972 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11973 {
   11974 	uint32_t v, i;
   11975 	uint16_t data = 0;
   11976 
   11977 	v = CSR_READ(sc, WMREG_CTRL);
   11978 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11979 	v |= CTRL_SWDPIO(3);
   11980 
   11981 	CSR_WRITE(sc, WMREG_CTRL, v);
   11982 	CSR_WRITE_FLUSH(sc);
   11983 	delay(10);
   11984 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11985 	CSR_WRITE_FLUSH(sc);
   11986 	delay(10);
   11987 	CSR_WRITE(sc, WMREG_CTRL, v);
   11988 	CSR_WRITE_FLUSH(sc);
   11989 	delay(10);
   11990 
   11991 	for (i = 0; i < 16; i++) {
   11992 		data <<= 1;
   11993 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11994 		CSR_WRITE_FLUSH(sc);
   11995 		delay(10);
   11996 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11997 			data |= 1;
   11998 		CSR_WRITE(sc, WMREG_CTRL, v);
   11999 		CSR_WRITE_FLUSH(sc);
   12000 		delay(10);
   12001 	}
   12002 
   12003 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12004 	CSR_WRITE_FLUSH(sc);
   12005 	delay(10);
   12006 	CSR_WRITE(sc, WMREG_CTRL, v);
   12007 	CSR_WRITE_FLUSH(sc);
   12008 	delay(10);
   12009 
   12010 	return data;
   12011 }
   12012 
   12013 #undef MDI_IO
   12014 #undef MDI_DIR
   12015 #undef MDI_CLK
   12016 
   12017 /*
   12018  * wm_gmii_i82543_readreg:	[mii interface function]
   12019  *
   12020  *	Read a PHY register on the GMII (i82543 version).
   12021  */
   12022 static int
   12023 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12024 {
   12025 	struct wm_softc *sc = device_private(dev);
   12026 
   12027 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12028 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12029 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12030 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12031 
   12032 	DPRINTF(sc, WM_DEBUG_GMII,
   12033 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12034 		device_xname(dev), phy, reg, *val));
   12035 
   12036 	return 0;
   12037 }
   12038 
   12039 /*
   12040  * wm_gmii_i82543_writereg:	[mii interface function]
   12041  *
   12042  *	Write a PHY register on the GMII (i82543 version).
   12043  */
   12044 static int
   12045 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12046 {
   12047 	struct wm_softc *sc = device_private(dev);
   12048 
   12049 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12050 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12051 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12052 	    (MII_COMMAND_START << 30), 32);
   12053 
   12054 	return 0;
   12055 }
   12056 
   12057 /*
   12058  * wm_gmii_mdic_readreg:	[mii interface function]
   12059  *
   12060  *	Read a PHY register on the GMII.
   12061  */
   12062 static int
   12063 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12064 {
   12065 	struct wm_softc *sc = device_private(dev);
   12066 	uint32_t mdic = 0;
   12067 	int i;
   12068 
   12069 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12070 	    && (reg > MII_ADDRMASK)) {
   12071 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12072 		    __func__, sc->sc_phytype, reg);
   12073 		reg &= MII_ADDRMASK;
   12074 	}
   12075 
   12076 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12077 	    MDIC_REGADD(reg));
   12078 
   12079 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12080 		delay(50);
   12081 		mdic = CSR_READ(sc, WMREG_MDIC);
   12082 		if (mdic & MDIC_READY)
   12083 			break;
   12084 	}
   12085 
   12086 	if ((mdic & MDIC_READY) == 0) {
   12087 		DPRINTF(sc, WM_DEBUG_GMII,
   12088 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12089 			device_xname(dev), phy, reg));
   12090 		return ETIMEDOUT;
   12091 	} else if (mdic & MDIC_E) {
   12092 		/* This is normal if no PHY is present. */
   12093 		DPRINTF(sc, WM_DEBUG_GMII,
   12094 		    ("%s: MDIC read error: phy %d reg %d\n",
   12095 			device_xname(sc->sc_dev), phy, reg));
   12096 		return -1;
   12097 	} else
   12098 		*val = MDIC_DATA(mdic);
   12099 
   12100 	/*
   12101 	 * Allow some time after each MDIC transaction to avoid
   12102 	 * reading duplicate data in the next MDIC transaction.
   12103 	 */
   12104 	if (sc->sc_type == WM_T_PCH2)
   12105 		delay(100);
   12106 
   12107 	return 0;
   12108 }
   12109 
   12110 /*
   12111  * wm_gmii_mdic_writereg:	[mii interface function]
   12112  *
   12113  *	Write a PHY register on the GMII.
   12114  */
   12115 static int
   12116 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12117 {
   12118 	struct wm_softc *sc = device_private(dev);
   12119 	uint32_t mdic = 0;
   12120 	int i;
   12121 
   12122 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12123 	    && (reg > MII_ADDRMASK)) {
   12124 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12125 		    __func__, sc->sc_phytype, reg);
   12126 		reg &= MII_ADDRMASK;
   12127 	}
   12128 
   12129 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12130 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12131 
   12132 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12133 		delay(50);
   12134 		mdic = CSR_READ(sc, WMREG_MDIC);
   12135 		if (mdic & MDIC_READY)
   12136 			break;
   12137 	}
   12138 
   12139 	if ((mdic & MDIC_READY) == 0) {
   12140 		DPRINTF(sc, WM_DEBUG_GMII,
   12141 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12142 			device_xname(dev), phy, reg));
   12143 		return ETIMEDOUT;
   12144 	} else if (mdic & MDIC_E) {
   12145 		DPRINTF(sc, WM_DEBUG_GMII,
   12146 		    ("%s: MDIC write error: phy %d reg %d\n",
   12147 			device_xname(dev), phy, reg));
   12148 		return -1;
   12149 	}
   12150 
   12151 	/*
   12152 	 * Allow some time after each MDIC transaction to avoid
   12153 	 * reading duplicate data in the next MDIC transaction.
   12154 	 */
   12155 	if (sc->sc_type == WM_T_PCH2)
   12156 		delay(100);
   12157 
   12158 	return 0;
   12159 }
   12160 
   12161 /*
   12162  * wm_gmii_i82544_readreg:	[mii interface function]
   12163  *
   12164  *	Read a PHY register on the GMII.
   12165  */
   12166 static int
   12167 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12168 {
   12169 	struct wm_softc *sc = device_private(dev);
   12170 	int rv;
   12171 
   12172 	rv = sc->phy.acquire(sc);
   12173 	if (rv != 0) {
   12174 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12175 		return rv;
   12176 	}
   12177 
   12178 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12179 
   12180 	sc->phy.release(sc);
   12181 
   12182 	return rv;
   12183 }
   12184 
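          /*
           * wm_gmii_i82544_readreg_locked:
           *
           *	Like wm_gmii_i82544_readreg(), but with the PHY semaphore
           *	already held. IGP PHYs need a page select write first for
           *	registers above the multi-page boundary.
           */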
   12185 static int
   12186 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12187 {
   12188 	struct wm_softc *sc = device_private(dev);
   12189 	int rv;
   12190 
   12191 	switch (sc->sc_phytype) {
   12192 	case WMPHY_IGP:
   12193 	case WMPHY_IGP_2:
   12194 	case WMPHY_IGP_3:
   12195 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12196 			rv = wm_gmii_mdic_writereg(dev, phy,
   12197 			    IGPHY_PAGE_SELECT, reg);
   12198 			if (rv != 0)
   12199 				return rv;
   12200 		}
   12201 		break;
   12202 	default:
   12203 #ifdef WM_DEBUG
   12204 		if ((reg >> MII_ADDRBITS) != 0)
   12205 			device_printf(dev,
   12206 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12207 			    __func__, sc->sc_phytype, reg);
   12208 #endif
   12209 		break;
   12210 	}
   12211 
   12212 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12213 }
   12214 
   12215 /*
   12216  * wm_gmii_i82544_writereg:	[mii interface function]
   12217  *
   12218  *	Write a PHY register on the GMII.
   12219  */
   12220 static int
   12221 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12222 {
   12223 	struct wm_softc *sc = device_private(dev);
   12224 	int rv;
   12225 
   12226 	rv = sc->phy.acquire(sc);
   12227 	if (rv != 0) {
   12228 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12229 		return rv;
   12230 	}
   12231 
   12232 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12233 	sc->phy.release(sc);
   12234 
   12235 	return rv;
   12236 }
   12237 
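          /*
           * wm_gmii_i82544_writereg_locked:
           *
           *	Like wm_gmii_i82544_writereg(), but with the PHY semaphore
           *	already held.
           */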
   12238 static int
   12239 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12240 {
   12241 	struct wm_softc *sc = device_private(dev);
   12242 	int rv;
   12243 
   12244 	switch (sc->sc_phytype) {
   12245 	case WMPHY_IGP:
   12246 	case WMPHY_IGP_2:
   12247 	case WMPHY_IGP_3:
   12248 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12249 			rv = wm_gmii_mdic_writereg(dev, phy,
   12250 			    IGPHY_PAGE_SELECT, reg);
   12251 			if (rv != 0)
   12252 				return rv;
   12253 		}
   12254 		break;
   12255 	default:
   12256 #ifdef WM_DEBUG
   12257 		if ((reg >> MII_ADDRBITS) != 0)
   12258 			device_printf(dev,
    12259 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12260 			    __func__, sc->sc_phytype, reg);
   12261 #endif
   12262 		break;
   12263 	}
   12264 
   12265 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12266 }
   12267 
   12268 /*
   12269  * wm_gmii_i80003_readreg:	[mii interface function]
   12270  *
    12271  *	Read a PHY register on the Kumeran bus.
   12272  * This could be handled by the PHY layer if we didn't have to lock the
   12273  * resource ...
   12274  */
   12275 static int
   12276 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12277 {
   12278 	struct wm_softc *sc = device_private(dev);
   12279 	int page_select;
   12280 	uint16_t temp, temp2;
   12281 	int rv;
   12282 
   12283 	if (phy != 1) /* Only one PHY on kumeran bus */
   12284 		return -1;
   12285 
   12286 	rv = sc->phy.acquire(sc);
   12287 	if (rv != 0) {
   12288 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12289 		return rv;
   12290 	}
   12291 
   12292 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12293 		page_select = GG82563_PHY_PAGE_SELECT;
   12294 	else {
   12295 		/*
   12296 		 * Use Alternative Page Select register to access registers
   12297 		 * 30 and 31.
   12298 		 */
   12299 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12300 	}
   12301 	temp = reg >> GG82563_PAGE_SHIFT;
   12302 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12303 		goto out;
   12304 
   12305 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    12306 		/*
    12307 		 * Wait an extra 200us to work around a bug in the MDIC
    12308 		 * register's ready bit.
    12309 		 */
   12310 		delay(200);
   12311 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12312 		if ((rv != 0) || (temp2 != temp)) {
   12313 			device_printf(dev, "%s failed\n", __func__);
   12314 			rv = -1;
   12315 			goto out;
   12316 		}
   12317 		delay(200);
   12318 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12319 		delay(200);
   12320 	} else
   12321 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12322 
   12323 out:
   12324 	sc->phy.release(sc);
   12325 	return rv;
   12326 }
   12327 
   12328 /*
   12329  * wm_gmii_i80003_writereg:	[mii interface function]
   12330  *
    12331  *	Write a PHY register on the Kumeran bus.
   12332  * This could be handled by the PHY layer if we didn't have to lock the
   12333  * resource ...
   12334  */
   12335 static int
   12336 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12337 {
   12338 	struct wm_softc *sc = device_private(dev);
   12339 	int page_select, rv;
   12340 	uint16_t temp, temp2;
   12341 
   12342 	if (phy != 1) /* Only one PHY on kumeran bus */
   12343 		return -1;
   12344 
   12345 	rv = sc->phy.acquire(sc);
   12346 	if (rv != 0) {
   12347 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12348 		return rv;
   12349 	}
   12350 
   12351 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12352 		page_select = GG82563_PHY_PAGE_SELECT;
   12353 	else {
   12354 		/*
   12355 		 * Use Alternative Page Select register to access registers
   12356 		 * 30 and 31.
   12357 		 */
   12358 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12359 	}
   12360 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12361 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12362 		goto out;
   12363 
   12364 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    12365 		/*
    12366 		 * Wait an extra 200us to work around a bug in the MDIC
    12367 		 * register's ready bit.
    12368 		 */
   12369 		delay(200);
   12370 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12371 		if ((rv != 0) || (temp2 != temp)) {
   12372 			device_printf(dev, "%s failed\n", __func__);
   12373 			rv = -1;
   12374 			goto out;
   12375 		}
   12376 		delay(200);
   12377 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12378 		delay(200);
   12379 	} else
   12380 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12381 
   12382 out:
   12383 	sc->phy.release(sc);
   12384 	return rv;
   12385 }
   12386 
   12387 /*
   12388  * wm_gmii_bm_readreg:	[mii interface function]
   12389  *
    12390  *	Read a PHY register on a BM PHY.
   12391  * This could be handled by the PHY layer if we didn't have to lock the
   12392  * resource ...
   12393  */
   12394 static int
   12395 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12396 {
   12397 	struct wm_softc *sc = device_private(dev);
   12398 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12399 	int rv;
   12400 
   12401 	rv = sc->phy.acquire(sc);
   12402 	if (rv != 0) {
   12403 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12404 		return rv;
   12405 	}
   12406 
   12407 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12408 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12409 		    || (reg == 31)) ? 1 : phy;
   12410 	/* Page 800 works differently than the rest so it has its own func */
   12411 	if (page == BM_WUC_PAGE) {
   12412 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12413 		goto release;
   12414 	}
   12415 
   12416 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12417 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12418 		    && (sc->sc_type != WM_T_82583))
   12419 			rv = wm_gmii_mdic_writereg(dev, phy,
   12420 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12421 		else
   12422 			rv = wm_gmii_mdic_writereg(dev, phy,
   12423 			    BME1000_PHY_PAGE_SELECT, page);
   12424 		if (rv != 0)
   12425 			goto release;
   12426 	}
   12427 
   12428 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12429 
   12430 release:
   12431 	sc->phy.release(sc);
   12432 	return rv;
   12433 }
   12434 
   12435 /*
   12436  * wm_gmii_bm_writereg:	[mii interface function]
   12437  *
    12438  *	Write a PHY register on the BM PHY.
   12439  * This could be handled by the PHY layer if we didn't have to lock the
   12440  * resource ...
   12441  */
   12442 static int
   12443 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12444 {
   12445 	struct wm_softc *sc = device_private(dev);
   12446 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12447 	int rv;
   12448 
   12449 	rv = sc->phy.acquire(sc);
   12450 	if (rv != 0) {
   12451 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12452 		return rv;
   12453 	}
   12454 
   12455 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12456 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12457 		    || (reg == 31)) ? 1 : phy;
   12458 	/* Page 800 works differently than the rest so it has its own func */
   12459 	if (page == BM_WUC_PAGE) {
   12460 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12461 		goto release;
   12462 	}
   12463 
   12464 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12465 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12466 		    && (sc->sc_type != WM_T_82583))
   12467 			rv = wm_gmii_mdic_writereg(dev, phy,
   12468 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12469 		else
   12470 			rv = wm_gmii_mdic_writereg(dev, phy,
   12471 			    BME1000_PHY_PAGE_SELECT, page);
   12472 		if (rv != 0)
   12473 			goto release;
   12474 	}
   12475 
   12476 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12477 
   12478 release:
   12479 	sc->phy.release(sc);
   12480 	return rv;
   12481 }
   12482 
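/*
 * A minimal sketch (not called by the driver) of the encoding the two BM
 * accessors above take apart: the page number sits above
 * BME1000_PAGE_SHIFT and the register number in the low bits.  Page
 * BM_WUC_PAGE (800) is diverted to the wakeup-register path, and only
 * registers above BME1000_MAX_MULTI_PAGE_REG need the explicit
 * page-select write seen above.  The helper name is illustrative only.
 */
static inline int
wm_bm_encode_reg_example(uint16_t page, int regnum)
{

	return ((int)page << BME1000_PAGE_SHIFT) | (regnum & MII_ADDRMASK);
}
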
   12483 /*
   12484  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12485  *  @dev: pointer to the HW structure
   12486  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   12487  *
   12488  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12489  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12490  */
   12491 static int
   12492 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12493 {
   12494 #ifdef WM_DEBUG
   12495 	struct wm_softc *sc = device_private(dev);
   12496 #endif
   12497 	uint16_t temp;
   12498 	int rv;
   12499 
   12500 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12501 		device_xname(dev), __func__));
   12502 
   12503 	if (!phy_regp)
   12504 		return -1;
   12505 
   12506 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12507 
   12508 	/* Select Port Control Registers page */
   12509 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12510 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12511 	if (rv != 0)
   12512 		return rv;
   12513 
   12514 	/* Read WUCE and save it */
   12515 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12516 	if (rv != 0)
   12517 		return rv;
   12518 
   12519 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12520 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12521 	 */
   12522 	temp = *phy_regp;
   12523 	temp |= BM_WUC_ENABLE_BIT;
   12524 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12525 
   12526 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12527 		return rv;
   12528 
   12529 	/* Select Host Wakeup Registers page - caller now able to write
   12530 	 * registers on the Wakeup registers page
   12531 	 */
   12532 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12533 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12534 }
   12535 
   12536 /*
   12537  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12538  *  @dev: pointer to the HW structure
   12539  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12540  *
   12541  *  Restore BM_WUC_ENABLE_REG to its original value.
   12542  *
   12543  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12544  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12545  *  caller.
   12546  */
   12547 static int
   12548 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12549 {
   12550 #ifdef WM_DEBUG
   12551 	struct wm_softc *sc = device_private(dev);
   12552 #endif
   12553 
   12554 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12555 		device_xname(dev), __func__));
   12556 
   12557 	if (!phy_regp)
   12558 		return -1;
   12559 
   12560 	/* Select Port Control Registers page */
   12561 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12562 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12563 
   12564 	/* Restore 769.17 to its original value */
   12565 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12566 
   12567 	return 0;
   12568 }
   12569 
   12570 /*
   12571  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12572  *  @dev: pointer to the HW structure
   12573  *  @offset: register offset to be read or written
   12574  *  @val: pointer to the data to read or write
   12575  *  @rd: determines if operation is read or write
   12576  *  @page_set: BM_WUC_PAGE already set and access enabled
   12577  *
   12578  *  Read the PHY register at offset and store the retrieved information in
   12579  *  data, or write data to PHY register at offset.  Note the procedure to
    12580  *  access the PHY wakeup registers is different from that for the other
    12581  *  PHY registers. It works as follows:
   12582  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12583  *  2) Set page to 800 for host (801 for manageability)
   12584  *  3) Write the address using the address opcode (0x11)
   12585  *  4) Read or write the data using the data opcode (0x12)
   12586  *  5) Restore 769.17.2 to its original value
   12587  *
   12588  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12589  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12590  *
   12591  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12592  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12593  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12594  */
   12595 static int
    12596 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12597     bool page_set)
   12598 {
   12599 	struct wm_softc *sc = device_private(dev);
   12600 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12601 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12602 	uint16_t wuce;
   12603 	int rv = 0;
   12604 
   12605 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12606 		device_xname(dev), __func__));
   12607 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12608 	if ((sc->sc_type == WM_T_PCH)
   12609 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12610 		device_printf(dev,
   12611 		    "Attempting to access page %d while gig enabled.\n", page);
   12612 	}
   12613 
   12614 	if (!page_set) {
   12615 		/* Enable access to PHY wakeup registers */
   12616 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12617 		if (rv != 0) {
   12618 			device_printf(dev,
   12619 			    "%s: Could not enable PHY wakeup reg access\n",
   12620 			    __func__);
   12621 			return rv;
   12622 		}
   12623 	}
   12624 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12625 		device_xname(sc->sc_dev), __func__, page, regnum));
   12626 
   12627 	/*
    12628 	 * Steps 3 and 4: access the PHY wakeup register, following
    12629 	 * the procedure described in the function comment above.
   12630 	 */
   12631 
   12632 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12633 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12634 	if (rv != 0)
   12635 		return rv;
   12636 
   12637 	if (rd) {
   12638 		/* Read the Wakeup register page value using opcode 0x12 */
   12639 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12640 	} else {
   12641 		/* Write the Wakeup register page value using opcode 0x12 */
   12642 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12643 	}
   12644 	if (rv != 0)
   12645 		return rv;
   12646 
   12647 	if (!page_set)
   12648 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12649 
   12650 	return rv;
   12651 }
   12652 
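/*
 * A usage sketch (illustrative only) of batching wakeup-register reads
 * with page_set=true so that steps 1-2 and 5 run once: the enable/disable
 * helpers above bracket the accesses, and each access then performs only
 * steps 3-4.  Assumes the PHY semaphore is already held, as those helpers
 * require; off1/off2 are wakeup-page offsets as constructed by the
 * callers above.
 */
static inline int
wm_wuc_read_pair_example(device_t dev, int off1, int off2,
    uint16_t *val1, uint16_t *val2)
{
	uint16_t wuce;
	int rv;

	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
	if (rv != 0)
		return rv;
	rv = wm_access_phy_wakeup_reg_bm(dev, off1, val1, true, true);
	if (rv == 0)
		rv = wm_access_phy_wakeup_reg_bm(dev, off2, val2, true, true);
	(void)wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	return rv;
}
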
   12653 /*
   12654  * wm_gmii_hv_readreg:	[mii interface function]
   12655  *
    12656  *	Read a PHY register on the HV (PCH family) PHY.
   12657  * This could be handled by the PHY layer if we didn't have to lock the
   12658  * resource ...
   12659  */
   12660 static int
   12661 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12662 {
   12663 	struct wm_softc *sc = device_private(dev);
   12664 	int rv;
   12665 
   12666 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12667 		device_xname(dev), __func__));
   12668 
   12669 	rv = sc->phy.acquire(sc);
   12670 	if (rv != 0) {
   12671 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12672 		return rv;
   12673 	}
   12674 
   12675 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12676 	sc->phy.release(sc);
   12677 	return rv;
   12678 }
   12679 
   12680 static int
   12681 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12682 {
   12683 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12684 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12685 	int rv;
   12686 
   12687 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12688 
   12689 	/* Page 800 works differently than the rest so it has its own func */
   12690 	if (page == BM_WUC_PAGE)
   12691 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12692 
   12693 	/*
    12694 	 * Pages 1 through 767 work differently from the rest and are
    12695 	 * not supported here.
   12696 	 */
   12697 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12698 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12699 		return -1;
   12700 	}
   12701 
   12702 	/*
   12703 	 * XXX I21[789] documents say that the SMBus Address register is at
   12704 	 * PHY address 01, Page 0 (not 768), Register 26.
   12705 	 */
   12706 	if (page == HV_INTC_FC_PAGE_START)
   12707 		page = 0;
   12708 
   12709 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12710 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12711 		    page << BME1000_PAGE_SHIFT);
   12712 		if (rv != 0)
   12713 			return rv;
   12714 	}
   12715 
   12716 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12717 }
   12718 
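/*
 * A worked example of the encoding wm_gmii_hv_readreg_locked() splits,
 * using only the extractors seen above (the helper is illustrative and
 * not called by the driver).  Pages >= HV_INTC_FC_PAGE_START force PHY
 * address 1, page BM_WUC_PAGE (800) goes through the wakeup-register
 * path, and only register numbers above BME1000_MAX_MULTI_PAGE_REG need
 * the IGPHY_PAGE_SELECT write before the final MDIC access.
 */
static inline void
wm_hv_decode_reg_example(int reg, uint16_t *pagep, uint16_t *regnump)
{

	*pagep = BM_PHY_REG_PAGE(reg);	  /* page number */
	*regnump = BM_PHY_REG_NUM(reg);	  /* in-page register number */
}
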
   12719 /*
   12720  * wm_gmii_hv_writereg:	[mii interface function]
   12721  *
    12722  *	Write a PHY register on the HV (PCH family) PHY.
   12723  * This could be handled by the PHY layer if we didn't have to lock the
   12724  * resource ...
   12725  */
   12726 static int
   12727 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12728 {
   12729 	struct wm_softc *sc = device_private(dev);
   12730 	int rv;
   12731 
   12732 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12733 		device_xname(dev), __func__));
   12734 
   12735 	rv = sc->phy.acquire(sc);
   12736 	if (rv != 0) {
   12737 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12738 		return rv;
   12739 	}
   12740 
   12741 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12742 	sc->phy.release(sc);
   12743 
   12744 	return rv;
   12745 }
   12746 
   12747 static int
   12748 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12749 {
   12750 	struct wm_softc *sc = device_private(dev);
   12751 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12752 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12753 	int rv;
   12754 
   12755 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12756 
   12757 	/* Page 800 works differently than the rest so it has its own func */
   12758 	if (page == BM_WUC_PAGE)
   12759 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12760 		    false);
   12761 
   12762 	/*
    12763 	 * Pages 1 through 767 work differently from the rest and are
    12764 	 * not supported here.
   12765 	 */
   12766 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12767 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12768 		return -1;
   12769 	}
   12770 
   12771 	{
   12772 		/*
   12773 		 * XXX I21[789] documents say that the SMBus Address register
   12774 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12775 		 */
   12776 		if (page == HV_INTC_FC_PAGE_START)
   12777 			page = 0;
   12778 
   12779 		/*
   12780 		 * XXX Workaround MDIO accesses being disabled after entering
   12781 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12782 		 * register is set)
   12783 		 */
   12784 		if (sc->sc_phytype == WMPHY_82578) {
   12785 			struct mii_softc *child;
   12786 
   12787 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12788 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12789 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12790 			    && ((val & (1 << 11)) != 0)) {
   12791 				device_printf(dev, "XXX need workaround\n");
   12792 			}
   12793 		}
   12794 
   12795 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12796 			rv = wm_gmii_mdic_writereg(dev, 1,
   12797 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12798 			if (rv != 0)
   12799 				return rv;
   12800 		}
   12801 	}
   12802 
   12803 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12804 }
   12805 
   12806 /*
   12807  * wm_gmii_82580_readreg:	[mii interface function]
   12808  *
   12809  *	Read a PHY register on the 82580 and I350.
   12810  * This could be handled by the PHY layer if we didn't have to lock the
   12811  * resource ...
   12812  */
   12813 static int
   12814 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12815 {
   12816 	struct wm_softc *sc = device_private(dev);
   12817 	int rv;
   12818 
   12819 	rv = sc->phy.acquire(sc);
   12820 	if (rv != 0) {
   12821 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12822 		return rv;
   12823 	}
   12824 
   12825 #ifdef DIAGNOSTIC
   12826 	if (reg > MII_ADDRMASK) {
   12827 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12828 		    __func__, sc->sc_phytype, reg);
   12829 		reg &= MII_ADDRMASK;
   12830 	}
   12831 #endif
   12832 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12833 
   12834 	sc->phy.release(sc);
   12835 	return rv;
   12836 }
   12837 
   12838 /*
   12839  * wm_gmii_82580_writereg:	[mii interface function]
   12840  *
   12841  *	Write a PHY register on the 82580 and I350.
   12842  * This could be handled by the PHY layer if we didn't have to lock the
   12843  * resource ...
   12844  */
   12845 static int
   12846 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12847 {
   12848 	struct wm_softc *sc = device_private(dev);
   12849 	int rv;
   12850 
   12851 	rv = sc->phy.acquire(sc);
   12852 	if (rv != 0) {
   12853 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12854 		return rv;
   12855 	}
   12856 
   12857 #ifdef DIAGNOSTIC
   12858 	if (reg > MII_ADDRMASK) {
   12859 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12860 		    __func__, sc->sc_phytype, reg);
   12861 		reg &= MII_ADDRMASK;
   12862 	}
   12863 #endif
   12864 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12865 
   12866 	sc->phy.release(sc);
   12867 	return rv;
   12868 }
   12869 
   12870 /*
   12871  * wm_gmii_gs40g_readreg:	[mii interface function]
   12872  *
    12873  *	Read a PHY register on the I210 and I211.
   12874  * This could be handled by the PHY layer if we didn't have to lock the
   12875  * resource ...
   12876  */
   12877 static int
   12878 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12879 {
   12880 	struct wm_softc *sc = device_private(dev);
   12881 	int page, offset;
   12882 	int rv;
   12883 
   12884 	/* Acquire semaphore */
   12885 	rv = sc->phy.acquire(sc);
   12886 	if (rv != 0) {
   12887 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12888 		return rv;
   12889 	}
   12890 
   12891 	/* Page select */
   12892 	page = reg >> GS40G_PAGE_SHIFT;
   12893 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12894 	if (rv != 0)
   12895 		goto release;
   12896 
   12897 	/* Read reg */
   12898 	offset = reg & GS40G_OFFSET_MASK;
   12899 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12900 
   12901 release:
   12902 	sc->phy.release(sc);
   12903 	return rv;
   12904 }
   12905 
   12906 /*
   12907  * wm_gmii_gs40g_writereg:	[mii interface function]
   12908  *
   12909  *	Write a PHY register on the I210 and I211.
   12910  * This could be handled by the PHY layer if we didn't have to lock the
   12911  * resource ...
   12912  */
   12913 static int
   12914 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12915 {
   12916 	struct wm_softc *sc = device_private(dev);
   12917 	uint16_t page;
   12918 	int offset, rv;
   12919 
   12920 	/* Acquire semaphore */
   12921 	rv = sc->phy.acquire(sc);
   12922 	if (rv != 0) {
   12923 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12924 		return rv;
   12925 	}
   12926 
   12927 	/* Page select */
   12928 	page = reg >> GS40G_PAGE_SHIFT;
   12929 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12930 	if (rv != 0)
   12931 		goto release;
   12932 
   12933 	/* Write reg */
   12934 	offset = reg & GS40G_OFFSET_MASK;
   12935 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12936 
   12937 release:
   12938 	/* Release semaphore */
   12939 	sc->phy.release(sc);
   12940 	return rv;
   12941 }
   12942 
   12943 /*
   12944  * wm_gmii_statchg:	[mii interface function]
   12945  *
   12946  *	Callback from MII layer when media changes.
   12947  */
   12948 static void
   12949 wm_gmii_statchg(struct ifnet *ifp)
   12950 {
   12951 	struct wm_softc *sc = ifp->if_softc;
   12952 	struct mii_data *mii = &sc->sc_mii;
   12953 
   12954 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12955 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12956 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12957 
   12958 	/* Get flow control negotiation result. */
   12959 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12960 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12961 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12962 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12963 	}
   12964 
   12965 	if (sc->sc_flowflags & IFM_FLOW) {
   12966 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12967 			sc->sc_ctrl |= CTRL_TFCE;
   12968 			sc->sc_fcrtl |= FCRTL_XONE;
   12969 		}
   12970 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12971 			sc->sc_ctrl |= CTRL_RFCE;
   12972 	}
   12973 
   12974 	if (mii->mii_media_active & IFM_FDX) {
   12975 		DPRINTF(sc, WM_DEBUG_LINK,
   12976 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12977 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12978 	} else {
   12979 		DPRINTF(sc, WM_DEBUG_LINK,
   12980 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12981 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12982 	}
   12983 
   12984 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12985 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12986 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12987 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12988 	if (sc->sc_type == WM_T_80003) {
   12989 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12990 		case IFM_1000_T:
   12991 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12992 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12993 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12994 			break;
   12995 		default:
   12996 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12997 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12998 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12999 			break;
   13000 		}
   13001 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   13002 	}
   13003 }
   13004 
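/*
 * A distilled sketch (not called by the driver) of the pause mapping
 * wm_gmii_statchg() applies above: the negotiated IFM_ETH_*PAUSE flags
 * become the MAC's flow-control enables (wm_gmii_statchg() additionally
 * sets FCRTL_XONE alongside CTRL_TFCE).
 */
static inline uint32_t
wm_flowflags_to_ctrl_example(int flowflags)
{
	uint32_t ctrl = 0;

	if ((flowflags & IFM_FLOW) == 0)
		return 0;
	if (flowflags & IFM_ETH_TXPAUSE)
		ctrl |= CTRL_TFCE;	/* we may send pause frames */
	if (flowflags & IFM_ETH_RXPAUSE)
		ctrl |= CTRL_RFCE;	/* we honor received pause frames */
	return ctrl;
}
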
   13005 /* kumeran related (80003, ICH* and PCH*) */
   13006 
   13007 /*
   13008  * wm_kmrn_readreg:
   13009  *
   13010  *	Read a kumeran register
   13011  */
   13012 static int
   13013 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13014 {
   13015 	int rv;
   13016 
   13017 	if (sc->sc_type == WM_T_80003)
   13018 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13019 	else
   13020 		rv = sc->phy.acquire(sc);
   13021 	if (rv != 0) {
   13022 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13023 		    __func__);
   13024 		return rv;
   13025 	}
   13026 
   13027 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13028 
   13029 	if (sc->sc_type == WM_T_80003)
   13030 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13031 	else
   13032 		sc->phy.release(sc);
   13033 
   13034 	return rv;
   13035 }
   13036 
   13037 static int
   13038 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13039 {
   13040 
   13041 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13042 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13043 	    KUMCTRLSTA_REN);
   13044 	CSR_WRITE_FLUSH(sc);
   13045 	delay(2);
   13046 
   13047 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13048 
   13049 	return 0;
   13050 }
   13051 
   13052 /*
   13053  * wm_kmrn_writereg:
   13054  *
   13055  *	Write a kumeran register
   13056  */
   13057 static int
   13058 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13059 {
   13060 	int rv;
   13061 
   13062 	if (sc->sc_type == WM_T_80003)
   13063 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13064 	else
   13065 		rv = sc->phy.acquire(sc);
   13066 	if (rv != 0) {
   13067 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13068 		    __func__);
   13069 		return rv;
   13070 	}
   13071 
   13072 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13073 
   13074 	if (sc->sc_type == WM_T_80003)
   13075 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13076 	else
   13077 		sc->phy.release(sc);
   13078 
   13079 	return rv;
   13080 }
   13081 
   13082 static int
   13083 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13084 {
   13085 
   13086 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13087 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13088 
   13089 	return 0;
   13090 }
   13091 
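/*
 * A minimal read-modify-write sketch for a kumeran register using the
 * locking wrappers above; the offset and value are the ones
 * wm_gmii_statchg() programs.  Illustrative only.
 */
static inline int
wm_kmrn_hd_ctrl_example(struct wm_softc *sc)
{
	uint16_t val;
	int rv;

	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val);
	if (rv != 0)
		return rv;
	if (val != KUMCTRLSTA_HD_CTRL_1000_DEFAULT)
		rv = wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
	return rv;
}
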
   13092 /*
    13093  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   13094  * This access method is different from IEEE MMD.
   13095  */
   13096 static int
   13097 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13098 {
   13099 	struct wm_softc *sc = device_private(dev);
   13100 	int rv;
   13101 
   13102 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13103 	if (rv != 0)
   13104 		return rv;
   13105 
   13106 	if (rd)
   13107 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13108 	else
   13109 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13110 	return rv;
   13111 }
   13112 
   13113 static int
   13114 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13115 {
   13116 
   13117 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13118 }
   13119 
   13120 static int
   13121 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13122 {
   13123 
   13124 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13125 }
   13126 
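/*
 * A usage sketch for the EMI helpers above (illustrative only; emi_reg is
 * whatever EMI offset the caller needs).  The address/data pair through
 * I82579_EMI_ADDR and I82579_EMI_DATA must stay under a single hold of
 * the PHY lock, hence the _locked suffix on the helpers.
 */
static inline int
wm_read_emi_reg_example(struct wm_softc *sc, int emi_reg, uint16_t *valp)
{
	int rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	rv = wm_read_emi_reg_locked(sc->sc_dev, emi_reg, valp);
	sc->phy.release(sc);
	return rv;
}
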
   13127 /* SGMII related */
   13128 
   13129 /*
   13130  * wm_sgmii_uses_mdio
   13131  *
   13132  * Check whether the transaction is to the internal PHY or the external
   13133  * MDIO interface. Return true if it's MDIO.
   13134  */
   13135 static bool
   13136 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13137 {
   13138 	uint32_t reg;
   13139 	bool ismdio = false;
   13140 
   13141 	switch (sc->sc_type) {
   13142 	case WM_T_82575:
   13143 	case WM_T_82576:
   13144 		reg = CSR_READ(sc, WMREG_MDIC);
   13145 		ismdio = ((reg & MDIC_DEST) != 0);
   13146 		break;
   13147 	case WM_T_82580:
   13148 	case WM_T_I350:
   13149 	case WM_T_I354:
   13150 	case WM_T_I210:
   13151 	case WM_T_I211:
   13152 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13153 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13154 		break;
   13155 	default:
   13156 		break;
   13157 	}
   13158 
   13159 	return ismdio;
   13160 }
   13161 
   13162 /* Setup internal SGMII PHY for SFP */
   13163 static void
   13164 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13165 {
   13166 	uint16_t id1, id2, phyreg;
   13167 	int i, rv;
   13168 
   13169 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13170 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13171 		return;
   13172 
   13173 	for (i = 0; i < MII_NPHY; i++) {
   13174 		sc->phy.no_errprint = true;
   13175 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13176 		if (rv != 0)
   13177 			continue;
   13178 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13179 		if (rv != 0)
   13180 			continue;
   13181 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13182 			continue;
   13183 		sc->phy.no_errprint = false;
   13184 
   13185 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13186 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13187 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13188 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13189 		break;
   13190 	}
   13191 
   13193 
   13194 /*
   13195  * wm_sgmii_readreg:	[mii interface function]
   13196  *
   13197  *	Read a PHY register on the SGMII
   13198  * This could be handled by the PHY layer if we didn't have to lock the
   13199  * resource ...
   13200  */
   13201 static int
   13202 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13203 {
   13204 	struct wm_softc *sc = device_private(dev);
   13205 	int rv;
   13206 
   13207 	rv = sc->phy.acquire(sc);
   13208 	if (rv != 0) {
   13209 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13210 		return rv;
   13211 	}
   13212 
   13213 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13214 
   13215 	sc->phy.release(sc);
   13216 	return rv;
   13217 }
   13218 
   13219 static int
   13220 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13221 {
   13222 	struct wm_softc *sc = device_private(dev);
   13223 	uint32_t i2ccmd;
   13224 	int i, rv = 0;
   13225 
   13226 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13227 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13228 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13229 
   13230 	/* Poll the ready bit */
   13231 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13232 		delay(50);
   13233 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13234 		if (i2ccmd & I2CCMD_READY)
   13235 			break;
   13236 	}
   13237 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13238 		device_printf(dev, "I2CCMD Read did not complete\n");
   13239 		rv = ETIMEDOUT;
   13240 	}
   13241 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13242 		if (!sc->phy.no_errprint)
   13243 			device_printf(dev, "I2CCMD Error bit set\n");
   13244 		rv = EIO;
   13245 	}
   13246 
   13247 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13248 
   13249 	return rv;
   13250 }
   13251 
   13252 /*
   13253  * wm_sgmii_writereg:	[mii interface function]
   13254  *
   13255  *	Write a PHY register on the SGMII.
   13256  * This could be handled by the PHY layer if we didn't have to lock the
   13257  * resource ...
   13258  */
   13259 static int
   13260 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13261 {
   13262 	struct wm_softc *sc = device_private(dev);
   13263 	int rv;
   13264 
   13265 	rv = sc->phy.acquire(sc);
   13266 	if (rv != 0) {
   13267 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13268 		return rv;
   13269 	}
   13270 
   13271 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13272 
   13273 	sc->phy.release(sc);
   13274 
   13275 	return rv;
   13276 }
   13277 
   13278 static int
   13279 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13280 {
   13281 	struct wm_softc *sc = device_private(dev);
   13282 	uint32_t i2ccmd;
   13283 	uint16_t swapdata;
   13284 	int rv = 0;
   13285 	int i;
   13286 
   13287 	/* Swap the data bytes for the I2C interface */
   13288 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13289 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13290 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13291 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13292 
   13293 	/* Poll the ready bit */
   13294 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13295 		delay(50);
   13296 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13297 		if (i2ccmd & I2CCMD_READY)
   13298 			break;
   13299 	}
   13300 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13301 		device_printf(dev, "I2CCMD Write did not complete\n");
   13302 		rv = ETIMEDOUT;
   13303 	}
   13304 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13305 		device_printf(dev, "I2CCMD Error bit set\n");
   13306 		rv = EIO;
   13307 	}
   13308 
   13309 	return rv;
   13310 }
   13311 
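/*
 * The I2CCMD data field is byte-swapped relative to host order, which is
 * why both SGMII accessors above swap bytes.  A self-contained sketch of
 * that swap, matching the expressions used above; e.g. 0x1234 in I2CCMD
 * corresponds to 0x3412 for the caller, and vice versa.
 */
static inline uint16_t
wm_i2ccmd_swap16_example(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
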
   13312 /* TBI related */
   13313 
   13314 static bool
   13315 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13316 {
   13317 	bool sig;
   13318 
   13319 	sig = ctrl & CTRL_SWDPIN(1);
   13320 
   13321 	/*
   13322 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13323 	 * detect a signal, 1 if they don't.
   13324 	 */
   13325 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13326 		sig = !sig;
   13327 
   13328 	return sig;
   13329 }
   13330 
   13331 /*
   13332  * wm_tbi_mediainit:
   13333  *
   13334  *	Initialize media for use on 1000BASE-X devices.
   13335  */
   13336 static void
   13337 wm_tbi_mediainit(struct wm_softc *sc)
   13338 {
   13339 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13340 	const char *sep = "";
   13341 
   13342 	if (sc->sc_type < WM_T_82543)
   13343 		sc->sc_tipg = TIPG_WM_DFLT;
   13344 	else
   13345 		sc->sc_tipg = TIPG_LG_DFLT;
   13346 
   13347 	sc->sc_tbi_serdes_anegticks = 5;
   13348 
   13349 	/* Initialize our media structures */
   13350 	sc->sc_mii.mii_ifp = ifp;
   13351 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13352 
   13353 	ifp->if_baudrate = IF_Gbps(1);
   13354 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13355 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13356 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13357 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13358 		    sc->sc_core_lock);
   13359 	} else {
   13360 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13361 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13362 	}
   13363 
   13364 	/*
   13365 	 * SWD Pins:
   13366 	 *
   13367 	 *	0 = Link LED (output)
   13368 	 *	1 = Loss Of Signal (input)
   13369 	 */
   13370 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13371 
   13372 	/* XXX Perhaps this is only for TBI */
   13373 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13374 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13375 
   13376 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13377 		sc->sc_ctrl &= ~CTRL_LRST;
   13378 
   13379 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13380 
   13381 #define	ADD(ss, mm, dd)							  \
   13382 do {									  \
   13383 	aprint_normal("%s%s", sep, ss);					  \
   13384 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13385 	sep = ", ";							  \
   13386 } while (/*CONSTCOND*/0)
   13387 
   13388 	aprint_normal_dev(sc->sc_dev, "");
   13389 
   13390 	if (sc->sc_type == WM_T_I354) {
   13391 		uint32_t status;
   13392 
   13393 		status = CSR_READ(sc, WMREG_STATUS);
   13394 		if (((status & STATUS_2P5_SKU) != 0)
   13395 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13396 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   13397 		} else
   13398 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   13399 	} else if (sc->sc_type == WM_T_82545) {
   13400 		/* Only 82545 is LX (XXX except SFP) */
   13401 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13402 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13403 	} else if (sc->sc_sfptype != 0) {
   13404 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13405 		switch (sc->sc_sfptype) {
   13406 		default:
   13407 		case SFF_SFP_ETH_FLAGS_1000SX:
   13408 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13409 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13410 			break;
   13411 		case SFF_SFP_ETH_FLAGS_1000LX:
   13412 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13413 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13414 			break;
   13415 		case SFF_SFP_ETH_FLAGS_1000CX:
   13416 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13417 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13418 			break;
   13419 		case SFF_SFP_ETH_FLAGS_1000T:
   13420 			ADD("1000baseT", IFM_1000_T, 0);
   13421 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13422 			break;
   13423 		case SFF_SFP_ETH_FLAGS_100FX:
   13424 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13425 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13426 			break;
   13427 		}
   13428 	} else {
   13429 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13430 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13431 	}
   13432 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13433 	aprint_normal("\n");
   13434 
   13435 #undef ADD
   13436 
   13437 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13438 }
   13439 
   13440 /*
   13441  * wm_tbi_mediachange:	[ifmedia interface function]
   13442  *
   13443  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13444  */
   13445 static int
   13446 wm_tbi_mediachange(struct ifnet *ifp)
   13447 {
   13448 	struct wm_softc *sc = ifp->if_softc;
   13449 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13450 	uint32_t status, ctrl;
   13451 	bool signal;
   13452 	int i;
   13453 
   13454 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13455 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13456 		/* XXX need some work for >= 82571 and < 82575 */
   13457 		if (sc->sc_type < WM_T_82575)
   13458 			return 0;
   13459 	}
   13460 
   13461 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13462 	    || (sc->sc_type >= WM_T_82575))
   13463 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13464 
   13465 	sc->sc_ctrl &= ~CTRL_LRST;
   13466 	sc->sc_txcw = TXCW_ANE;
   13467 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13468 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13469 	else if (ife->ifm_media & IFM_FDX)
   13470 		sc->sc_txcw |= TXCW_FD;
   13471 	else
   13472 		sc->sc_txcw |= TXCW_HD;
   13473 
   13474 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13475 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13476 
   13477 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   13478 		device_xname(sc->sc_dev), sc->sc_txcw));
   13479 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13480 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13481 	CSR_WRITE_FLUSH(sc);
   13482 	delay(1000);
   13483 
   13484 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13485 	signal = wm_tbi_havesignal(sc, ctrl);
   13486 
   13487 	DPRINTF(sc, WM_DEBUG_LINK,
   13488 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13489 
   13490 	if (signal) {
   13491 		/* Have signal; wait for the link to come up. */
   13492 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13493 			delay(10000);
   13494 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13495 				break;
   13496 		}
   13497 
   13498 		DPRINTF(sc, WM_DEBUG_LINK,
   13499 		    ("%s: i = %d after waiting for link\n",
   13500 			device_xname(sc->sc_dev), i));
   13501 
   13502 		status = CSR_READ(sc, WMREG_STATUS);
   13503 		DPRINTF(sc, WM_DEBUG_LINK,
   13504 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13505 			__PRIxBIT "\n",
   13506 			device_xname(sc->sc_dev), status, STATUS_LU));
   13507 		if (status & STATUS_LU) {
   13508 			/* Link is up. */
   13509 			DPRINTF(sc, WM_DEBUG_LINK,
   13510 			    ("%s: LINK: set media -> link up %s\n",
   13511 				device_xname(sc->sc_dev),
   13512 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13513 
   13514 			/*
    13515 			 * NOTE: the MAC updates the TFCE and RFCE bits in
    13516 			 * CTRL automatically, so we should update sc->sc_ctrl
   13517 			 */
   13518 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13519 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13520 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13521 			if (status & STATUS_FD)
   13522 				sc->sc_tctl |=
   13523 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13524 			else
   13525 				sc->sc_tctl |=
   13526 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13527 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13528 				sc->sc_fcrtl |= FCRTL_XONE;
   13529 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13530 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13531 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13532 			sc->sc_tbi_linkup = 1;
   13533 		} else {
   13534 			if (i == WM_LINKUP_TIMEOUT)
   13535 				wm_check_for_link(sc);
   13536 			/* Link is down. */
   13537 			DPRINTF(sc, WM_DEBUG_LINK,
   13538 			    ("%s: LINK: set media -> link down\n",
   13539 				device_xname(sc->sc_dev)));
   13540 			sc->sc_tbi_linkup = 0;
   13541 		}
   13542 	} else {
   13543 		DPRINTF(sc, WM_DEBUG_LINK,
   13544 		    ("%s: LINK: set media -> no signal\n",
   13545 			device_xname(sc->sc_dev)));
   13546 		sc->sc_tbi_linkup = 0;
   13547 	}
   13548 
   13549 	wm_tbi_serdes_set_linkled(sc);
   13550 
   13551 	return 0;
   13552 }
   13553 
   13554 /*
   13555  * wm_tbi_mediastatus:	[ifmedia interface function]
   13556  *
   13557  *	Get the current interface media status on a 1000BASE-X device.
   13558  */
   13559 static void
   13560 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13561 {
   13562 	struct wm_softc *sc = ifp->if_softc;
   13563 	uint32_t ctrl, status;
   13564 
   13565 	ifmr->ifm_status = IFM_AVALID;
   13566 	ifmr->ifm_active = IFM_ETHER;
   13567 
   13568 	status = CSR_READ(sc, WMREG_STATUS);
   13569 	if ((status & STATUS_LU) == 0) {
   13570 		ifmr->ifm_active |= IFM_NONE;
   13571 		return;
   13572 	}
   13573 
   13574 	ifmr->ifm_status |= IFM_ACTIVE;
   13575 	/* Only 82545 is LX */
   13576 	if (sc->sc_type == WM_T_82545)
   13577 		ifmr->ifm_active |= IFM_1000_LX;
   13578 	else
   13579 		ifmr->ifm_active |= IFM_1000_SX;
   13580 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13581 		ifmr->ifm_active |= IFM_FDX;
   13582 	else
   13583 		ifmr->ifm_active |= IFM_HDX;
   13584 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13585 	if (ctrl & CTRL_RFCE)
   13586 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13587 	if (ctrl & CTRL_TFCE)
   13588 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13589 }
   13590 
   13591 /* XXX TBI only */
   13592 static int
   13593 wm_check_for_link(struct wm_softc *sc)
   13594 {
   13595 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13596 	uint32_t rxcw;
   13597 	uint32_t ctrl;
   13598 	uint32_t status;
   13599 	bool signal;
   13600 
   13601 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13602 		device_xname(sc->sc_dev), __func__));
   13603 
   13604 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13605 		/* XXX need some work for >= 82571 */
   13606 		if (sc->sc_type >= WM_T_82571) {
   13607 			sc->sc_tbi_linkup = 1;
   13608 			return 0;
   13609 		}
   13610 	}
   13611 
   13612 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13613 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13614 	status = CSR_READ(sc, WMREG_STATUS);
   13615 	signal = wm_tbi_havesignal(sc, ctrl);
   13616 
   13617 	DPRINTF(sc, WM_DEBUG_LINK,
   13618 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13619 		device_xname(sc->sc_dev), __func__, signal,
   13620 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13621 
   13622 	/*
   13623 	 * SWDPIN   LU RXCW
   13624 	 *	0    0	  0
   13625 	 *	0    0	  1	(should not happen)
   13626 	 *	0    1	  0	(should not happen)
   13627 	 *	0    1	  1	(should not happen)
   13628 	 *	1    0	  0	Disable autonego and force linkup
   13629 	 *	1    0	  1	got /C/ but not linkup yet
   13630 	 *	1    1	  0	(linkup)
   13631 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13632 	 *
   13633 	 */
   13634 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13635 		DPRINTF(sc, WM_DEBUG_LINK,
   13636 		    ("%s: %s: force linkup and fullduplex\n",
   13637 			device_xname(sc->sc_dev), __func__));
   13638 		sc->sc_tbi_linkup = 0;
   13639 		/* Disable auto-negotiation in the TXCW register */
   13640 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13641 
   13642 		/*
   13643 		 * Force link-up and also force full-duplex.
   13644 		 *
    13645 		 * NOTE: the MAC updated the TFCE and RFCE bits in CTRL
    13646 		 * automatically, so we should update sc->sc_ctrl
   13647 		 */
   13648 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13649 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13650 	} else if (((status & STATUS_LU) != 0)
   13651 	    && ((rxcw & RXCW_C) != 0)
   13652 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13653 		sc->sc_tbi_linkup = 1;
   13654 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13655 			device_xname(sc->sc_dev), __func__));
   13656 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13657 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13658 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13659 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13660 			device_xname(sc->sc_dev), __func__));
   13661 	} else {
   13662 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13663 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13664 			status));
   13665 	}
   13666 
   13667 	return 0;
   13668 }
   13669 
   13670 /*
   13671  * wm_tbi_tick:
   13672  *
   13673  *	Check the link on TBI devices.
   13674  *	This function acts as mii_tick().
   13675  */
   13676 static void
   13677 wm_tbi_tick(struct wm_softc *sc)
   13678 {
   13679 	struct mii_data *mii = &sc->sc_mii;
   13680 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13681 	uint32_t status;
   13682 
   13683 	KASSERT(mutex_owned(sc->sc_core_lock));
   13684 
   13685 	status = CSR_READ(sc, WMREG_STATUS);
   13686 
   13687 	/* XXX is this needed? */
   13688 	(void)CSR_READ(sc, WMREG_RXCW);
   13689 	(void)CSR_READ(sc, WMREG_CTRL);
   13690 
   13691 	/* set link status */
   13692 	if ((status & STATUS_LU) == 0) {
   13693 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13694 			device_xname(sc->sc_dev)));
   13695 		sc->sc_tbi_linkup = 0;
   13696 	} else if (sc->sc_tbi_linkup == 0) {
   13697 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13698 			device_xname(sc->sc_dev),
   13699 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13700 		sc->sc_tbi_linkup = 1;
   13701 		sc->sc_tbi_serdes_ticks = 0;
   13702 	}
   13703 
   13704 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13705 		goto setled;
   13706 
   13707 	if ((status & STATUS_LU) == 0) {
   13708 		sc->sc_tbi_linkup = 0;
   13709 		/* If the timer expired, retry autonegotiation */
   13710 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13711 		    && (++sc->sc_tbi_serdes_ticks
   13712 			>= sc->sc_tbi_serdes_anegticks)) {
   13713 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13714 				device_xname(sc->sc_dev), __func__));
   13715 			sc->sc_tbi_serdes_ticks = 0;
   13716 			/*
   13717 			 * Reset the link, and let autonegotiation do
   13718 			 * its thing
   13719 			 */
   13720 			sc->sc_ctrl |= CTRL_LRST;
   13721 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13722 			CSR_WRITE_FLUSH(sc);
   13723 			delay(1000);
   13724 			sc->sc_ctrl &= ~CTRL_LRST;
   13725 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13726 			CSR_WRITE_FLUSH(sc);
   13727 			delay(1000);
   13728 			CSR_WRITE(sc, WMREG_TXCW,
   13729 			    sc->sc_txcw & ~TXCW_ANE);
   13730 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13731 		}
   13732 	}
   13733 
   13734 setled:
   13735 	wm_tbi_serdes_set_linkled(sc);
   13736 }
   13737 
   13738 /* SERDES related */
   13739 static void
   13740 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13741 {
   13742 	uint32_t reg;
   13743 
   13744 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13745 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13746 		return;
   13747 
   13748 	/* Enable PCS to turn on link */
   13749 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13750 	reg |= PCS_CFG_PCS_EN;
   13751 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13752 
   13753 	/* Power up the laser */
   13754 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13755 	reg &= ~CTRL_EXT_SWDPIN(3);
   13756 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13757 
   13758 	/* Flush the write to verify completion */
   13759 	CSR_WRITE_FLUSH(sc);
   13760 	delay(1000);
   13761 }
   13762 
   13763 static int
   13764 wm_serdes_mediachange(struct ifnet *ifp)
   13765 {
   13766 	struct wm_softc *sc = ifp->if_softc;
   13767 	bool pcs_autoneg = true; /* XXX */
   13768 	uint32_t ctrl_ext, pcs_lctl, reg;
   13769 
   13770 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13771 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13772 		return 0;
   13773 
   13774 	/* XXX Currently, this function is not called on 8257[12] */
   13775 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13776 	    || (sc->sc_type >= WM_T_82575))
   13777 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13778 
   13779 	/* Power on the sfp cage if present */
   13780 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13781 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13782 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13783 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13784 
   13785 	sc->sc_ctrl |= CTRL_SLU;
   13786 
   13787 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13788 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13789 
   13790 		reg = CSR_READ(sc, WMREG_CONNSW);
   13791 		reg |= CONNSW_ENRGSRC;
   13792 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13793 	}
   13794 
   13795 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13796 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13797 	case CTRL_EXT_LINK_MODE_SGMII:
   13798 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13799 		pcs_autoneg = true;
   13800 		/* Autoneg time out should be disabled for SGMII mode */
   13801 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13802 		break;
   13803 	case CTRL_EXT_LINK_MODE_1000KX:
   13804 		pcs_autoneg = false;
   13805 		/* FALLTHROUGH */
   13806 	default:
   13807 		if ((sc->sc_type == WM_T_82575)
   13808 		    || (sc->sc_type == WM_T_82576)) {
   13809 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13810 				pcs_autoneg = false;
   13811 		}
   13812 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13813 		    | CTRL_FRCFDX;
   13814 
   13815 		/* Set speed of 1000/Full if speed/duplex is forced */
   13816 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13817 	}
   13818 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13819 
   13820 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13821 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13822 
   13823 	if (pcs_autoneg) {
   13824 		/* Set PCS register for autoneg */
   13825 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13826 
   13827 		/* Disable force flow control for autoneg */
   13828 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13829 
   13830 		/* Configure flow control advertisement for autoneg */
   13831 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13832 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13833 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13834 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13835 	} else
   13836 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13837 
   13838 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13839 
   13840 	return 0;
   13841 }
   13842 
   13843 static void
   13844 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13845 {
   13846 	struct wm_softc *sc = ifp->if_softc;
   13847 	struct mii_data *mii = &sc->sc_mii;
   13848 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13849 	uint32_t pcs_adv, pcs_lpab, reg;
   13850 
   13851 	ifmr->ifm_status = IFM_AVALID;
   13852 	ifmr->ifm_active = IFM_ETHER;
   13853 
   13854 	/* Check PCS */
   13855 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13856 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13857 		ifmr->ifm_active |= IFM_NONE;
   13858 		sc->sc_tbi_linkup = 0;
   13859 		goto setled;
   13860 	}
   13861 
   13862 	sc->sc_tbi_linkup = 1;
   13863 	ifmr->ifm_status |= IFM_ACTIVE;
   13864 	if (sc->sc_type == WM_T_I354) {
   13865 		uint32_t status;
   13866 
   13867 		status = CSR_READ(sc, WMREG_STATUS);
   13868 		if (((status & STATUS_2P5_SKU) != 0)
   13869 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13870 			ifmr->ifm_active |= IFM_2500_KX;
   13871 		} else
   13872 			ifmr->ifm_active |= IFM_1000_KX;
   13873 	} else {
   13874 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13875 		case PCS_LSTS_SPEED_10:
   13876 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13877 			break;
   13878 		case PCS_LSTS_SPEED_100:
   13879 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13880 			break;
   13881 		case PCS_LSTS_SPEED_1000:
   13882 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13883 			break;
   13884 		default:
   13885 			device_printf(sc->sc_dev, "Unknown speed\n");
   13886 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13887 			break;
   13888 		}
   13889 	}
   13890 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13891 	if ((reg & PCS_LSTS_FDX) != 0)
   13892 		ifmr->ifm_active |= IFM_FDX;
   13893 	else
   13894 		ifmr->ifm_active |= IFM_HDX;
   13895 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13896 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13897 		/* Check flow */
   13898 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13899 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13900 			DPRINTF(sc, WM_DEBUG_LINK,
   13901 			    ("XXX LINKOK but not ACOMP\n"));
   13902 			goto setled;
   13903 		}
   13904 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13905 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13906 		DPRINTF(sc, WM_DEBUG_LINK,
   13907 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13908 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13909 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13910 			mii->mii_media_active |= IFM_FLOW
   13911 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13912 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13913 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13914 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13915 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13916 			mii->mii_media_active |= IFM_FLOW
   13917 			    | IFM_ETH_TXPAUSE;
   13918 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13919 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13920 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13921 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13922 			mii->mii_media_active |= IFM_FLOW
   13923 			    | IFM_ETH_RXPAUSE;
   13924 		}
   13925 	}
   13926 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13927 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13928 setled:
   13929 	wm_tbi_serdes_set_linkled(sc);
   13930 }
   13931 
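/*
 * A distilled sketch (not called by the driver) of the IEEE 802.3 pause
 * resolution wm_serdes_mediastatus() performs above, with adv the local
 * PCS_ANADV advertisement and lpab the partner's PCS_LPAB:
 */
static inline int
wm_resolve_pause_example(uint32_t adv, uint32_t lpab)
{

	if ((adv & TXCW_SYM_PAUSE) && (lpab & TXCW_SYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (((adv & TXCW_SYM_PAUSE) == 0) && (adv & TXCW_ASYM_PAUSE) &&
	    (lpab & TXCW_SYM_PAUSE) && (lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE) &&
	    ((lpab & TXCW_SYM_PAUSE) == 0) && (lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	return 0;	/* no pause in either direction */
}
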
   13932 /*
   13933  * wm_serdes_tick:
   13934  *
   13935  *	Check the link on serdes devices.
   13936  */
   13937 static void
   13938 wm_serdes_tick(struct wm_softc *sc)
   13939 {
   13940 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13941 	struct mii_data *mii = &sc->sc_mii;
   13942 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13943 	uint32_t reg;
   13944 
   13945 	KASSERT(mutex_owned(sc->sc_core_lock));
   13946 
   13947 	mii->mii_media_status = IFM_AVALID;
   13948 	mii->mii_media_active = IFM_ETHER;
   13949 
   13950 	/* Check PCS */
   13951 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13952 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13953 		mii->mii_media_status |= IFM_ACTIVE;
   13954 		sc->sc_tbi_linkup = 1;
   13955 		sc->sc_tbi_serdes_ticks = 0;
   13956 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13957 		if ((reg & PCS_LSTS_FDX) != 0)
   13958 			mii->mii_media_active |= IFM_FDX;
   13959 		else
   13960 			mii->mii_media_active |= IFM_HDX;
   13961 	} else {
    13962 		mii->mii_media_active |= IFM_NONE;
   13963 		sc->sc_tbi_linkup = 0;
   13964 		/* If the timer expired, retry autonegotiation */
   13965 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13966 		    && (++sc->sc_tbi_serdes_ticks
   13967 			>= sc->sc_tbi_serdes_anegticks)) {
   13968 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13969 				device_xname(sc->sc_dev), __func__));
   13970 			sc->sc_tbi_serdes_ticks = 0;
   13971 			/* XXX */
   13972 			wm_serdes_mediachange(ifp);
   13973 		}
   13974 	}
   13975 
   13976 	wm_tbi_serdes_set_linkled(sc);
   13977 }
   13978 
   13979 /* SFP related */
   13980 
   13981 static int
   13982 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13983 {
   13984 	uint32_t i2ccmd;
   13985 	int i;
   13986 
   13987 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13988 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13989 
   13990 	/* Poll the ready bit */
   13991 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13992 		delay(50);
   13993 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13994 		if (i2ccmd & I2CCMD_READY)
   13995 			break;
   13996 	}
   13997 	if ((i2ccmd & I2CCMD_READY) == 0)
   13998 		return -1;
   13999 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   14000 		return -1;
   14001 
   14002 	*data = i2ccmd & 0x00ff;
   14003 
   14004 	return 0;
   14005 }
   14006 
   14007 static uint32_t
   14008 wm_sfp_get_media_type(struct wm_softc *sc)
   14009 {
   14010 	uint32_t ctrl_ext;
   14011 	uint8_t val = 0;
   14012 	int timeout = 3;
   14013 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14014 	int rv = -1;
   14015 
   14016 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14017 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14018 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14019 	CSR_WRITE_FLUSH(sc);
   14020 
   14021 	/* Read SFP module data */
   14022 	while (timeout) {
   14023 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14024 		if (rv == 0)
   14025 			break;
   14026 		delay(100*1000); /* XXX too big */
   14027 		timeout--;
   14028 	}
   14029 	if (rv != 0)
   14030 		goto out;
   14031 
   14032 	switch (val) {
   14033 	case SFF_SFP_ID_SFF:
   14034 		aprint_normal_dev(sc->sc_dev,
   14035 		    "Module/Connector soldered to board\n");
   14036 		break;
   14037 	case SFF_SFP_ID_SFP:
   14038 		sc->sc_flags |= WM_F_SFP;
   14039 		break;
   14040 	case SFF_SFP_ID_UNKNOWN:
   14041 		goto out;
   14042 	default:
   14043 		break;
   14044 	}
   14045 
   14046 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14047 	if (rv != 0)
   14048 		goto out;
   14049 
   14050 	sc->sc_sfptype = val;
   14051 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14052 		mediatype = WM_MEDIATYPE_SERDES;
   14053 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14054 		sc->sc_flags |= WM_F_SGMII;
   14055 		mediatype = WM_MEDIATYPE_COPPER;
   14056 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14057 		sc->sc_flags |= WM_F_SGMII;
   14058 		mediatype = WM_MEDIATYPE_SERDES;
   14059 	} else {
   14060 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14061 		    __func__, sc->sc_sfptype);
   14062 		sc->sc_sfptype = 0; /* XXX unknown */
   14063 	}
   14064 
   14065 out:
   14066 	/* Restore I2C interface setting */
   14067 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14068 
   14069 	return mediatype;
   14070 }
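
          /*
           * Illustrative helper (an editor's sketch, not part of the driver): a
           * decoder for the SFF ethernet compliance flags byte read by
           * wm_sfp_get_media_type() above, using only the SFF_SFP_ETH_FLAGS_*
           * bits that function already tests.
           */
          static const char * __unused
          wm_sfp_eth_flags_str(uint8_t flags)
          {
          
          	if ((flags & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX))
          	    != 0)
          		return "1000BASE-SX/LX";	/* -> WM_MEDIATYPE_SERDES */
          	if ((flags & SFF_SFP_ETH_FLAGS_1000T) != 0)
          		return "1000BASE-T";		/* -> WM_MEDIATYPE_COPPER */
          	if ((flags & SFF_SFP_ETH_FLAGS_100FX) != 0)
          		return "100BASE-FX";		/* -> WM_MEDIATYPE_SERDES */
          	return "unknown";
          }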
   14071 
   14072 /*
   14073  * NVM related.
   14074  * Microwire, SPI (w/wo EERD) and Flash.
   14075  */
   14076 
    14077 /* Both SPI and uwire */
   14078 
   14079 /*
   14080  * wm_eeprom_sendbits:
   14081  *
   14082  *	Send a series of bits to the EEPROM.
   14083  */
   14084 static void
   14085 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14086 {
   14087 	uint32_t reg;
   14088 	int x;
   14089 
   14090 	reg = CSR_READ(sc, WMREG_EECD);
   14091 
   14092 	for (x = nbits; x > 0; x--) {
   14093 		if (bits & (1U << (x - 1)))
   14094 			reg |= EECD_DI;
   14095 		else
   14096 			reg &= ~EECD_DI;
   14097 		CSR_WRITE(sc, WMREG_EECD, reg);
   14098 		CSR_WRITE_FLUSH(sc);
   14099 		delay(2);
   14100 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14101 		CSR_WRITE_FLUSH(sc);
   14102 		delay(2);
   14103 		CSR_WRITE(sc, WMREG_EECD, reg);
   14104 		CSR_WRITE_FLUSH(sc);
   14105 		delay(2);
   14106 	}
   14107 }
   14108 
   14109 /*
   14110  * wm_eeprom_recvbits:
   14111  *
   14112  *	Receive a series of bits from the EEPROM.
   14113  */
   14114 static void
   14115 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14116 {
   14117 	uint32_t reg, val;
   14118 	int x;
   14119 
   14120 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14121 
   14122 	val = 0;
   14123 	for (x = nbits; x > 0; x--) {
   14124 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14125 		CSR_WRITE_FLUSH(sc);
   14126 		delay(2);
   14127 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14128 			val |= (1U << (x - 1));
   14129 		CSR_WRITE(sc, WMREG_EECD, reg);
   14130 		CSR_WRITE_FLUSH(sc);
   14131 		delay(2);
   14132 	}
   14133 	*valp = val;
   14134 }
   14135 
   14136 /* Microwire */
   14137 
   14138 /*
   14139  * wm_nvm_read_uwire:
   14140  *
   14141  *	Read a word from the EEPROM using the MicroWire protocol.
   14142  */
   14143 static int
   14144 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14145 {
   14146 	uint32_t reg, val;
   14147 	int i, rv;
   14148 
   14149 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14150 		device_xname(sc->sc_dev), __func__));
   14151 
   14152 	rv = sc->nvm.acquire(sc);
   14153 	if (rv != 0)
   14154 		return rv;
   14155 
   14156 	for (i = 0; i < wordcnt; i++) {
   14157 		/* Clear SK and DI. */
   14158 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14159 		CSR_WRITE(sc, WMREG_EECD, reg);
   14160 
   14161 		/*
   14162 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   14163 		 * and Xen.
   14164 		 *
    14165 		 * We use this workaround only for the 82540 because
    14166 		 * qemu's e1000 acts as an 82540.
   14167 		 */
   14168 		if (sc->sc_type == WM_T_82540) {
   14169 			reg |= EECD_SK;
   14170 			CSR_WRITE(sc, WMREG_EECD, reg);
   14171 			reg &= ~EECD_SK;
   14172 			CSR_WRITE(sc, WMREG_EECD, reg);
   14173 			CSR_WRITE_FLUSH(sc);
   14174 			delay(2);
   14175 		}
   14176 		/* XXX: end of workaround */
   14177 
   14178 		/* Set CHIP SELECT. */
   14179 		reg |= EECD_CS;
   14180 		CSR_WRITE(sc, WMREG_EECD, reg);
   14181 		CSR_WRITE_FLUSH(sc);
   14182 		delay(2);
   14183 
   14184 		/* Shift in the READ command. */
   14185 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14186 
   14187 		/* Shift in address. */
   14188 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14189 
   14190 		/* Shift out the data. */
   14191 		wm_eeprom_recvbits(sc, &val, 16);
   14192 		data[i] = val & 0xffff;
   14193 
   14194 		/* Clear CHIP SELECT. */
   14195 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14196 		CSR_WRITE(sc, WMREG_EECD, reg);
   14197 		CSR_WRITE_FLUSH(sc);
   14198 		delay(2);
   14199 	}
   14200 
   14201 	sc->nvm.release(sc);
   14202 	return 0;
   14203 }
   14204 
   14205 /* SPI */
   14206 
   14207 /*
   14208  * Set SPI and FLASH related information from the EECD register.
   14209  * For 82541 and 82547, the word size is taken from EEPROM.
   14210  */
   14211 static int
   14212 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14213 {
   14214 	int size;
   14215 	uint32_t reg;
   14216 	uint16_t data;
   14217 
   14218 	reg = CSR_READ(sc, WMREG_EECD);
   14219 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14220 
   14221 	/* Read the size of NVM from EECD by default */
   14222 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14223 	switch (sc->sc_type) {
   14224 	case WM_T_82541:
   14225 	case WM_T_82541_2:
   14226 	case WM_T_82547:
   14227 	case WM_T_82547_2:
   14228 		/* Set dummy value to access EEPROM */
   14229 		sc->sc_nvm_wordsize = 64;
   14230 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14231 			aprint_error_dev(sc->sc_dev,
   14232 			    "%s: failed to read EEPROM size\n", __func__);
   14233 		}
   14234 		reg = data;
   14235 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14236 		if (size == 0)
    14237 			size = 6; /* 64 words */
   14238 		else
   14239 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14240 		break;
   14241 	case WM_T_80003:
   14242 	case WM_T_82571:
   14243 	case WM_T_82572:
   14244 	case WM_T_82573: /* SPI case */
   14245 	case WM_T_82574: /* SPI case */
   14246 	case WM_T_82583: /* SPI case */
   14247 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14248 		if (size > 14)
   14249 			size = 14;
   14250 		break;
   14251 	case WM_T_82575:
   14252 	case WM_T_82576:
   14253 	case WM_T_82580:
   14254 	case WM_T_I350:
   14255 	case WM_T_I354:
   14256 	case WM_T_I210:
   14257 	case WM_T_I211:
   14258 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14259 		if (size > 15)
   14260 			size = 15;
   14261 		break;
   14262 	default:
    14263 		aprint_error_dev(sc->sc_dev,
    14264 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    14265 		return -1;
   14267 	}
   14268 
   14269 	sc->sc_nvm_wordsize = 1 << size;
   14270 
   14271 	return 0;
   14272 }
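
          /*
           * Worked example for the computation above (assuming
           * NVM_WORD_SIZE_BASE_SHIFT == 6, as in Intel's shared e1000 code): an
           * EECD_EE_SIZE_EX field of 2 on an 82571 gives size = 2 + 6 = 8, so
           * sc->sc_nvm_wordsize = 1 << 8 = 256 words.
           */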
   14273 
   14274 /*
   14275  * wm_nvm_ready_spi:
   14276  *
   14277  *	Wait for a SPI EEPROM to be ready for commands.
   14278  */
   14279 static int
   14280 wm_nvm_ready_spi(struct wm_softc *sc)
   14281 {
   14282 	uint32_t val;
   14283 	int usec;
   14284 
   14285 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14286 		device_xname(sc->sc_dev), __func__));
   14287 
   14288 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14289 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14290 		wm_eeprom_recvbits(sc, &val, 8);
   14291 		if ((val & SPI_SR_RDY) == 0)
   14292 			break;
   14293 	}
   14294 	if (usec >= SPI_MAX_RETRIES) {
    14295 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   14296 		return -1;
   14297 	}
   14298 	return 0;
   14299 }
   14300 
   14301 /*
   14302  * wm_nvm_read_spi:
   14303  *
    14304  *	Read a word from the EEPROM using the SPI protocol.
   14305  */
   14306 static int
   14307 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14308 {
   14309 	uint32_t reg, val;
   14310 	int i;
   14311 	uint8_t opc;
   14312 	int rv;
   14313 
   14314 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14315 		device_xname(sc->sc_dev), __func__));
   14316 
   14317 	rv = sc->nvm.acquire(sc);
   14318 	if (rv != 0)
   14319 		return rv;
   14320 
   14321 	/* Clear SK and CS. */
   14322 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14323 	CSR_WRITE(sc, WMREG_EECD, reg);
   14324 	CSR_WRITE_FLUSH(sc);
   14325 	delay(2);
   14326 
   14327 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14328 		goto out;
   14329 
   14330 	/* Toggle CS to flush commands. */
   14331 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14332 	CSR_WRITE_FLUSH(sc);
   14333 	delay(2);
   14334 	CSR_WRITE(sc, WMREG_EECD, reg);
   14335 	CSR_WRITE_FLUSH(sc);
   14336 	delay(2);
   14337 
   14338 	opc = SPI_OPC_READ;
   14339 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14340 		opc |= SPI_OPC_A8;
   14341 
   14342 	wm_eeprom_sendbits(sc, opc, 8);
   14343 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14344 
   14345 	for (i = 0; i < wordcnt; i++) {
   14346 		wm_eeprom_recvbits(sc, &val, 16);
   14347 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14348 	}
   14349 
   14350 	/* Raise CS and clear SK. */
   14351 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14352 	CSR_WRITE(sc, WMREG_EECD, reg);
   14353 	CSR_WRITE_FLUSH(sc);
   14354 	delay(2);
   14355 
   14356 out:
   14357 	sc->nvm.release(sc);
   14358 	return rv;
   14359 }
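
          /*
           * Worked example for the SPI_OPC_A8 handling above: on a part with
           * 8 address bits, word 130 sits at byte address 130 << 1 = 260 (0x104).
           * Bit 8 cannot fit in the 8 address bits that are shifted out, so it is
           * carried in the opcode's A8 bit and only the low byte (0x04) is sent
           * as the address.
           */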
   14360 
    14361 /* Reading via the EERD register */
   14362 
   14363 static int
   14364 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14365 {
   14366 	uint32_t attempts = 100000;
   14367 	uint32_t i, reg = 0;
   14368 	int32_t done = -1;
   14369 
   14370 	for (i = 0; i < attempts; i++) {
   14371 		reg = CSR_READ(sc, rw);
   14372 
   14373 		if (reg & EERD_DONE) {
   14374 			done = 0;
   14375 			break;
   14376 		}
   14377 		delay(5);
   14378 	}
   14379 
   14380 	return done;
   14381 }
   14382 
   14383 static int
   14384 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14385 {
   14386 	int i, eerd = 0;
   14387 	int rv;
   14388 
   14389 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14390 		device_xname(sc->sc_dev), __func__));
   14391 
   14392 	rv = sc->nvm.acquire(sc);
   14393 	if (rv != 0)
   14394 		return rv;
   14395 
   14396 	for (i = 0; i < wordcnt; i++) {
   14397 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14398 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14399 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14400 		if (rv != 0) {
   14401 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    14402 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   14403 			break;
   14404 		}
   14405 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14406 	}
   14407 
   14408 	sc->nvm.release(sc);
   14409 	return rv;
   14410 }
   14411 
   14412 /* Flash */
   14413 
   14414 static int
   14415 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14416 {
   14417 	uint32_t eecd;
   14418 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14419 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14420 	uint32_t nvm_dword = 0;
   14421 	uint8_t sig_byte = 0;
   14422 	int rv;
   14423 
   14424 	switch (sc->sc_type) {
   14425 	case WM_T_PCH_SPT:
   14426 	case WM_T_PCH_CNP:
   14427 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14428 		act_offset = ICH_NVM_SIG_WORD * 2;
   14429 
   14430 		/* Set bank to 0 in case flash read fails. */
   14431 		*bank = 0;
   14432 
   14433 		/* Check bank 0 */
   14434 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14435 		if (rv != 0)
   14436 			return rv;
   14437 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14438 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14439 			*bank = 0;
   14440 			return 0;
   14441 		}
   14442 
   14443 		/* Check bank 1 */
    14444 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    14445 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    14446 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14447 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14448 			*bank = 1;
   14449 			return 0;
   14450 		}
   14451 		aprint_error_dev(sc->sc_dev,
   14452 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14453 		return -1;
   14454 	case WM_T_ICH8:
   14455 	case WM_T_ICH9:
   14456 		eecd = CSR_READ(sc, WMREG_EECD);
   14457 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14458 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14459 			return 0;
   14460 		}
   14461 		/* FALLTHROUGH */
   14462 	default:
   14463 		/* Default to 0 */
   14464 		*bank = 0;
   14465 
   14466 		/* Check bank 0 */
   14467 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14468 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14469 			*bank = 0;
   14470 			return 0;
   14471 		}
   14472 
   14473 		/* Check bank 1 */
   14474 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14475 		    &sig_byte);
   14476 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14477 			*bank = 1;
   14478 			return 0;
   14479 		}
   14480 	}
   14481 
   14482 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14483 		device_xname(sc->sc_dev)));
   14484 	return -1;
   14485 }
   14486 
   14487 /******************************************************************************
   14488  * This function does initial flash setup so that a new read/write/erase cycle
   14489  * can be started.
   14490  *
   14491  * sc - The pointer to the hw structure
   14492  ****************************************************************************/
   14493 static int32_t
   14494 wm_ich8_cycle_init(struct wm_softc *sc)
   14495 {
   14496 	uint16_t hsfsts;
   14497 	int32_t error = 1;
   14498 	int32_t i     = 0;
   14499 
   14500 	if (sc->sc_type >= WM_T_PCH_SPT)
   14501 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14502 	else
   14503 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14504 
    14505 	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
   14506 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14507 		return error;
   14508 
    14509 	/* Clear FCERR and DAEL in HW status by writing 1s */
   14511 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14512 
   14513 	if (sc->sc_type >= WM_T_PCH_SPT)
   14514 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14515 	else
   14516 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14517 
    14518 	/*
    14519 	 * Either we should have a hardware SPI cycle-in-progress bit to
    14520 	 * check against in order to start a new cycle, or the FDONE bit
    14521 	 * should be changed in the hardware so that it is 1 after hardware
    14522 	 * reset, which could then be used to tell whether a cycle is in
    14523 	 * progress or has completed.  We should also have some software
    14524 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit,
    14525 	 * so that two threads' accesses are serialized and two threads
    14526 	 * cannot start a cycle at the same time.
    14527 	 */
   14528 
   14529 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14530 		/*
    14531 		 * There is no cycle running at present, so we can start
    14532 		 * a cycle.
   14533 		 */
   14534 
   14535 		/* Begin by setting Flash Cycle Done. */
   14536 		hsfsts |= HSFSTS_DONE;
   14537 		if (sc->sc_type >= WM_T_PCH_SPT)
   14538 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14539 			    hsfsts & 0xffffUL);
   14540 		else
   14541 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14542 		error = 0;
   14543 	} else {
   14544 		/*
    14545 		 * Otherwise, poll for some time so the current cycle has a
   14546 		 * chance to end before giving up.
   14547 		 */
   14548 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14549 			if (sc->sc_type >= WM_T_PCH_SPT)
   14550 				hsfsts = ICH8_FLASH_READ32(sc,
   14551 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14552 			else
   14553 				hsfsts = ICH8_FLASH_READ16(sc,
   14554 				    ICH_FLASH_HSFSTS);
   14555 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14556 				error = 0;
   14557 				break;
   14558 			}
   14559 			delay(1);
   14560 		}
   14561 		if (error == 0) {
   14562 			/*
    14563 			 * The previous cycle ended within the timeout;
    14564 			 * now set the Flash Cycle Done bit.
   14565 			 */
   14566 			hsfsts |= HSFSTS_DONE;
   14567 			if (sc->sc_type >= WM_T_PCH_SPT)
   14568 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14569 				    hsfsts & 0xffffUL);
   14570 			else
   14571 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14572 				    hsfsts);
   14573 		}
   14574 	}
   14575 	return error;
   14576 }
   14577 
   14578 /******************************************************************************
   14579  * This function starts a flash cycle and waits for its completion
   14580  *
   14581  * sc - The pointer to the hw structure
   14582  ****************************************************************************/
   14583 static int32_t
   14584 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14585 {
   14586 	uint16_t hsflctl;
   14587 	uint16_t hsfsts;
   14588 	int32_t error = 1;
   14589 	uint32_t i = 0;
   14590 
    14591 	/* Start a cycle by writing 1 to Flash Cycle Go in HW flash control */
   14592 	if (sc->sc_type >= WM_T_PCH_SPT)
   14593 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14594 	else
   14595 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14596 	hsflctl |= HSFCTL_GO;
   14597 	if (sc->sc_type >= WM_T_PCH_SPT)
   14598 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14599 		    (uint32_t)hsflctl << 16);
   14600 	else
   14601 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14602 
   14603 	/* Wait till FDONE bit is set to 1 */
   14604 	do {
   14605 		if (sc->sc_type >= WM_T_PCH_SPT)
   14606 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14607 			    & 0xffffUL;
   14608 		else
   14609 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14610 		if (hsfsts & HSFSTS_DONE)
   14611 			break;
   14612 		delay(1);
   14613 		i++;
   14614 	} while (i < timeout);
    14615 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14616 		error = 0;
   14617 
   14618 	return error;
   14619 }
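
          /*
           * Note on the 16-bit shifts above (an editor's aside, inferred from
           * the code itself): from PCH_SPT on, the HSFCTL content lives in the
           * high half of the 32-bit HSFSTS register, so it is fetched with a
           * 32-bit read shifted right by 16 and written back shifted left by
           * 16, while older chips use a separate 16-bit HSFCTL register.
           */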
   14620 
   14621 /******************************************************************************
   14622  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14623  *
   14624  * sc - The pointer to the hw structure
   14625  * index - The index of the byte or word to read.
   14626  * size - Size of data to read, 1=byte 2=word, 4=dword
   14627  * data - Pointer to the word to store the value read.
   14628  *****************************************************************************/
   14629 static int32_t
   14630 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14631     uint32_t size, uint32_t *data)
   14632 {
   14633 	uint16_t hsfsts;
   14634 	uint16_t hsflctl;
   14635 	uint32_t flash_linear_address;
   14636 	uint32_t flash_data = 0;
   14637 	int32_t error = 1;
   14638 	int32_t count = 0;
   14639 
    14640 	if (size < 1 || size > 4 || data == NULL ||
   14641 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14642 		return error;
   14643 
   14644 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14645 	    sc->sc_ich8_flash_base;
   14646 
   14647 	do {
   14648 		delay(1);
   14649 		/* Steps */
   14650 		error = wm_ich8_cycle_init(sc);
   14651 		if (error)
   14652 			break;
   14653 
   14654 		if (sc->sc_type >= WM_T_PCH_SPT)
   14655 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14656 			    >> 16;
   14657 		else
   14658 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14659 		/* The bcount field holds size - 1: 0/1/3 select 1/2/4 bytes. */
   14660 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14661 		    & HSFCTL_BCOUNT_MASK;
   14662 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14663 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14664 			/*
    14665 			 * In SPT, this register is in LAN memory space,
    14666 			 * not flash, so only 32-bit access is supported.
   14667 			 */
   14668 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14669 			    (uint32_t)hsflctl << 16);
   14670 		} else
   14671 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14672 
   14673 		/*
   14674 		 * Write the last 24 bits of index into Flash Linear address
   14675 		 * field in Flash Address
   14676 		 */
    14677 		/* TODO: maybe check the index against the size of the flash */
   14678 
   14679 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14680 
   14681 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14682 
   14683 		/*
    14684 		 * If FCERR is set, clear it and retry the whole sequence a
    14685 		 * few more times; otherwise read out the Flash Data0
    14686 		 * register, least significant byte first.
   14688 		 */
   14689 		if (error == 0) {
   14690 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14691 			if (size == 1)
   14692 				*data = (uint8_t)(flash_data & 0x000000FF);
   14693 			else if (size == 2)
   14694 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14695 			else if (size == 4)
   14696 				*data = (uint32_t)flash_data;
   14697 			break;
   14698 		} else {
   14699 			/*
   14700 			 * If we've gotten here, then things are probably
   14701 			 * completely hosed, but if the error condition is
    14702 			 * detected, it won't hurt to give it another try,
    14703 			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14704 			 */
   14705 			if (sc->sc_type >= WM_T_PCH_SPT)
   14706 				hsfsts = ICH8_FLASH_READ32(sc,
   14707 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14708 			else
   14709 				hsfsts = ICH8_FLASH_READ16(sc,
   14710 				    ICH_FLASH_HSFSTS);
   14711 
   14712 			if (hsfsts & HSFSTS_ERR) {
   14713 				/* Repeat for some time before giving up. */
   14714 				continue;
   14715 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14716 				break;
   14717 		}
   14718 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14719 
   14720 	return error;
   14721 }
   14722 
   14723 /******************************************************************************
   14724  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14725  *
   14726  * sc - pointer to wm_hw structure
   14727  * index - The index of the byte to read.
   14728  * data - Pointer to a byte to store the value read.
   14729  *****************************************************************************/
   14730 static int32_t
   14731 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14732 {
   14733 	int32_t status;
   14734 	uint32_t word = 0;
   14735 
   14736 	status = wm_read_ich8_data(sc, index, 1, &word);
   14737 	if (status == 0)
   14738 		*data = (uint8_t)word;
   14739 	else
   14740 		*data = 0;
   14741 
   14742 	return status;
   14743 }
   14744 
   14745 /******************************************************************************
   14746  * Reads a word from the NVM using the ICH8 flash access registers.
   14747  *
   14748  * sc - pointer to wm_hw structure
   14749  * index - The starting byte index of the word to read.
   14750  * data - Pointer to a word to store the value read.
   14751  *****************************************************************************/
   14752 static int32_t
   14753 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14754 {
   14755 	int32_t status;
   14756 	uint32_t word = 0;
   14757 
   14758 	status = wm_read_ich8_data(sc, index, 2, &word);
   14759 	if (status == 0)
   14760 		*data = (uint16_t)word;
   14761 	else
   14762 		*data = 0;
   14763 
   14764 	return status;
   14765 }
   14766 
   14767 /******************************************************************************
   14768  * Reads a dword from the NVM using the ICH8 flash access registers.
   14769  *
   14770  * sc - pointer to wm_hw structure
    14771  * index - The starting byte index of the dword to read.
    14772  * data - Pointer to a dword to store the value read.
   14773  *****************************************************************************/
   14774 static int32_t
   14775 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14776 {
   14777 	int32_t status;
   14778 
   14779 	status = wm_read_ich8_data(sc, index, 4, data);
   14780 	return status;
   14781 }
   14782 
   14783 /******************************************************************************
   14784  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14785  * register.
   14786  *
   14787  * sc - Struct containing variables accessed by shared code
    14788  * offset - offset of the first word in the EEPROM to read
    14789  * words - number of words to read
    14790  * data - words read from the EEPROM
   14791  *****************************************************************************/
   14792 static int
   14793 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14794 {
   14795 	int rv;
   14796 	uint32_t flash_bank = 0;
   14797 	uint32_t act_offset = 0;
   14798 	uint32_t bank_offset = 0;
   14799 	uint16_t word = 0;
   14800 	uint16_t i = 0;
   14801 
   14802 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14803 		device_xname(sc->sc_dev), __func__));
   14804 
   14805 	rv = sc->nvm.acquire(sc);
   14806 	if (rv != 0)
   14807 		return rv;
   14808 
   14809 	/*
   14810 	 * We need to know which is the valid flash bank.  In the event
   14811 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14812 	 * managing flash_bank. So it cannot be trusted and needs
   14813 	 * to be updated with each read.
   14814 	 */
   14815 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14816 	if (rv) {
   14817 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14818 			device_xname(sc->sc_dev)));
   14819 		flash_bank = 0;
   14820 	}
   14821 
   14822 	/*
    14823 	 * Adjust the offset if we're on bank 1, accounting for the word
    14824 	 * size.
   14825 	 */
   14826 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14827 
   14828 	for (i = 0; i < words; i++) {
   14829 		/* The NVM part needs a byte offset, hence * 2 */
   14830 		act_offset = bank_offset + ((offset + i) * 2);
   14831 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14832 		if (rv) {
   14833 			aprint_error_dev(sc->sc_dev,
   14834 			    "%s: failed to read NVM\n", __func__);
   14835 			break;
   14836 		}
   14837 		data[i] = word;
   14838 	}
   14839 
   14840 	sc->nvm.release(sc);
   14841 	return rv;
   14842 }
   14843 
   14844 /******************************************************************************
   14845  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14846  * register.
   14847  *
   14848  * sc - Struct containing variables accessed by shared code
    14849  * offset - offset of the first word in the EEPROM to read
    14850  * words - number of words to read
    14851  * data - words read from the EEPROM
   14852  *****************************************************************************/
   14853 static int
   14854 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14855 {
   14856 	int	 rv;
   14857 	uint32_t flash_bank = 0;
   14858 	uint32_t act_offset = 0;
   14859 	uint32_t bank_offset = 0;
   14860 	uint32_t dword = 0;
   14861 	uint16_t i = 0;
   14862 
   14863 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14864 		device_xname(sc->sc_dev), __func__));
   14865 
   14866 	rv = sc->nvm.acquire(sc);
   14867 	if (rv != 0)
   14868 		return rv;
   14869 
   14870 	/*
   14871 	 * We need to know which is the valid flash bank.  In the event
   14872 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14873 	 * managing flash_bank. So it cannot be trusted and needs
   14874 	 * to be updated with each read.
   14875 	 */
   14876 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14877 	if (rv) {
   14878 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14879 			device_xname(sc->sc_dev)));
   14880 		flash_bank = 0;
   14881 	}
   14882 
   14883 	/*
    14884 	 * Adjust the offset if we're on bank 1, accounting for the word
    14885 	 * size.
   14886 	 */
   14887 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14888 
   14889 	for (i = 0; i < words; i++) {
   14890 		/* The NVM part needs a byte offset, hence * 2 */
   14891 		act_offset = bank_offset + ((offset + i) * 2);
   14892 		/* but we must read dword aligned, so mask ... */
   14893 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14894 		if (rv) {
   14895 			aprint_error_dev(sc->sc_dev,
   14896 			    "%s: failed to read NVM\n", __func__);
   14897 			break;
   14898 		}
   14899 		/* ... and pick out low or high word */
   14900 		if ((act_offset & 0x2) == 0)
   14901 			data[i] = (uint16_t)(dword & 0xFFFF);
   14902 		else
   14903 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14904 	}
   14905 
   14906 	sc->nvm.release(sc);
   14907 	return rv;
   14908 }
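
          /*
           * Worked example for the alignment logic above: word offset 5 in
           * bank 0 yields act_offset = 10, the dword is fetched from byte
           * offset 10 & ~0x3 = 8, and since (10 & 0x2) != 0 the high 16 bits
           * of that dword are returned in data[i].
           */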
   14909 
   14910 /* iNVM */
   14911 
   14912 static int
   14913 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14914 {
    14915 	int32_t	 rv = -1;	/* Default: word not found */
   14916 	uint32_t invm_dword;
   14917 	uint16_t i;
   14918 	uint8_t record_type, word_address;
   14919 
   14920 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14921 		device_xname(sc->sc_dev), __func__));
   14922 
   14923 	for (i = 0; i < INVM_SIZE; i++) {
   14924 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14925 		/* Get record type */
   14926 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14927 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14928 			break;
   14929 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14930 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14931 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14932 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14933 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14934 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14935 			if (word_address == address) {
   14936 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14937 				rv = 0;
   14938 				break;
   14939 			}
   14940 		}
   14941 	}
   14942 
   14943 	return rv;
   14944 }
   14945 
   14946 static int
   14947 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14948 {
   14949 	int i, rv;
   14950 
   14951 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14952 		device_xname(sc->sc_dev), __func__));
   14953 
   14954 	rv = sc->nvm.acquire(sc);
   14955 	if (rv != 0)
   14956 		return rv;
   14957 
   14958 	for (i = 0; i < words; i++) {
   14959 		switch (offset + i) {
   14960 		case NVM_OFF_MACADDR:
   14961 		case NVM_OFF_MACADDR1:
   14962 		case NVM_OFF_MACADDR2:
   14963 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14964 			if (rv != 0) {
   14965 				data[i] = 0xffff;
   14966 				rv = -1;
   14967 			}
   14968 			break;
   14969 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14970 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14971 			if (rv != 0) {
   14972 				*data = INVM_DEFAULT_AL;
   14973 				rv = 0;
   14974 			}
   14975 			break;
   14976 		case NVM_OFF_CFG2:
   14977 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14978 			if (rv != 0) {
   14979 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14980 				rv = 0;
   14981 			}
   14982 			break;
   14983 		case NVM_OFF_CFG4:
   14984 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14985 			if (rv != 0) {
   14986 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14987 				rv = 0;
   14988 			}
   14989 			break;
   14990 		case NVM_OFF_LED_1_CFG:
   14991 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14992 			if (rv != 0) {
   14993 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14994 				rv = 0;
   14995 			}
   14996 			break;
   14997 		case NVM_OFF_LED_0_2_CFG:
   14998 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14999 			if (rv != 0) {
   15000 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   15001 				rv = 0;
   15002 			}
   15003 			break;
   15004 		case NVM_OFF_ID_LED_SETTINGS:
   15005 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15006 			if (rv != 0) {
   15007 				*data = ID_LED_RESERVED_FFFF;
   15008 				rv = 0;
   15009 			}
   15010 			break;
   15011 		default:
   15012 			DPRINTF(sc, WM_DEBUG_NVM,
   15013 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15014 			*data = NVM_RESERVED_WORD;
   15015 			break;
   15016 		}
   15017 	}
   15018 
   15019 	sc->nvm.release(sc);
   15020 	return rv;
   15021 }
   15022 
    15023 /* Locking, NVM type detection, checksum validation, version and read */
   15024 
   15025 static int
   15026 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15027 {
   15028 	uint32_t eecd = 0;
   15029 
   15030 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15031 	    || sc->sc_type == WM_T_82583) {
   15032 		eecd = CSR_READ(sc, WMREG_EECD);
   15033 
   15034 		/* Isolate bits 15 & 16 */
   15035 		eecd = ((eecd >> 15) & 0x03);
   15036 
   15037 		/* If both bits are set, device is Flash type */
   15038 		if (eecd == 0x03)
   15039 			return 0;
   15040 	}
   15041 	return 1;
   15042 }
   15043 
   15044 static int
   15045 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15046 {
   15047 	uint32_t eec;
   15048 
   15049 	eec = CSR_READ(sc, WMREG_EEC);
   15050 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15051 		return 1;
   15052 
   15053 	return 0;
   15054 }
   15055 
   15056 /*
   15057  * wm_nvm_validate_checksum
   15058  *
   15059  * The checksum is defined as the sum of the first 64 (16 bit) words.
   15060  */
   15061 static int
   15062 wm_nvm_validate_checksum(struct wm_softc *sc)
   15063 {
   15064 	uint16_t checksum;
   15065 	uint16_t eeprom_data;
   15066 #ifdef WM_DEBUG
   15067 	uint16_t csum_wordaddr, valid_checksum;
   15068 #endif
   15069 	int i;
   15070 
   15071 	checksum = 0;
   15072 
   15073 	/* Don't check for I211 */
   15074 	if (sc->sc_type == WM_T_I211)
   15075 		return 0;
   15076 
   15077 #ifdef WM_DEBUG
   15078 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   15079 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   15080 		csum_wordaddr = NVM_OFF_COMPAT;
   15081 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15082 	} else {
   15083 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15084 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15085 	}
   15086 
   15087 	/* Dump EEPROM image for debug */
   15088 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15089 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15090 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15091 		/* XXX PCH_SPT? */
   15092 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15093 		if ((eeprom_data & valid_checksum) == 0)
   15094 			DPRINTF(sc, WM_DEBUG_NVM,
   15095 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15096 				device_xname(sc->sc_dev), eeprom_data,
   15097 				valid_checksum));
   15098 	}
   15099 
   15100 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15101 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15102 		for (i = 0; i < NVM_SIZE; i++) {
   15103 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15104 				printf("XXXX ");
   15105 			else
   15106 				printf("%04hx ", eeprom_data);
   15107 			if (i % 8 == 7)
   15108 				printf("\n");
   15109 		}
   15110 	}
   15111 
   15112 #endif /* WM_DEBUG */
   15113 
   15114 	for (i = 0; i < NVM_SIZE; i++) {
   15115 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15116 			return -1;
   15117 		checksum += eeprom_data;
   15118 	}
   15119 
   15120 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15121 #ifdef WM_DEBUG
   15122 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15123 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15124 #endif
   15125 	}
   15126 
   15127 	return 0;
   15128 }
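
          /*
           * Illustrative sketch (an editor's example, not called anywhere): the
           * raw word sum described above, separated from the validation logic.
           * A valid image sums to NVM_CHECKSUM because its checksum word is
           * chosen so that the first NVM_SIZE words add up to that constant.
           */
          static uint16_t __unused
          wm_nvm_sum_words(struct wm_softc *sc)
          {
          	uint16_t checksum = 0, eeprom_data;
          	int i;
          
          	for (i = 0; i < NVM_SIZE; i++) {
          		if (wm_nvm_read(sc, i, 1, &eeprom_data) != 0)
          			break;	/* Read error; the sum is only partial. */
          		checksum += eeprom_data;
          	}
          	return checksum;
          }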
   15129 
   15130 static void
   15131 wm_nvm_version_invm(struct wm_softc *sc)
   15132 {
   15133 	uint32_t dword;
   15134 
   15135 	/*
    15136 	 * Linux's code to decode the version is very strange, so we don't
    15137 	 * follow that algorithm and just use word 61 as the documentation
    15138 	 * describes.  It may not be perfect, though...
   15139 	 *
   15140 	 * Example:
   15141 	 *
   15142 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15143 	 */
   15144 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15145 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15146 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15147 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15148 }
   15149 
   15150 static void
   15151 wm_nvm_version(struct wm_softc *sc)
   15152 {
   15153 	uint16_t major, minor, build, patch;
   15154 	uint16_t uid0, uid1;
   15155 	uint16_t nvm_data;
   15156 	uint16_t off;
   15157 	bool check_version = false;
   15158 	bool check_optionrom = false;
   15159 	bool have_build = false;
   15160 	bool have_uid = true;
   15161 
   15162 	/*
   15163 	 * Version format:
   15164 	 *
   15165 	 * XYYZ
   15166 	 * X0YZ
   15167 	 * X0YY
   15168 	 *
   15169 	 * Example:
   15170 	 *
   15171 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15172 	 *	82571	0x50a6	5.10.6?
   15173 	 *	82572	0x506a	5.6.10?
   15174 	 *	82572EI	0x5069	5.6.9?
   15175 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15176 	 *		0x2013	2.1.3?
   15177 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15178 	 * ICH8+82567	0x0040	0.4.0?
   15179 	 * ICH9+82566	0x1040	1.4.0?
   15180 	 *ICH10+82567	0x0043	0.4.3?
   15181 	 *  PCH+82577	0x00c1	0.12.1?
   15182 	 * PCH2+82579	0x00d3	0.13.3?
   15183 	 *		0x00d4	0.13.4?
   15184 	 *  LPT+I218	0x0023	0.2.3?
   15185 	 *  SPT+I219	0x0084	0.8.4?
   15186 	 *  CNP+I219	0x0054	0.5.4?
   15187 	 */
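
          	/*
          	 * Worked example of the decode below, for the 82571 entry above:
          	 * word 0x50a2 gives major = 0x5 and minor = 0x0a, and since
          	 * have_build is set for this chip, build = 0x2.  The decimal
          	 * conversion turns minor 0x0a into (0x0a / 16) * 10 + 0x0a % 16
          	 * = 10, i.e. version 5.10.2.
          	 */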
   15188 
   15189 	/*
   15190 	 * XXX
    15191 	 * Qemu's e1000e (82574L) emulation has an SPI ROM of only 64
    15192 	 * words.  I've never seen real 82574 hardware with such a small one.
   15193 	 */
   15194 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15195 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15196 		have_uid = false;
   15197 
   15198 	switch (sc->sc_type) {
   15199 	case WM_T_82571:
   15200 	case WM_T_82572:
   15201 	case WM_T_82574:
   15202 	case WM_T_82583:
   15203 		check_version = true;
   15204 		check_optionrom = true;
   15205 		have_build = true;
   15206 		break;
   15207 	case WM_T_ICH8:
   15208 	case WM_T_ICH9:
   15209 	case WM_T_ICH10:
   15210 	case WM_T_PCH:
   15211 	case WM_T_PCH2:
   15212 	case WM_T_PCH_LPT:
   15213 	case WM_T_PCH_SPT:
   15214 	case WM_T_PCH_CNP:
   15215 		check_version = true;
   15216 		have_build = true;
   15217 		have_uid = false;
   15218 		break;
   15219 	case WM_T_82575:
   15220 	case WM_T_82576:
   15221 	case WM_T_82580:
   15222 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15223 			check_version = true;
   15224 		break;
   15225 	case WM_T_I211:
   15226 		wm_nvm_version_invm(sc);
   15227 		have_uid = false;
   15228 		goto printver;
   15229 	case WM_T_I210:
   15230 		if (!wm_nvm_flash_presence_i210(sc)) {
   15231 			wm_nvm_version_invm(sc);
   15232 			have_uid = false;
   15233 			goto printver;
   15234 		}
   15235 		/* FALLTHROUGH */
   15236 	case WM_T_I350:
   15237 	case WM_T_I354:
   15238 		check_version = true;
   15239 		check_optionrom = true;
   15240 		break;
   15241 	default:
   15242 		return;
   15243 	}
   15244 	if (check_version
   15245 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15246 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15247 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15248 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15249 			build = nvm_data & NVM_BUILD_MASK;
   15250 			have_build = true;
   15251 		} else
   15252 			minor = nvm_data & 0x00ff;
   15253 
   15254 		/* Decimal */
   15255 		minor = (minor / 16) * 10 + (minor % 16);
   15256 		sc->sc_nvm_ver_major = major;
   15257 		sc->sc_nvm_ver_minor = minor;
   15258 
   15259 printver:
   15260 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15261 		    sc->sc_nvm_ver_minor);
   15262 		if (have_build) {
   15263 			sc->sc_nvm_ver_build = build;
   15264 			aprint_verbose(".%d", build);
   15265 		}
   15266 	}
   15267 
    15268 	/* Assume the option ROM area is above NVM_SIZE */
   15269 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15270 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15271 		/* Option ROM Version */
   15272 		if ((off != 0x0000) && (off != 0xffff)) {
   15273 			int rv;
   15274 
   15275 			off += NVM_COMBO_VER_OFF;
   15276 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15277 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15278 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15279 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15280 				/* 16bits */
   15281 				major = uid0 >> 8;
   15282 				build = (uid0 << 8) | (uid1 >> 8);
   15283 				patch = uid1 & 0x00ff;
   15284 				aprint_verbose(", option ROM Version %d.%d.%d",
   15285 				    major, build, patch);
   15286 			}
   15287 		}
   15288 	}
   15289 
   15290 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15291 		aprint_verbose(", Image Unique ID %08x",
   15292 		    ((uint32_t)uid1 << 16) | uid0);
   15293 }
   15294 
   15295 /*
   15296  * wm_nvm_read:
   15297  *
   15298  *	Read data from the serial EEPROM.
   15299  */
   15300 static int
   15301 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15302 {
   15303 	int rv;
   15304 
   15305 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15306 		device_xname(sc->sc_dev), __func__));
   15307 
   15308 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15309 		return -1;
   15310 
   15311 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15312 
   15313 	return rv;
   15314 }
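
          /*
           * Usage sketch (an editor's example; the helper name and the per-word
           * byte order are assumptions, not part of the driver): fetching the
           * primary MAC address words through the backend-independent entry
           * point above.  Each NVM word is assumed to carry two address bytes,
           * low byte first.
           */
          static int __unused
          wm_nvm_read_macaddr_sketch(struct wm_softc *sc, uint8_t enaddr[6])
          {
          	uint16_t myea[3];
          	int i, rv;
          
          	rv = wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea);
          	if (rv != 0)
          		return rv;	/* NVM invalid or the backend read failed. */
          	for (i = 0; i < 3; i++) {
          		enaddr[i * 2 + 0] = myea[i] & 0xff;
          		enaddr[i * 2 + 1] = myea[i] >> 8;
          	}
          	return 0;
          }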
   15315 
   15316 /*
   15317  * Hardware semaphores.
    15318  * Very complex...
   15319  */
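
          /*
           * All of the NVM and PHY access paths in this file follow the same
           * acquire/access/release discipline through the per-chip hooks.  A
           * minimal sketch of the pattern (illustrative only):
           *
           *	if ((rv = sc->nvm.acquire(sc)) != 0)
           *		return rv;
           *	... touch EECD/EERD/flash registers ...
           *	sc->nvm.release(sc);
           */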
   15320 
   15321 static int
   15322 wm_get_null(struct wm_softc *sc)
   15323 {
   15324 
   15325 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15326 		device_xname(sc->sc_dev), __func__));
   15327 	return 0;
   15328 }
   15329 
   15330 static void
   15331 wm_put_null(struct wm_softc *sc)
   15332 {
   15333 
   15334 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15335 		device_xname(sc->sc_dev), __func__));
   15336 	return;
   15337 }
   15338 
   15339 static int
   15340 wm_get_eecd(struct wm_softc *sc)
   15341 {
   15342 	uint32_t reg;
   15343 	int x;
   15344 
   15345 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15346 		device_xname(sc->sc_dev), __func__));
   15347 
   15348 	reg = CSR_READ(sc, WMREG_EECD);
   15349 
   15350 	/* Request EEPROM access. */
   15351 	reg |= EECD_EE_REQ;
   15352 	CSR_WRITE(sc, WMREG_EECD, reg);
   15353 
    15354 	/* ...and wait for it to be granted. */
   15355 	for (x = 0; x < 1000; x++) {
   15356 		reg = CSR_READ(sc, WMREG_EECD);
   15357 		if (reg & EECD_EE_GNT)
   15358 			break;
   15359 		delay(5);
   15360 	}
   15361 	if ((reg & EECD_EE_GNT) == 0) {
   15362 		aprint_error_dev(sc->sc_dev,
   15363 		    "could not acquire EEPROM GNT\n");
   15364 		reg &= ~EECD_EE_REQ;
   15365 		CSR_WRITE(sc, WMREG_EECD, reg);
   15366 		return -1;
   15367 	}
   15368 
   15369 	return 0;
   15370 }
   15371 
   15372 static void
   15373 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15374 {
   15375 
   15376 	*eecd |= EECD_SK;
   15377 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15378 	CSR_WRITE_FLUSH(sc);
   15379 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15380 		delay(1);
   15381 	else
   15382 		delay(50);
   15383 }
   15384 
   15385 static void
   15386 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15387 {
   15388 
   15389 	*eecd &= ~EECD_SK;
   15390 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15391 	CSR_WRITE_FLUSH(sc);
   15392 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15393 		delay(1);
   15394 	else
   15395 		delay(50);
   15396 }
   15397 
   15398 static void
   15399 wm_put_eecd(struct wm_softc *sc)
   15400 {
   15401 	uint32_t reg;
   15402 
   15403 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15404 		device_xname(sc->sc_dev), __func__));
   15405 
   15406 	/* Stop nvm */
   15407 	reg = CSR_READ(sc, WMREG_EECD);
   15408 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15409 		/* Pull CS high */
   15410 		reg |= EECD_CS;
   15411 		wm_nvm_eec_clock_lower(sc, &reg);
   15412 	} else {
   15413 		/* CS on Microwire is active-high */
   15414 		reg &= ~(EECD_CS | EECD_DI);
   15415 		CSR_WRITE(sc, WMREG_EECD, reg);
   15416 		wm_nvm_eec_clock_raise(sc, &reg);
   15417 		wm_nvm_eec_clock_lower(sc, &reg);
   15418 	}
   15419 
   15420 	reg = CSR_READ(sc, WMREG_EECD);
   15421 	reg &= ~EECD_EE_REQ;
   15422 	CSR_WRITE(sc, WMREG_EECD, reg);
   15423 
   15424 	return;
   15425 }
   15426 
   15427 /*
   15428  * Get hardware semaphore.
   15429  * Same as e1000_get_hw_semaphore_generic()
   15430  */
   15431 static int
   15432 wm_get_swsm_semaphore(struct wm_softc *sc)
   15433 {
   15434 	int32_t timeout;
   15435 	uint32_t swsm;
   15436 
   15437 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15438 		device_xname(sc->sc_dev), __func__));
   15439 	KASSERT(sc->sc_nvm_wordsize > 0);
   15440 
   15441 retry:
   15442 	/* Get the SW semaphore. */
   15443 	timeout = sc->sc_nvm_wordsize + 1;
   15444 	while (timeout) {
   15445 		swsm = CSR_READ(sc, WMREG_SWSM);
   15446 
   15447 		if ((swsm & SWSM_SMBI) == 0)
   15448 			break;
   15449 
   15450 		delay(50);
   15451 		timeout--;
   15452 	}
   15453 
   15454 	if (timeout == 0) {
   15455 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15456 			/*
   15457 			 * In rare circumstances, the SW semaphore may already
   15458 			 * be held unintentionally. Clear the semaphore once
   15459 			 * before giving up.
   15460 			 */
   15461 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15462 			wm_put_swsm_semaphore(sc);
   15463 			goto retry;
   15464 		}
   15465 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15466 		return -1;
   15467 	}
   15468 
   15469 	/* Get the FW semaphore. */
   15470 	timeout = sc->sc_nvm_wordsize + 1;
   15471 	while (timeout) {
   15472 		swsm = CSR_READ(sc, WMREG_SWSM);
   15473 		swsm |= SWSM_SWESMBI;
   15474 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15475 		/* If we managed to set the bit we got the semaphore. */
   15476 		swsm = CSR_READ(sc, WMREG_SWSM);
   15477 		if (swsm & SWSM_SWESMBI)
   15478 			break;
   15479 
   15480 		delay(50);
   15481 		timeout--;
   15482 	}
   15483 
   15484 	if (timeout == 0) {
   15485 		aprint_error_dev(sc->sc_dev,
   15486 		    "could not acquire SWSM SWESMBI\n");
   15487 		/* Release semaphores */
   15488 		wm_put_swsm_semaphore(sc);
   15489 		return -1;
   15490 	}
   15491 	return 0;
   15492 }
   15493 
   15494 /*
   15495  * Put hardware semaphore.
   15496  * Same as e1000_put_hw_semaphore_generic()
   15497  */
   15498 static void
   15499 wm_put_swsm_semaphore(struct wm_softc *sc)
   15500 {
   15501 	uint32_t swsm;
   15502 
   15503 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15504 		device_xname(sc->sc_dev), __func__));
   15505 
   15506 	swsm = CSR_READ(sc, WMREG_SWSM);
   15507 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15508 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15509 }
   15510 
   15511 /*
   15512  * Get SW/FW semaphore.
   15513  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15514  */
   15515 static int
   15516 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15517 {
   15518 	uint32_t swfw_sync;
   15519 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15520 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15521 	int timeout;
   15522 
   15523 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15524 		device_xname(sc->sc_dev), __func__));
   15525 
   15526 	if (sc->sc_type == WM_T_80003)
   15527 		timeout = 50;
   15528 	else
   15529 		timeout = 200;
   15530 
   15531 	while (timeout) {
   15532 		if (wm_get_swsm_semaphore(sc)) {
   15533 			aprint_error_dev(sc->sc_dev,
   15534 			    "%s: failed to get semaphore\n",
   15535 			    __func__);
   15536 			return -1;
   15537 		}
   15538 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15539 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15540 			swfw_sync |= swmask;
   15541 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15542 			wm_put_swsm_semaphore(sc);
   15543 			return 0;
   15544 		}
   15545 		wm_put_swsm_semaphore(sc);
   15546 		delay(5000);
   15547 		timeout--;
   15548 	}
   15549 	device_printf(sc->sc_dev,
   15550 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15551 	    mask, swfw_sync);
   15552 	return -1;
   15553 }
   15554 
   15555 static void
   15556 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15557 {
   15558 	uint32_t swfw_sync;
   15559 
   15560 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15561 		device_xname(sc->sc_dev), __func__));
   15562 
   15563 	while (wm_get_swsm_semaphore(sc) != 0)
   15564 		continue;
   15565 
   15566 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15567 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15568 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15569 
   15570 	wm_put_swsm_semaphore(sc);
   15571 }
   15572 
   15573 static int
   15574 wm_get_nvm_80003(struct wm_softc *sc)
   15575 {
   15576 	int rv;
   15577 
   15578 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15579 		device_xname(sc->sc_dev), __func__));
   15580 
   15581 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15582 		aprint_error_dev(sc->sc_dev,
   15583 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15584 		return rv;
   15585 	}
   15586 
   15587 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15588 	    && (rv = wm_get_eecd(sc)) != 0) {
   15589 		aprint_error_dev(sc->sc_dev,
   15590 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15591 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15592 		return rv;
   15593 	}
   15594 
   15595 	return 0;
   15596 }
   15597 
   15598 static void
   15599 wm_put_nvm_80003(struct wm_softc *sc)
   15600 {
   15601 
   15602 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15603 		device_xname(sc->sc_dev), __func__));
   15604 
   15605 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15606 		wm_put_eecd(sc);
   15607 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15608 }
   15609 
   15610 static int
   15611 wm_get_nvm_82571(struct wm_softc *sc)
   15612 {
   15613 	int rv;
   15614 
   15615 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15616 		device_xname(sc->sc_dev), __func__));
   15617 
   15618 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15619 		return rv;
   15620 
   15621 	switch (sc->sc_type) {
   15622 	case WM_T_82573:
   15623 		break;
   15624 	default:
   15625 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15626 			rv = wm_get_eecd(sc);
   15627 		break;
   15628 	}
   15629 
   15630 	if (rv != 0) {
   15631 		aprint_error_dev(sc->sc_dev,
   15632 		    "%s: failed to get semaphore\n",
   15633 		    __func__);
   15634 		wm_put_swsm_semaphore(sc);
   15635 	}
   15636 
   15637 	return rv;
   15638 }
   15639 
   15640 static void
   15641 wm_put_nvm_82571(struct wm_softc *sc)
   15642 {
   15643 
   15644 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15645 		device_xname(sc->sc_dev), __func__));
   15646 
   15647 	switch (sc->sc_type) {
   15648 	case WM_T_82573:
   15649 		break;
   15650 	default:
   15651 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15652 			wm_put_eecd(sc);
   15653 		break;
   15654 	}
   15655 
   15656 	wm_put_swsm_semaphore(sc);
   15657 }
   15658 
   15659 static int
   15660 wm_get_phy_82575(struct wm_softc *sc)
   15661 {
   15662 
   15663 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15664 		device_xname(sc->sc_dev), __func__));
   15665 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15666 }
   15667 
   15668 static void
   15669 wm_put_phy_82575(struct wm_softc *sc)
   15670 {
   15671 
   15672 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15673 		device_xname(sc->sc_dev), __func__));
   15674 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15675 }
   15676 
   15677 static int
   15678 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15679 {
   15680 	uint32_t ext_ctrl;
   15681 	int timeout = 200;
   15682 
   15683 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15684 		device_xname(sc->sc_dev), __func__));
   15685 
   15686 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15687 	for (timeout = 0; timeout < 200; timeout++) {
   15688 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15689 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15690 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15691 
   15692 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15693 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15694 			return 0;
   15695 		delay(5000);
   15696 	}
   15697 	device_printf(sc->sc_dev,
   15698 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15699 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15700 	return -1;
   15701 }
   15702 
   15703 static void
   15704 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15705 {
   15706 	uint32_t ext_ctrl;
   15707 
   15708 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15709 		device_xname(sc->sc_dev), __func__));
   15710 
   15711 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15712 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15713 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15714 
   15715 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15716 }
   15717 
   15718 static int
   15719 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15720 {
   15721 	uint32_t ext_ctrl;
   15722 	int timeout;
   15723 
   15724 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15725 		device_xname(sc->sc_dev), __func__));
   15726 	mutex_enter(sc->sc_ich_phymtx);
   15727 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15728 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15729 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15730 			break;
   15731 		delay(1000);
   15732 	}
   15733 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15734 		device_printf(sc->sc_dev,
   15735 		    "SW has already locked the resource\n");
   15736 		goto out;
   15737 	}
   15738 
   15739 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15740 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15741 	for (timeout = 0; timeout < 1000; timeout++) {
   15742 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15743 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15744 			break;
   15745 		delay(1000);
   15746 	}
   15747 	if (timeout >= 1000) {
   15748 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15749 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15750 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15751 		goto out;
   15752 	}
   15753 	return 0;
   15754 
   15755 out:
   15756 	mutex_exit(sc->sc_ich_phymtx);
   15757 	return -1;
   15758 }
   15759 
   15760 static void
   15761 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15762 {
   15763 	uint32_t ext_ctrl;
   15764 
   15765 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15766 		device_xname(sc->sc_dev), __func__));
   15767 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15768 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15769 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15770 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15771 	} else
   15772 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15773 
   15774 	mutex_exit(sc->sc_ich_phymtx);
   15775 }
   15776 
   15777 static int
   15778 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15779 {
   15780 
   15781 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15782 		device_xname(sc->sc_dev), __func__));
   15783 	mutex_enter(sc->sc_ich_nvmmtx);
   15784 
   15785 	return 0;
   15786 }
   15787 
   15788 static void
   15789 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15790 {
   15791 
   15792 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15793 		device_xname(sc->sc_dev), __func__));
   15794 	mutex_exit(sc->sc_ich_nvmmtx);
   15795 }
   15796 
   15797 static int
   15798 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15799 {
   15800 	int i = 0;
   15801 	uint32_t reg;
   15802 
   15803 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15804 		device_xname(sc->sc_dev), __func__));
   15805 
   15806 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15807 	do {
   15808 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15809 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15810 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15811 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15812 			break;
   15813 		delay(2*1000);
   15814 		i++;
   15815 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15816 
   15817 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15818 		wm_put_hw_semaphore_82573(sc);
   15819 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15820 		    device_xname(sc->sc_dev));
   15821 		return -1;
   15822 	}
   15823 
   15824 	return 0;
   15825 }
   15826 
   15827 static void
   15828 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15829 {
   15830 	uint32_t reg;
   15831 
   15832 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15833 		device_xname(sc->sc_dev), __func__));
   15834 
   15835 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15836 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15837 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15838 }
   15839 
   15840 /*
   15841  * Management mode and power management related subroutines.
   15842  * BMC, AMT, suspend/resume and EEE.
   15843  */
   15844 
   15845 #ifdef WM_WOL
   15846 static int
   15847 wm_check_mng_mode(struct wm_softc *sc)
   15848 {
   15849 	int rv;
   15850 
   15851 	switch (sc->sc_type) {
   15852 	case WM_T_ICH8:
   15853 	case WM_T_ICH9:
   15854 	case WM_T_ICH10:
   15855 	case WM_T_PCH:
   15856 	case WM_T_PCH2:
   15857 	case WM_T_PCH_LPT:
   15858 	case WM_T_PCH_SPT:
   15859 	case WM_T_PCH_CNP:
   15860 		rv = wm_check_mng_mode_ich8lan(sc);
   15861 		break;
   15862 	case WM_T_82574:
   15863 	case WM_T_82583:
   15864 		rv = wm_check_mng_mode_82574(sc);
   15865 		break;
   15866 	case WM_T_82571:
   15867 	case WM_T_82572:
   15868 	case WM_T_82573:
   15869 	case WM_T_80003:
   15870 		rv = wm_check_mng_mode_generic(sc);
   15871 		break;
   15872 	default:
		/* Nothing to do */
   15874 		rv = 0;
   15875 		break;
   15876 	}
   15877 
   15878 	return rv;
   15879 }
   15880 
   15881 static int
   15882 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15883 {
   15884 	uint32_t fwsm;
   15885 
   15886 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15887 
   15888 	if (((fwsm & FWSM_FW_VALID) != 0)
   15889 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15890 		return 1;
   15891 
   15892 	return 0;
   15893 }
   15894 
   15895 static int
   15896 wm_check_mng_mode_82574(struct wm_softc *sc)
   15897 {
   15898 	uint16_t data;
   15899 
   15900 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15901 
   15902 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15903 		return 1;
   15904 
   15905 	return 0;
   15906 }
   15907 
   15908 static int
   15909 wm_check_mng_mode_generic(struct wm_softc *sc)
   15910 {
   15911 	uint32_t fwsm;
   15912 
   15913 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15914 
   15915 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15916 		return 1;
   15917 
   15918 	return 0;
   15919 }
   15920 #endif /* WM_WOL */
   15921 
   15922 static int
   15923 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15924 {
   15925 	uint32_t manc, fwsm, factps;
   15926 
   15927 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15928 		return 0;
   15929 
   15930 	manc = CSR_READ(sc, WMREG_MANC);
   15931 
   15932 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15933 		device_xname(sc->sc_dev), manc));
   15934 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15935 		return 0;
   15936 
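	/*
	 * TCO receive is enabled; now check how this part reports the
	 * firmware's management mode: parts with a valid ARC subsystem
	 * expose it in FWSM, the 82574/82583 keep it in NVM word CFG2,
	 * and everything else is inferred from the SMBus/ASF bits in
	 * MANC.
	 */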
   15937 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15938 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15939 		factps = CSR_READ(sc, WMREG_FACTPS);
   15940 		if (((factps & FACTPS_MNGCG) == 0)
   15941 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15942 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   15944 		uint16_t data;
   15945 
   15946 		factps = CSR_READ(sc, WMREG_FACTPS);
   15947 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15948 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15949 			device_xname(sc->sc_dev), factps, data));
   15950 		if (((factps & FACTPS_MNGCG) == 0)
   15951 		    && ((data & NVM_CFG2_MNGM_MASK)
   15952 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15953 			return 1;
   15954 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15955 	    && ((manc & MANC_ASF_EN) == 0))
   15956 		return 1;
   15957 
   15958 	return 0;
   15959 }
   15960 
   15961 static bool
   15962 wm_phy_resetisblocked(struct wm_softc *sc)
   15963 {
   15964 	bool blocked = false;
   15965 	uint32_t reg;
   15966 	int i = 0;
   15967 
   15968 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15969 		device_xname(sc->sc_dev), __func__));
   15970 
   15971 	switch (sc->sc_type) {
   15972 	case WM_T_ICH8:
   15973 	case WM_T_ICH9:
   15974 	case WM_T_ICH10:
   15975 	case WM_T_PCH:
   15976 	case WM_T_PCH2:
   15977 	case WM_T_PCH_LPT:
   15978 	case WM_T_PCH_SPT:
   15979 	case WM_T_PCH_CNP:
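		/*
		 * FWSM_RSPCIPHY indicates that firmware permits the PHY
		 * reset.  Poll it for up to 30 x 10ms before declaring
		 * the reset blocked.
		 */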
   15980 		do {
   15981 			reg = CSR_READ(sc, WMREG_FWSM);
   15982 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15983 				blocked = true;
   15984 				delay(10*1000);
   15985 				continue;
   15986 			}
   15987 			blocked = false;
   15988 		} while (blocked && (i++ < 30));
		return blocked;
   15991 	case WM_T_82571:
   15992 	case WM_T_82572:
   15993 	case WM_T_82573:
   15994 	case WM_T_82574:
   15995 	case WM_T_82583:
   15996 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   16003 	default:
   16004 		/* No problem */
   16005 		break;
   16006 	}
   16007 
   16008 	return false;
   16009 }
   16010 
   16011 static void
   16012 wm_get_hw_control(struct wm_softc *sc)
   16013 {
   16014 	uint32_t reg;
   16015 
   16016 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16017 		device_xname(sc->sc_dev), __func__));
   16018 
   16019 	if (sc->sc_type == WM_T_82573) {
   16020 		reg = CSR_READ(sc, WMREG_SWSM);
   16021 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16022 	} else if (sc->sc_type >= WM_T_82571) {
   16023 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16024 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16025 	}
   16026 }
   16027 
   16028 static void
   16029 wm_release_hw_control(struct wm_softc *sc)
   16030 {
   16031 	uint32_t reg;
   16032 
   16033 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16034 		device_xname(sc->sc_dev), __func__));
   16035 
   16036 	if (sc->sc_type == WM_T_82573) {
   16037 		reg = CSR_READ(sc, WMREG_SWSM);
   16038 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16039 	} else if (sc->sc_type >= WM_T_82571) {
   16040 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16041 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16042 	}
   16043 }
   16044 
   16045 static void
   16046 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16047 {
   16048 	uint32_t reg;
   16049 
   16050 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16051 		device_xname(sc->sc_dev), __func__));
   16052 
   16053 	if (sc->sc_type < WM_T_PCH2)
   16054 		return;
   16055 
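	/*
	 * EXTCNFCTR_GATE_PHY_CFG stops the hardware from starting its
	 * automatic PHY configuration.  It only applies to PCH2 (82579)
	 * and newer, hence the early return above.
	 */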
   16056 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16057 
   16058 	if (gate)
   16059 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16060 	else
   16061 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16062 
   16063 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16064 }
   16065 
   16066 static int
   16067 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16068 {
   16069 	uint32_t fwsm, reg;
   16070 	int rv;
   16071 
   16072 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16073 		device_xname(sc->sc_dev), __func__));
   16074 
   16075 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16076 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16077 
   16078 	/* Disable ULP */
   16079 	wm_ulp_disable(sc);
   16080 
   16081 	/* Acquire PHY semaphore */
   16082 	rv = sc->phy.acquire(sc);
   16083 	if (rv != 0) {
   16084 		DPRINTF(sc, WM_DEBUG_INIT,
   16085 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16086 		return rv;
   16087 	}
   16088 
   16089 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16090 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16091 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16092 	 */
   16093 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16094 	switch (sc->sc_type) {
   16095 	case WM_T_PCH_LPT:
   16096 	case WM_T_PCH_SPT:
   16097 	case WM_T_PCH_CNP:
   16098 		if (wm_phy_is_accessible_pchlan(sc))
   16099 			break;
   16100 
   16101 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16102 		 * forcing MAC to SMBus mode first.
   16103 		 */
   16104 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16105 		reg |= CTRL_EXT_FORCE_SMBUS;
   16106 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16107 #if 0
   16108 		/* XXX Isn't this required??? */
   16109 		CSR_WRITE_FLUSH(sc);
   16110 #endif
   16111 		/* Wait 50 milliseconds for MAC to finish any retries
   16112 		 * that it might be trying to perform from previous
   16113 		 * attempts to acknowledge any phy read requests.
   16114 		 */
   16115 		delay(50 * 1000);
   16116 		/* FALLTHROUGH */
   16117 	case WM_T_PCH2:
   16118 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16119 			break;
   16120 		/* FALLTHROUGH */
   16121 	case WM_T_PCH:
   16122 		if (sc->sc_type == WM_T_PCH)
   16123 			if ((fwsm & FWSM_FW_VALID) != 0)
   16124 				break;
   16125 
   16126 		if (wm_phy_resetisblocked(sc) == true) {
   16127 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16128 			break;
   16129 		}
   16130 
   16131 		/* Toggle LANPHYPC Value bit */
   16132 		wm_toggle_lanphypc_pch_lpt(sc);
   16133 
   16134 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16135 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16136 				break;
   16137 
   16138 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16139 			 * so ensure that the MAC is also out of SMBus mode
   16140 			 */
   16141 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16142 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16143 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16144 
   16145 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16146 				break;
   16147 			rv = -1;
   16148 		}
   16149 		break;
   16150 	default:
   16151 		break;
   16152 	}
   16153 
   16154 	/* Release semaphore */
   16155 	sc->phy.release(sc);
   16156 
   16157 	if (rv == 0) {
   16158 		/* Check to see if able to reset PHY.  Print error if not */
   16159 		if (wm_phy_resetisblocked(sc)) {
   16160 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16161 			goto out;
   16162 		}
   16163 
   16164 		/* Reset the PHY before any access to it.  Doing so, ensures
   16165 		 * that the PHY is in a known good state before we read/write
   16166 		 * PHY registers.  The generic reset is sufficient here,
   16167 		 * because we haven't determined the PHY type yet.
   16168 		 */
   16169 		if (wm_reset_phy(sc) != 0)
   16170 			goto out;
   16171 
   16172 		/* On a successful reset, possibly need to wait for the PHY
   16173 		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * just print a diagnostic, as that is the condition the
		 * PHY is in.
   16177 		 */
   16178 		if (wm_phy_resetisblocked(sc))
   16179 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16180 	}
   16181 
   16182 out:
   16183 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16184 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16185 		delay(10*1000);
   16186 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16187 	}
   16188 
	return rv;
   16190 }
   16191 
   16192 static void
   16193 wm_init_manageability(struct wm_softc *sc)
   16194 {
   16195 
   16196 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16197 		device_xname(sc->sc_dev), __func__));
   16198 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16199 
   16200 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16201 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16202 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16203 
   16204 		/* Disable hardware interception of ARP */
   16205 		manc &= ~MANC_ARP_EN;
   16206 
   16207 		/* Enable receiving management packets to the host */
   16208 		if (sc->sc_type >= WM_T_82571) {
   16209 			manc |= MANC_EN_MNG2HOST;
   16210 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16211 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16212 		}
   16213 
   16214 		CSR_WRITE(sc, WMREG_MANC, manc);
   16215 	}
   16216 }
   16217 
   16218 static void
   16219 wm_release_manageability(struct wm_softc *sc)
   16220 {
   16221 
   16222 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16223 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16224 
   16225 		manc |= MANC_ARP_EN;
   16226 		if (sc->sc_type >= WM_T_82571)
   16227 			manc &= ~MANC_EN_MNG2HOST;
   16228 
   16229 		CSR_WRITE(sc, WMREG_MANC, manc);
   16230 	}
   16231 }
   16232 
   16233 static void
   16234 wm_get_wakeup(struct wm_softc *sc)
   16235 {
   16236 
   16237 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16238 	switch (sc->sc_type) {
   16239 	case WM_T_82573:
   16240 	case WM_T_82583:
   16241 		sc->sc_flags |= WM_F_HAS_AMT;
   16242 		/* FALLTHROUGH */
   16243 	case WM_T_80003:
   16244 	case WM_T_82575:
   16245 	case WM_T_82576:
   16246 	case WM_T_82580:
   16247 	case WM_T_I350:
   16248 	case WM_T_I354:
   16249 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16250 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16251 		/* FALLTHROUGH */
   16252 	case WM_T_82541:
   16253 	case WM_T_82541_2:
   16254 	case WM_T_82547:
   16255 	case WM_T_82547_2:
   16256 	case WM_T_82571:
   16257 	case WM_T_82572:
   16258 	case WM_T_82574:
   16259 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16260 		break;
   16261 	case WM_T_ICH8:
   16262 	case WM_T_ICH9:
   16263 	case WM_T_ICH10:
   16264 	case WM_T_PCH:
   16265 	case WM_T_PCH2:
   16266 	case WM_T_PCH_LPT:
   16267 	case WM_T_PCH_SPT:
   16268 	case WM_T_PCH_CNP:
   16269 		sc->sc_flags |= WM_F_HAS_AMT;
   16270 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16271 		break;
   16272 	default:
   16273 		break;
   16274 	}
   16275 
   16276 	/* 1: HAS_MANAGE */
   16277 	if (wm_enable_mng_pass_thru(sc) != 0)
   16278 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16279 
	/*
	 * Note that the WOL flag is set after the EEPROM-related reset
	 * has been done.
	 */
   16284 }
   16285 
   16286 /*
   16287  * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer (minus the excluded devices below).
   16289  */
   16290 static int
   16291 wm_ulp_disable(struct wm_softc *sc)
   16292 {
   16293 	uint32_t reg;
   16294 	uint16_t phyreg;
   16295 	int i = 0, rv;
   16296 
   16297 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16298 		device_xname(sc->sc_dev), __func__));
   16299 	/* Exclude old devices */
   16300 	if ((sc->sc_type < WM_T_PCH_LPT)
   16301 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16302 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16303 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16304 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16305 		return 0;
   16306 
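	/*
	 * If the management engine's firmware is valid, ask the ME (via
	 * the H2ME register) to take the PHY out of ULP; otherwise the
	 * host must unforce SMBus mode and clear the ULP bits in the
	 * PHY itself, below.
	 */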
   16307 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16308 		/* Request ME un-configure ULP mode in the PHY */
   16309 		reg = CSR_READ(sc, WMREG_H2ME);
   16310 		reg &= ~H2ME_ULP;
   16311 		reg |= H2ME_ENFORCE_SETTINGS;
   16312 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16313 
   16314 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16315 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16316 			if (i++ == 30) {
   16317 				device_printf(sc->sc_dev, "%s timed out\n",
   16318 				    __func__);
   16319 				return -1;
   16320 			}
   16321 			delay(10 * 1000);
   16322 		}
   16323 		reg = CSR_READ(sc, WMREG_H2ME);
   16324 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16325 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16326 
   16327 		return 0;
   16328 	}
   16329 
   16330 	/* Acquire semaphore */
   16331 	rv = sc->phy.acquire(sc);
   16332 	if (rv != 0) {
   16333 		DPRINTF(sc, WM_DEBUG_INIT,
   16334 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16335 		return rv;
   16336 	}
   16337 
   16338 	/* Toggle LANPHYPC */
   16339 	wm_toggle_lanphypc_pch_lpt(sc);
   16340 
   16341 	/* Unforce SMBus mode in PHY */
   16342 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16343 	if (rv != 0) {
   16344 		uint32_t reg2;
   16345 
   16346 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16347 		    __func__);
   16348 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16349 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16351 		delay(50 * 1000);
   16352 
   16353 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16354 		    &phyreg);
   16355 		if (rv != 0)
   16356 			goto release;
   16357 	}
   16358 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16359 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16360 
   16361 	/* Unforce SMBus mode in MAC */
   16362 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16363 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16364 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16365 
   16366 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16367 	if (rv != 0)
   16368 		goto release;
   16369 	phyreg |= HV_PM_CTRL_K1_ENA;
   16370 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16371 
   16372 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16373 	    &phyreg);
   16374 	if (rv != 0)
   16375 		goto release;
   16376 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16377 	    | I218_ULP_CONFIG1_STICKY_ULP
   16378 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16379 	    | I218_ULP_CONFIG1_WOL_HOST
   16380 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16381 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16382 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16383 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16384 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16385 	phyreg |= I218_ULP_CONFIG1_START;
   16386 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16387 
   16388 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16389 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16390 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16391 
   16392 release:
   16393 	/* Release semaphore */
   16394 	sc->phy.release(sc);
   16395 	wm_gmii_reset(sc);
   16396 	delay(50 * 1000);
   16397 
   16398 	return rv;
   16399 }
   16400 
   16401 /* WOL in the newer chipset interfaces (pchlan) */
   16402 static int
   16403 wm_enable_phy_wakeup(struct wm_softc *sc)
   16404 {
   16405 	device_t dev = sc->sc_dev;
   16406 	uint32_t mreg, moff;
   16407 	uint16_t wuce, wuc, wufc, preg;
   16408 	int i, rv;
   16409 
   16410 	KASSERT(sc->sc_type >= WM_T_PCH);
   16411 
   16412 	/* Copy MAC RARs to PHY RARs */
   16413 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16414 
   16415 	/* Activate PHY wakeup */
   16416 	rv = sc->phy.acquire(sc);
   16417 	if (rv != 0) {
   16418 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16419 		    __func__);
   16420 		return rv;
   16421 	}
   16422 
   16423 	/*
   16424 	 * Enable access to PHY wakeup registers.
   16425 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16426 	 */
   16427 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16428 	if (rv != 0) {
   16429 		device_printf(dev,
   16430 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16431 		goto release;
   16432 	}
   16433 
   16434 	/* Copy MAC MTA to PHY MTA */
   16435 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16436 		uint16_t lo, hi;
   16437 
   16438 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16439 		lo = (uint16_t)(mreg & 0xffff);
   16440 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16441 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16442 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16443 	}
   16444 
   16445 	/* Configure PHY Rx Control register */
   16446 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16447 	mreg = CSR_READ(sc, WMREG_RCTL);
   16448 	if (mreg & RCTL_UPE)
   16449 		preg |= BM_RCTL_UPE;
   16450 	if (mreg & RCTL_MPE)
   16451 		preg |= BM_RCTL_MPE;
   16452 	preg &= ~(BM_RCTL_MO_MASK);
   16453 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16454 	if (moff != 0)
   16455 		preg |= moff << BM_RCTL_MO_SHIFT;
   16456 	if (mreg & RCTL_BAM)
   16457 		preg |= BM_RCTL_BAM;
   16458 	if (mreg & RCTL_PMCF)
   16459 		preg |= BM_RCTL_PMCF;
   16460 	mreg = CSR_READ(sc, WMREG_CTRL);
   16461 	if (mreg & CTRL_RFCE)
   16462 		preg |= BM_RCTL_RFCE;
   16463 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16464 
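	/* Wake on magic packet only; select PHY wakeup (WUC_PHY_WAKE). */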
   16465 	wuc = WUC_APME | WUC_PME_EN;
   16466 	wufc = WUFC_MAG;
   16467 	/* Enable PHY wakeup in MAC register */
   16468 	CSR_WRITE(sc, WMREG_WUC,
   16469 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16470 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16471 
   16472 	/* Configure and enable PHY wakeup in PHY registers */
   16473 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16474 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16475 
   16476 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16477 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16478 
   16479 release:
   16480 	sc->phy.release(sc);
   16481 
	return rv;
   16483 }
   16484 
   16485 /* Power down workaround on D3 */
   16486 static void
   16487 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16488 {
   16489 	uint32_t reg;
   16490 	uint16_t phyreg;
   16491 	int i;
   16492 
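	/*
	 * Try to put the PHY's voltage regulator into shutdown.  If the
	 * setting does not take on read-back, reset the PHY once and
	 * retry.
	 */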
   16493 	for (i = 0; i < 2; i++) {
   16494 		/* Disable link */
   16495 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16496 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16497 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16498 
   16499 		/*
   16500 		 * Call gig speed drop workaround on Gig disable before
   16501 		 * accessing any PHY registers
   16502 		 */
   16503 		if (sc->sc_type == WM_T_ICH8)
   16504 			wm_gig_downshift_workaround_ich8lan(sc);
   16505 
   16506 		/* Write VR power-down enable */
   16507 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16508 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16509 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16510 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16511 
   16512 		/* Read it back and test */
   16513 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16514 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16515 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16516 			break;
   16517 
   16518 		/* Issue PHY reset and repeat at most one more time */
   16519 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16520 	}
   16521 }
   16522 
   16523 /*
   16524  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16525  *  @sc: pointer to the HW structure
   16526  *
   16527  *  During S0 to Sx transition, it is possible the link remains at gig
   16528  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16529  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16530  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16531  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16532  *  needs to be written.
 *  Parts that support (and are linked to a partner that supports) EEE in
   16534  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16535  *  than 10Mbps w/o EEE.
   16536  */
   16537 static void
   16538 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16539 {
   16540 	device_t dev = sc->sc_dev;
   16541 	struct ethercom *ec = &sc->sc_ethercom;
   16542 	uint32_t phy_ctrl;
   16543 	int rv;
   16544 
   16545 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16546 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16547 
   16548 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16549 
   16550 	if (sc->sc_phytype == WMPHY_I217) {
   16551 		uint16_t devid = sc->sc_pcidevid;
   16552 
   16553 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16554 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16555 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16556 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16557 		    (sc->sc_type >= WM_T_PCH_SPT))
   16558 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16559 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16560 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16561 
   16562 		if (sc->phy.acquire(sc) != 0)
   16563 			goto out;
   16564 
   16565 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16566 			uint16_t eee_advert;
   16567 
   16568 			rv = wm_read_emi_reg_locked(dev,
   16569 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16570 			if (rv)
   16571 				goto release;
   16572 
   16573 			/*
   16574 			 * Disable LPLU if both link partners support 100BaseT
   16575 			 * EEE and 100Full is advertised on both ends of the
   16576 			 * link, and enable Auto Enable LPI since there will
   16577 			 * be no driver to enable LPI while in Sx.
   16578 			 */
   16579 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16580 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16581 				uint16_t anar, phy_reg;
   16582 
   16583 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16584 				    &anar);
   16585 				if (anar & ANAR_TX_FD) {
   16586 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16587 					    PHY_CTRL_NOND0A_LPLU);
   16588 
   16589 					/* Set Auto Enable LPI after link up */
   16590 					sc->phy.readreg_locked(dev, 2,
   16591 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16592 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16593 					sc->phy.writereg_locked(dev, 2,
   16594 					    I217_LPI_GPIO_CTRL, phy_reg);
   16595 				}
   16596 			}
   16597 		}
   16598 
   16599 		/*
   16600 		 * For i217 Intel Rapid Start Technology support,
   16601 		 * when the system is going into Sx and no manageability engine
   16602 		 * is present, the driver must configure proxy to reset only on
   16603 		 * power good.	LPI (Low Power Idle) state must also reset only
   16604 		 * on power good, as well as the MTA (Multicast table array).
   16605 		 * The SMBus release must also be disabled on LCD reset.
   16606 		 */
   16607 
   16608 		/*
   16609 		 * Enable MTA to reset for Intel Rapid Start Technology
   16610 		 * Support
   16611 		 */
   16612 
   16613 release:
   16614 		sc->phy.release(sc);
   16615 	}
   16616 out:
   16617 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16618 
   16619 	if (sc->sc_type == WM_T_ICH8)
   16620 		wm_gig_downshift_workaround_ich8lan(sc);
   16621 
   16622 	if (sc->sc_type >= WM_T_PCH) {
   16623 		wm_oem_bits_config_ich8lan(sc, false);
   16624 
   16625 		/* Reset PHY to activate OEM bits on 82577/8 */
   16626 		if (sc->sc_type == WM_T_PCH)
   16627 			wm_reset_phy(sc);
   16628 
   16629 		if (sc->phy.acquire(sc) != 0)
   16630 			return;
   16631 		wm_write_smbus_addr(sc);
   16632 		sc->phy.release(sc);
   16633 	}
   16634 }
   16635 
   16636 /*
   16637  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16638  *  @sc: pointer to the HW structure
   16639  *
   16640  *  During Sx to S0 transitions on non-managed devices or managed devices
   16641  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16642  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16643  *  the PHY.
   16644  *  On i217, setup Intel Rapid Start Technology.
   16645  */
   16646 static int
   16647 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16648 {
   16649 	device_t dev = sc->sc_dev;
   16650 	int rv;
   16651 
   16652 	if (sc->sc_type < WM_T_PCH2)
   16653 		return 0;
   16654 
   16655 	rv = wm_init_phy_workarounds_pchlan(sc);
   16656 	if (rv != 0)
   16657 		return rv;
   16658 
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
   16663 	 */
   16664 	if (sc->sc_phytype == WMPHY_I217) {
   16665 		uint16_t phy_reg;
   16666 
   16667 		rv = sc->phy.acquire(sc);
   16668 		if (rv != 0)
   16669 			return rv;
   16670 
   16671 		/* Clear Auto Enable LPI after link up */
   16672 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16673 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16674 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16675 
   16676 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16677 			/* Restore clear on SMB if no manageability engine
   16678 			 * is present
   16679 			 */
   16680 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16681 			    &phy_reg);
   16682 			if (rv != 0)
   16683 				goto release;
   16684 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16685 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16686 
   16687 			/* Disable Proxy */
   16688 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16689 		}
   16690 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16692 		if (rv != 0)
   16693 			goto release;
   16694 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16695 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16696 
   16697 release:
   16698 		sc->phy.release(sc);
   16699 		return rv;
   16700 	}
   16701 
   16702 	return 0;
   16703 }
   16704 
   16705 static void
   16706 wm_enable_wakeup(struct wm_softc *sc)
   16707 {
   16708 	uint32_t reg, pmreg;
   16709 	pcireg_t pmode;
   16710 	int rv = 0;
   16711 
   16712 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16713 		device_xname(sc->sc_dev), __func__));
   16714 
   16715 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16716 	    &pmreg, NULL) == 0)
   16717 		return;
   16718 
   16719 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16720 		goto pme;
   16721 
   16722 	/* Advertise the wakeup capability */
   16723 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16724 	    | CTRL_SWDPIN(3));
   16725 
   16726 	/* Keep the laser running on fiber adapters */
   16727 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16728 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16729 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16730 		reg |= CTRL_EXT_SWDPIN(3);
   16731 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16732 	}
   16733 
   16734 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16735 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16736 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16737 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16738 		wm_suspend_workarounds_ich8lan(sc);
   16739 
   16740 #if 0	/* For the multicast packet */
   16741 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16742 	reg |= WUFC_MC;
   16743 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16744 #endif
   16745 
   16746 	if (sc->sc_type >= WM_T_PCH) {
   16747 		rv = wm_enable_phy_wakeup(sc);
   16748 		if (rv != 0)
   16749 			goto pme;
   16750 	} else {
   16751 		/* Enable wakeup by the MAC */
   16752 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16753 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16754 	}
   16755 
   16756 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16757 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16758 		|| (sc->sc_type == WM_T_PCH2))
   16759 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16760 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16761 
   16762 pme:
   16763 	/* Request PME */
   16764 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16765 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16766 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16767 		/* For WOL */
   16768 		pmode |= PCI_PMCSR_PME_EN;
   16769 	} else {
   16770 		/* Disable WOL */
   16771 		pmode &= ~PCI_PMCSR_PME_EN;
   16772 	}
   16773 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16774 }
   16775 
   16776 /* Disable ASPM L0s and/or L1 for workaround */
   16777 static void
   16778 wm_disable_aspm(struct wm_softc *sc)
   16779 {
   16780 	pcireg_t reg, mask = 0;
	const char *str = "";
   16782 
   16783 	/*
	 * Only for PCIe devices that have the PCIe capability in the PCI
	 * config space.
   16786 	 */
   16787 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16788 		return;
   16789 
   16790 	switch (sc->sc_type) {
   16791 	case WM_T_82571:
   16792 	case WM_T_82572:
   16793 		/*
   16794 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16795 		 * State Power management L1 State (ASPM L1).
   16796 		 */
   16797 		mask = PCIE_LCSR_ASPM_L1;
   16798 		str = "L1 is";
   16799 		break;
   16800 	case WM_T_82573:
   16801 	case WM_T_82574:
   16802 	case WM_T_82583:
   16803 		/*
   16804 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16805 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583
		 * say that disabling L0s with those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   16810 		 *
   16811 		 * References:
   16812 		 * Errata 8 of the Specification Update of i82573.
   16813 		 * Errata 20 of the Specification Update of i82574.
   16814 		 * Errata 9 of the Specification Update of i82583.
   16815 		 */
   16816 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16817 		str = "L0s and L1 are";
   16818 		break;
   16819 	default:
   16820 		return;
   16821 	}
   16822 
   16823 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16824 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16825 	reg &= ~mask;
   16826 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16827 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16828 
   16829 	/* Print only in wm_attach() */
   16830 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16831 		aprint_verbose_dev(sc->sc_dev,
   16832 		    "ASPM %s disabled to workaround the errata.\n", str);
   16833 }
   16834 
   16835 /* LPLU */
   16836 
   16837 static void
   16838 wm_lplu_d0_disable(struct wm_softc *sc)
   16839 {
   16840 	struct mii_data *mii = &sc->sc_mii;
   16841 	uint32_t reg;
   16842 	uint16_t phyval;
   16843 
   16844 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16845 		device_xname(sc->sc_dev), __func__));
   16846 
   16847 	if (sc->sc_phytype == WMPHY_IFE)
   16848 		return;
   16849 
   16850 	switch (sc->sc_type) {
   16851 	case WM_T_82571:
   16852 	case WM_T_82572:
   16853 	case WM_T_82573:
   16854 	case WM_T_82575:
   16855 	case WM_T_82576:
   16856 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16857 		phyval &= ~PMR_D0_LPLU;
   16858 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16859 		break;
   16860 	case WM_T_82580:
   16861 	case WM_T_I350:
   16862 	case WM_T_I210:
   16863 	case WM_T_I211:
   16864 		reg = CSR_READ(sc, WMREG_PHPM);
   16865 		reg &= ~PHPM_D0A_LPLU;
   16866 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16867 		break;
   16868 	case WM_T_82574:
   16869 	case WM_T_82583:
   16870 	case WM_T_ICH8:
   16871 	case WM_T_ICH9:
   16872 	case WM_T_ICH10:
   16873 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16874 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16875 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16876 		CSR_WRITE_FLUSH(sc);
   16877 		break;
   16878 	case WM_T_PCH:
   16879 	case WM_T_PCH2:
   16880 	case WM_T_PCH_LPT:
   16881 	case WM_T_PCH_SPT:
   16882 	case WM_T_PCH_CNP:
   16883 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16884 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16885 		if (wm_phy_resetisblocked(sc) == false)
   16886 			phyval |= HV_OEM_BITS_ANEGNOW;
   16887 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16888 		break;
   16889 	default:
   16890 		break;
   16891 	}
   16892 }
   16893 
   16894 /* EEE */
   16895 
   16896 static int
   16897 wm_set_eee_i350(struct wm_softc *sc)
   16898 {
   16899 	struct ethercom *ec = &sc->sc_ethercom;
   16900 	uint32_t ipcnfg, eeer;
   16901 	uint32_t ipcnfg_mask
   16902 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16903 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16904 
   16905 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16906 
   16907 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16908 	eeer = CSR_READ(sc, WMREG_EEER);
   16909 
   16910 	/* Enable or disable per user setting */
   16911 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16912 		ipcnfg |= ipcnfg_mask;
   16913 		eeer |= eeer_mask;
   16914 	} else {
   16915 		ipcnfg &= ~ipcnfg_mask;
   16916 		eeer &= ~eeer_mask;
   16917 	}
   16918 
   16919 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16920 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16921 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16922 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16923 
   16924 	return 0;
   16925 }
   16926 
   16927 static int
   16928 wm_set_eee_pchlan(struct wm_softc *sc)
   16929 {
   16930 	device_t dev = sc->sc_dev;
   16931 	struct ethercom *ec = &sc->sc_ethercom;
   16932 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16933 	int rv;
   16934 
   16935 	switch (sc->sc_phytype) {
   16936 	case WMPHY_82579:
   16937 		lpa = I82579_EEE_LP_ABILITY;
   16938 		pcs_status = I82579_EEE_PCS_STATUS;
   16939 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16940 		break;
   16941 	case WMPHY_I217:
   16942 		lpa = I217_EEE_LP_ABILITY;
   16943 		pcs_status = I217_EEE_PCS_STATUS;
   16944 		adv_addr = I217_EEE_ADVERTISEMENT;
   16945 		break;
   16946 	default:
   16947 		return 0;
   16948 	}
   16949 
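	/*
	 * The EEE registers selected above are reached through the EMI
	 * interface, so all accesses below are done with the PHY
	 * semaphore held.
	 */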
   16950 	rv = sc->phy.acquire(sc);
   16951 	if (rv != 0) {
   16952 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16953 		return rv;
   16954 	}
   16955 
   16956 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16957 	if (rv != 0)
   16958 		goto release;
   16959 
   16960 	/* Clear bits that enable EEE in various speeds */
   16961 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16962 
   16963 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16964 		/* Save off link partner's EEE ability */
   16965 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16966 		if (rv != 0)
   16967 			goto release;
   16968 
   16969 		/* Read EEE advertisement */
   16970 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16971 			goto release;
   16972 
   16973 		/*
   16974 		 * Enable EEE only for speeds in which the link partner is
   16975 		 * EEE capable and for which we advertise EEE.
   16976 		 */
   16977 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16978 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16979 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16980 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16981 			if ((data & ANLPAR_TX_FD) != 0)
   16982 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16983 			else {
   16984 				/*
   16985 				 * EEE is not supported in 100Half, so ignore
   16986 				 * partner's EEE in 100 ability if full-duplex
   16987 				 * is not advertised.
   16988 				 */
   16989 				sc->eee_lp_ability
   16990 				    &= ~AN_EEEADVERT_100_TX;
   16991 			}
   16992 		}
   16993 	}
   16994 
   16995 	if (sc->sc_phytype == WMPHY_82579) {
   16996 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16997 		if (rv != 0)
   16998 			goto release;
   16999 
   17000 		data &= ~I82579_LPI_PLL_SHUT_100;
   17001 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   17002 	}
   17003 
   17004 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   17005 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   17006 		goto release;
   17007 
   17008 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17009 release:
   17010 	sc->phy.release(sc);
   17011 
   17012 	return rv;
   17013 }
   17014 
   17015 static int
   17016 wm_set_eee(struct wm_softc *sc)
   17017 {
   17018 	struct ethercom *ec = &sc->sc_ethercom;
   17019 
   17020 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17021 		return 0;
   17022 
   17023 	if (sc->sc_type == WM_T_I354) {
   17024 		/* I354 uses an external PHY */
   17025 		return 0; /* not yet */
   17026 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17027 		return wm_set_eee_i350(sc);
   17028 	else if (sc->sc_type >= WM_T_PCH2)
   17029 		return wm_set_eee_pchlan(sc);
   17030 
   17031 	return 0;
   17032 }
   17033 
   17034 /*
   17035  * Workarounds (mainly PHY related).
   17036  * Basically, PHY's workarounds are in the PHY drivers.
   17037  */
   17038 
   17039 /* Workaround for 82566 Kumeran PCS lock loss */
   17040 static int
   17041 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17042 {
   17043 	struct mii_data *mii = &sc->sc_mii;
   17044 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17045 	int i, reg, rv;
   17046 	uint16_t phyreg;
   17047 
   17048 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17049 		device_xname(sc->sc_dev), __func__));
   17050 
   17051 	/* If the link is not up, do nothing */
   17052 	if ((status & STATUS_LU) == 0)
   17053 		return 0;
   17054 
   17055 	/* Nothing to do if the link is other than 1Gbps */
   17056 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17057 		return 0;
   17058 
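	/*
	 * Check the Kumeran PCS lock-loss bit (read twice, as the bit
	 * appears to be latched) and reset the PHY when lock was lost,
	 * retrying up to 10 times before giving up and dropping to
	 * non-gigabit speeds below.
	 */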
   17059 	for (i = 0; i < 10; i++) {
   17060 		/* read twice */
   17061 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17062 		if (rv != 0)
   17063 			return rv;
   17064 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17065 		if (rv != 0)
   17066 			return rv;
   17067 
   17068 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17069 			goto out;	/* GOOD! */
   17070 
   17071 		/* Reset the PHY */
   17072 		wm_reset_phy(sc);
   17073 		delay(5*1000);
   17074 	}
   17075 
   17076 	/* Disable GigE link negotiation */
   17077 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17078 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17079 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17080 
   17081 	/*
   17082 	 * Call gig speed drop workaround on Gig disable before accessing
   17083 	 * any PHY registers.
   17084 	 */
   17085 	wm_gig_downshift_workaround_ich8lan(sc);
   17086 
   17087 out:
   17088 	return 0;
   17089 }
   17090 
   17091 /*
   17092  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17093  *  @sc: pointer to the HW structure
   17094  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   17096  *  LPLU, Gig disable, MDIC PHY reset):
   17097  *    1) Set Kumeran Near-end loopback
   17098  *    2) Clear Kumeran Near-end loopback
   17099  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17100  */
   17101 static void
   17102 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17103 {
   17104 	uint16_t kmreg;
   17105 
   17106 	/* Only for igp3 */
   17107 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17108 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17109 			return;
   17110 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17111 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17112 			return;
   17113 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17114 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17115 	}
   17116 }
   17117 
   17118 /*
   17119  * Workaround for pch's PHYs
   17120  * XXX should be moved to new PHY driver?
   17121  */
   17122 static int
   17123 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17124 {
   17125 	device_t dev = sc->sc_dev;
   17126 	struct mii_data *mii = &sc->sc_mii;
   17127 	struct mii_softc *child;
   17128 	uint16_t phy_data, phyrev = 0;
   17129 	int phytype = sc->sc_phytype;
   17130 	int rv;
   17131 
   17132 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17133 		device_xname(dev), __func__));
   17134 	KASSERT(sc->sc_type == WM_T_PCH);
   17135 
   17136 	/* Set MDIO slow mode before any other MDIO access */
   17137 	if (phytype == WMPHY_82577)
   17138 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17139 			return rv;
   17140 
   17141 	child = LIST_FIRST(&mii->mii_phys);
   17142 	if (child != NULL)
   17143 		phyrev = child->mii_mpd_rev;
   17144 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17146 	if ((child != NULL) &&
   17147 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17148 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17149 		/* Disable generation of early preamble (0x4431) */
   17150 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17151 		    &phy_data);
   17152 		if (rv != 0)
   17153 			return rv;
   17154 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17155 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17156 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17157 		    phy_data);
   17158 		if (rv != 0)
   17159 			return rv;
   17160 
   17161 		/* Preamble tuning for SSC */
   17162 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17163 		if (rv != 0)
   17164 			return rv;
   17165 	}
   17166 
   17167 	/* 82578 */
   17168 	if (phytype == WMPHY_82578) {
   17169 		/*
   17170 		 * Return registers to default by doing a soft reset then
   17171 		 * writing 0x3140 to the control register
   17172 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17173 		 */
   17174 		if ((child != NULL) && (phyrev < 2)) {
   17175 			PHY_RESET(child);
   17176 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17177 			if (rv != 0)
   17178 				return rv;
   17179 		}
   17180 	}
   17181 
   17182 	/* Select page 0 */
   17183 	if ((rv = sc->phy.acquire(sc)) != 0)
   17184 		return rv;
   17185 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17186 	sc->phy.release(sc);
   17187 	if (rv != 0)
   17188 		return rv;
   17189 
   17190 	/*
   17191 	 * Configure the K1 Si workaround during phy reset assuming there is
   17192 	 * link so that it disables K1 if link is in 1Gbps.
   17193 	 */
   17194 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17195 		return rv;
   17196 
   17197 	/* Workaround for link disconnects on a busy hub in half duplex */
   17198 	rv = sc->phy.acquire(sc);
   17199 	if (rv)
   17200 		return rv;
   17201 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17202 	if (rv)
   17203 		goto release;
   17204 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17205 	    phy_data & 0x00ff);
   17206 	if (rv)
   17207 		goto release;
   17208 
   17209 	/* Set MSE higher to enable link to stay up when noise is high */
   17210 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17211 release:
   17212 	sc->phy.release(sc);
   17213 
   17214 	return rv;
   17215 }
   17216 
   17217 /*
   17218  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17219  *  @sc:   pointer to the HW structure
   17220  */
   17221 static void
   17222 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17223 {
   17224 
   17225 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17226 		device_xname(sc->sc_dev), __func__));
   17227 
   17228 	if (sc->phy.acquire(sc) != 0)
   17229 		return;
   17230 
   17231 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17232 
   17233 	sc->phy.release(sc);
   17234 }
   17235 
   17236 static void
   17237 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17238 {
   17239 	device_t dev = sc->sc_dev;
   17240 	uint32_t mac_reg;
   17241 	uint16_t i, wuce;
   17242 	int count;
   17243 
   17244 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17245 		device_xname(dev), __func__));
   17246 
   17247 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17248 		return;
   17249 
   17250 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17251 	count = wm_rar_count(sc);
   17252 	for (i = 0; i < count; i++) {
   17253 		uint16_t lo, hi;
   17254 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17255 		lo = (uint16_t)(mac_reg & 0xffff);
   17256 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17257 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17258 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17259 
   17260 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17261 		lo = (uint16_t)(mac_reg & 0xffff);
   17262 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17263 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17264 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17265 	}
   17266 
   17267 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17268 }
   17269 
   17270 /*
   17271  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17272  *  with 82579 PHY
   17273  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17274  */
   17275 static int
   17276 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17277 {
   17278 	device_t dev = sc->sc_dev;
   17279 	int rar_count;
   17280 	int rv;
   17281 	uint32_t mac_reg;
   17282 	uint16_t dft_ctrl, data;
   17283 	uint16_t i;
   17284 
   17285 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17286 		device_xname(dev), __func__));
   17287 
   17288 	if (sc->sc_type < WM_T_PCH2)
   17289 		return 0;
   17290 
   17291 	/* Acquire PHY semaphore */
   17292 	rv = sc->phy.acquire(sc);
   17293 	if (rv != 0)
   17294 		return rv;
   17295 
   17296 	/* Disable Rx path while enabling/disabling workaround */
   17297 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17298 	if (rv != 0)
   17299 		goto out;
   17300 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17301 	    dft_ctrl | (1 << 14));
   17302 	if (rv != 0)
   17303 		goto out;
   17304 
   17305 	if (enable) {
   17306 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17307 		 * SHRAL/H) and initial CRC values to the MAC
   17308 		 */
   17309 		rar_count = wm_rar_count(sc);
   17310 		for (i = 0; i < rar_count; i++) {
   17311 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17312 			uint32_t addr_high, addr_low;
   17313 
   17314 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17315 			if (!(addr_high & RAL_AV))
   17316 				continue;
   17317 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17318 			mac_addr[0] = (addr_low & 0xFF);
   17319 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17320 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17321 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17322 			mac_addr[4] = (addr_high & 0xFF);
   17323 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17324 
   17325 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17326 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17327 		}
   17328 
   17329 		/* Write Rx addresses to the PHY */
   17330 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17331 	}
   17332 
   17333 	/*
   17334 	 * If enable ==
   17335 	 *	true: Enable jumbo frame workaround in the MAC.
   17336 	 *	false: Write MAC register values back to h/w defaults.
   17337 	 */
   17338 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17339 	if (enable) {
   17340 		mac_reg &= ~(1 << 14);
   17341 		mac_reg |= (7 << 15);
   17342 	} else
   17343 		mac_reg &= ~(0xf << 14);
   17344 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17345 
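	/* The jumbo frame workaround also requires hardware CRC stripping. */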
   17346 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17347 	if (enable) {
   17348 		mac_reg |= RCTL_SECRC;
   17349 		sc->sc_rctl |= RCTL_SECRC;
   17350 		sc->sc_flags |= WM_F_CRC_STRIP;
   17351 	} else {
   17352 		mac_reg &= ~RCTL_SECRC;
   17353 		sc->sc_rctl &= ~RCTL_SECRC;
   17354 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17355 	}
   17356 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17357 
   17358 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17359 	if (rv != 0)
   17360 		goto out;
   17361 	if (enable)
   17362 		data |= 1 << 0;
   17363 	else
   17364 		data &= ~(1 << 0);
   17365 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17366 	if (rv != 0)
   17367 		goto out;
   17368 
   17369 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17370 	if (rv != 0)
   17371 		goto out;
   17372 	/*
	 * XXX FreeBSD and Linux do the same thing here: they set the same
	 * value in both the enable case and the disable case.  Is that
	 * correct?
   17375 	 */
   17376 	data &= ~(0xf << 8);
   17377 	data |= (0xb << 8);
   17378 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17379 	if (rv != 0)
   17380 		goto out;
   17381 
   17382 	/*
   17383 	 * If enable ==
   17384 	 *	true: Enable jumbo frame workaround in the PHY.
   17385 	 *	false: Write PHY register values back to h/w defaults.
   17386 	 */
   17387 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17388 	if (rv != 0)
   17389 		goto out;
   17390 	data &= ~(0x7F << 5);
   17391 	if (enable)
   17392 		data |= (0x37 << 5);
   17393 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17394 	if (rv != 0)
   17395 		goto out;
   17396 
   17397 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17398 	if (rv != 0)
   17399 		goto out;
   17400 	if (enable)
   17401 		data &= ~(1 << 13);
   17402 	else
   17403 		data |= (1 << 13);
   17404 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17405 	if (rv != 0)
   17406 		goto out;
   17407 
   17408 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17409 	if (rv != 0)
   17410 		goto out;
   17411 	data &= ~(0x3FF << 2);
   17412 	if (enable)
   17413 		data |= (I82579_TX_PTR_GAP << 2);
   17414 	else
   17415 		data |= (0x8 << 2);
   17416 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17417 	if (rv != 0)
   17418 		goto out;
   17419 
   17420 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17421 	    enable ? 0xf100 : 0x7e00);
   17422 	if (rv != 0)
   17423 		goto out;
   17424 
   17425 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17426 	if (rv != 0)
   17427 		goto out;
   17428 	if (enable)
   17429 		data |= 1 << 10;
   17430 	else
   17431 		data &= ~(1 << 10);
   17432 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17433 	if (rv != 0)
   17434 		goto out;
   17435 
   17436 	/* Re-enable Rx path after enabling/disabling workaround */
   17437 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17438 	    dft_ctrl & ~(1 << 14));
   17439 
   17440 out:
   17441 	sc->phy.release(sc);
   17442 
   17443 	return rv;
   17444 }
   17445 
   17446 /*
   17447  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   17448  *  done after every PHY reset.
   17449  */
   17450 static int
   17451 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17452 {
   17453 	device_t dev = sc->sc_dev;
   17454 	int rv;
   17455 
   17456 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17457 		device_xname(dev), __func__));
   17458 	KASSERT(sc->sc_type == WM_T_PCH2);
   17459 
   17460 	/* Set MDIO slow mode before any other MDIO access */
   17461 	rv = wm_set_mdio_slow_mode_hv(sc);
   17462 	if (rv != 0)
   17463 		return rv;
   17464 
   17465 	rv = sc->phy.acquire(sc);
   17466 	if (rv != 0)
   17467 		return rv;
   17468 	/* Set MSE higher to enable link to stay up when noise is high */
   17469 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17470 	if (rv != 0)
   17471 		goto release;
   17472 	/* Drop link after 5 times MSE threshold was reached */
   17473 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17474 release:
   17475 	sc->phy.release(sc);
   17476 
   17477 	return rv;
   17478 }
   17479 
   17480 /**
   17481  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17482  *  @link: link up bool flag
   17483  *
    17484  *  When K1 is enabled for 1Gbps, the MAC can miss two DMA completion
    17485  *  indications, preventing further DMA write requests.  Work around the
    17486  *  issue by disabling the de-assertion of the clock request when in 1Gbps mode.
   17487  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17488  *  speeds in order to avoid Tx hangs.
   17489  **/
   17490 static int
   17491 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17492 {
   17493 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17494 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17495 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17496 	uint16_t phyreg;
   17497 
   17498 	if (link && (speed == STATUS_SPEED_1000)) {
   17499 		int rv;
   17500 
   17501 		rv = sc->phy.acquire(sc);
   17502 		if (rv != 0)
   17503 			return rv;
   17504 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17505 		    &phyreg);
   17506 		if (rv != 0)
   17507 			goto release;
   17508 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17509 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17510 		if (rv != 0)
   17511 			goto release;
   17512 		delay(20);
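          		/*
          		 * With K1 disabled, request the PLL clock via FEXTNVM6
          		 * so that the clock request is never de-asserted while
          		 * at 1Gbps (see the function comment above).
          		 */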
   17513 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17514 
   17515 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17516 		    &phyreg);
   17517 release:
   17518 		sc->phy.release(sc);
   17519 		return rv;
   17520 	}
   17521 
   17522 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17523 
   17524 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17525 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17526 	    || !link
   17527 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17528 		goto update_fextnvm6;
   17529 
   17530 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17531 
   17532 	/* Clear link status transmit timeout */
   17533 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17534 	if (speed == STATUS_SPEED_100) {
   17535 		/* Set inband Tx timeout to 5x10us for 100Half */
   17536 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17537 
   17538 		/* Do not extend the K1 entry latency for 100Half */
   17539 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17540 	} else {
   17541 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17542 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17543 
   17544 		/* Extend the K1 entry latency for 10 Mbps */
   17545 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17546 	}
   17547 
   17548 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17549 
   17550 update_fextnvm6:
   17551 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17552 	return 0;
   17553 }
   17554 
   17555 /*
   17556  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17557  *  @sc:   pointer to the HW structure
   17558  *  @link: link up bool flag
   17559  *
   17560  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17561  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17562  *  If link is down, the function will restore the default K1 setting located
   17563  *  in the NVM.
   17564  */
   17565 static int
   17566 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17567 {
   17568 	int k1_enable = sc->sc_nvm_k1_enabled;
   17569 	int rv;
   17570 
   17571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17572 		device_xname(sc->sc_dev), __func__));
   17573 
   17574 	rv = sc->phy.acquire(sc);
   17575 	if (rv != 0)
   17576 		return rv;
   17577 
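          	/*
          	 * The IGP3_KMRN_DIAG magic values below match the Intel
          	 * reference driver; they differ only in bit 14, which is set
          	 * in the link-down case.
          	 */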
   17578 	if (link) {
   17579 		k1_enable = 0;
   17580 
   17581 		/* Link stall fix for link up */
   17582 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17583 		    0x0100);
   17584 	} else {
   17585 		/* Link stall fix for link down */
   17586 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17587 		    0x4100);
   17588 	}
   17589 
   17590 	wm_configure_k1_ich8lan(sc, k1_enable);
   17591 	sc->phy.release(sc);
   17592 
   17593 	return 0;
   17594 }
   17595 
   17596 /*
   17597  *  wm_k1_workaround_lv - K1 Si workaround
   17598  *  @sc:   pointer to the HW structure
   17599  *
    17600  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    17601  *  Disable K1 for 1000 and 100 speeds.
   17602  */
   17603 static int
   17604 wm_k1_workaround_lv(struct wm_softc *sc)
   17605 {
   17606 	uint32_t reg;
   17607 	uint16_t phyreg;
   17608 	int rv;
   17609 
   17610 	if (sc->sc_type != WM_T_PCH2)
   17611 		return 0;
   17612 
   17613 	/* Set K1 beacon duration based on 10Mbps speed */
   17614 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17615 	if (rv != 0)
   17616 		return rv;
   17617 
   17618 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17619 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17620 		if (phyreg &
   17621 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17622 			/* LV 1G/100Mbps packet drop issue workaround */
   17623 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17624 			    &phyreg);
   17625 			if (rv != 0)
   17626 				return rv;
   17627 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17628 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17629 			    phyreg);
   17630 			if (rv != 0)
   17631 				return rv;
   17632 		} else {
   17633 			/* For 10Mbps */
   17634 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17635 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17636 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17637 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17638 		}
   17639 	}
   17640 
   17641 	return 0;
   17642 }
   17643 
   17644 /*
   17645  *  wm_link_stall_workaround_hv - Si workaround
   17646  *  @sc: pointer to the HW structure
   17647  *
   17648  *  This function works around a Si bug where the link partner can get
    17649  *  a link up indication before the PHY does.  If small packets are sent
    17650  *  by the link partner, they can be placed in the packet buffer without
    17651  *  being properly accounted for by the PHY and will stall, preventing
   17652  *  further packets from being received.  The workaround is to clear the
   17653  *  packet buffer after the PHY detects link up.
   17654  */
   17655 static int
   17656 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17657 {
   17658 	uint16_t phyreg;
   17659 
   17660 	if (sc->sc_phytype != WMPHY_82578)
   17661 		return 0;
   17662 
    17663 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17664 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17665 	if ((phyreg & BMCR_LOOP) != 0)
   17666 		return 0;
   17667 
   17668 	/* Check if link is up and at 1Gbps */
   17669 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17670 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17671 	    | BM_CS_STATUS_SPEED_MASK;
   17672 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17673 		| BM_CS_STATUS_SPEED_1000))
   17674 		return 0;
   17675 
   17676 	delay(200 * 1000);	/* XXX too big */
   17677 
    17678 	/* Flush the packets in the FIFO buffer */
   17679 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17680 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17681 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17682 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17683 
   17684 	return 0;
   17685 }
   17686 
   17687 static int
   17688 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17689 {
   17690 	int rv;
   17691 
   17692 	rv = sc->phy.acquire(sc);
   17693 	if (rv != 0) {
   17694 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17695 		    __func__);
   17696 		return rv;
   17697 	}
   17698 
   17699 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17700 
   17701 	sc->phy.release(sc);
   17702 
   17703 	return rv;
   17704 }
   17705 
   17706 static int
   17707 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17708 {
   17709 	int rv;
   17710 	uint16_t reg;
   17711 
   17712 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17713 	if (rv != 0)
   17714 		return rv;
   17715 
   17716 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17717 	    reg | HV_KMRN_MDIO_SLOW);
   17718 }
   17719 
   17720 /*
   17721  *  wm_configure_k1_ich8lan - Configure K1 power state
   17722  *  @sc: pointer to the HW structure
   17723  *  @enable: K1 state to configure
   17724  *
   17725  *  Configure the K1 power state based on the provided parameter.
   17726  *  Assumes semaphore already acquired.
   17727  */
   17728 static void
   17729 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17730 {
   17731 	uint32_t ctrl, ctrl_ext, tmp;
   17732 	uint16_t kmreg;
   17733 	int rv;
   17734 
   17735 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17736 
   17737 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17738 	if (rv != 0)
   17739 		return;
   17740 
   17741 	if (k1_enable)
   17742 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17743 	else
   17744 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17745 
   17746 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17747 	if (rv != 0)
   17748 		return;
   17749 
   17750 	delay(20);
   17751 
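          	/*
          	 * Briefly force the MAC speed (clear the speed bits, set
          	 * CTRL_FRCSPD and CTRL_EXT_SPD_BYPS), then restore the
          	 * original CTRL/CTRL_EXT values.  The Intel reference driver
          	 * performs the same toggle after changing the K1 setting,
          	 * presumably so the new setting takes effect.
          	 */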
   17752 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17753 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17754 
   17755 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17756 	tmp |= CTRL_FRCSPD;
   17757 
   17758 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17759 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17760 	CSR_WRITE_FLUSH(sc);
   17761 	delay(20);
   17762 
   17763 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17764 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17765 	CSR_WRITE_FLUSH(sc);
   17766 	delay(20);
   17769 }
   17770 
    17771 /* Special case - the 82575 needs manual init ... */
   17772 static void
   17773 wm_reset_init_script_82575(struct wm_softc *sc)
   17774 {
   17775 	/*
    17776 	 * Remark: this is untested code - we have no board without EEPROM.
    17777 	 * The setup is the same as mentioned in the FreeBSD driver for the i82575.
   17778 	 */
   17779 
   17780 	/* SerDes configuration via SERDESCTRL */
   17781 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17782 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17785 
   17786 	/* CCM configuration via CCMCTL register */
   17787 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17788 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17789 
   17790 	/* PCIe lanes configuration */
   17791 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17792 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17793 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17794 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17795 
   17796 	/* PCIe PLL Configuration */
   17797 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17798 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17799 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17800 }
   17801 
   17802 static void
   17803 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17804 {
   17805 	uint32_t reg;
   17806 	uint16_t nvmword;
   17807 	int rv;
   17808 
   17809 	if (sc->sc_type != WM_T_82580)
   17810 		return;
   17811 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17812 		return;
   17813 
   17814 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17815 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17816 	if (rv != 0) {
   17817 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17818 		    __func__);
   17819 		return;
   17820 	}
   17821 
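          	/*
          	 * Re-apply the NVM's MDIO routing bits (external and combined
          	 * MDIO) to MDICNFG, which the preceding reset may have
          	 * cleared; per the checks above, only 82580 SGMII setups
          	 * need this.
          	 */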
   17822 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17823 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17824 		reg |= MDICNFG_DEST;
   17825 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17826 		reg |= MDICNFG_COM_MDIO;
   17827 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17828 }
   17829 
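          /* A PHY ID of all zeros or all ones means no PHY or a failed read */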
   17830 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17831 
   17832 static bool
   17833 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17834 {
   17835 	uint32_t reg;
   17836 	uint16_t id1, id2;
   17837 	int i, rv;
   17838 
   17839 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17840 		device_xname(sc->sc_dev), __func__));
   17841 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17842 
   17843 	id1 = id2 = 0xffff;
   17844 	for (i = 0; i < 2; i++) {
   17845 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17846 		    &id1);
   17847 		if ((rv != 0) || MII_INVALIDID(id1))
   17848 			continue;
   17849 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17850 		    &id2);
   17851 		if ((rv != 0) || MII_INVALIDID(id2))
   17852 			continue;
   17853 		break;
   17854 	}
   17855 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17856 		goto out;
   17857 
   17858 	/*
    17859 	 * In case the PHY needs to be in MDIO slow mode,
    17860 	 * set slow mode and try to get the PHY ID again.
   17861 	 */
   17862 	rv = 0;
   17863 	if (sc->sc_type < WM_T_PCH_LPT) {
   17864 		wm_set_mdio_slow_mode_hv_locked(sc);
   17865 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17866 		    &id1);
   17867 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17868 		    &id2);
   17869 	}
   17870 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17871 		device_printf(sc->sc_dev, "XXX return with false\n");
   17872 		return false;
   17873 	}
   17874 out:
   17875 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17876 		/* Only unforce SMBus if ME is not active */
   17877 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17878 			uint16_t phyreg;
   17879 
   17880 			/* Unforce SMBus mode in PHY */
   17881 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17882 			    CV_SMB_CTRL, &phyreg);
   17883 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17884 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17885 			    CV_SMB_CTRL, phyreg);
   17886 
   17887 			/* Unforce SMBus mode in MAC */
   17888 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17889 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17890 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17891 		}
   17892 	}
   17893 	return true;
   17894 }
   17895 
   17896 static void
   17897 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17898 {
   17899 	uint32_t reg;
   17900 	int i;
   17901 
   17902 	/* Set PHY Config Counter to 50msec */
   17903 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17904 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17905 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17906 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17907 
   17908 	/* Toggle LANPHYPC */
   17909 	reg = CSR_READ(sc, WMREG_CTRL);
   17910 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17911 	reg &= ~CTRL_LANPHYPC_VALUE;
   17912 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17913 	CSR_WRITE_FLUSH(sc);
   17914 	delay(1000);
   17915 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17916 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17917 	CSR_WRITE_FLUSH(sc);
   17918 
   17919 	if (sc->sc_type < WM_T_PCH_LPT)
   17920 		delay(50 * 1000);
   17921 	else {
   17922 		i = 20;
   17923 
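          		/*
          		 * Wait for CTRL_EXT_LPCD to assert, polling every 5ms
          		 * up to 20 times, then allow another 30ms for the PHY
          		 * to settle.
          		 */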
   17924 		do {
   17925 			delay(5 * 1000);
   17926 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17927 		    && i--);
   17928 
   17929 		delay(30 * 1000);
   17930 	}
   17931 }
   17932 
   17933 static int
   17934 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17935 {
   17936 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17937 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17938 	uint32_t rxa;
   17939 	uint16_t scale = 0, lat_enc = 0;
   17940 	int32_t obff_hwm = 0;
   17941 	int64_t lat_ns, value;
   17942 
   17943 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17944 		device_xname(sc->sc_dev), __func__));
   17945 
   17946 	if (link) {
   17947 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17948 		uint32_t status;
   17949 		uint16_t speed;
   17950 		pcireg_t preg;
   17951 
   17952 		status = CSR_READ(sc, WMREG_STATUS);
   17953 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17954 		case STATUS_SPEED_10:
   17955 			speed = 10;
   17956 			break;
   17957 		case STATUS_SPEED_100:
   17958 			speed = 100;
   17959 			break;
   17960 		case STATUS_SPEED_1000:
   17961 			speed = 1000;
   17962 			break;
   17963 		default:
   17964 			device_printf(sc->sc_dev, "Unknown speed "
   17965 			    "(status = %08x)\n", status);
   17966 			return -1;
   17967 		}
   17968 
   17969 		/* Rx Packet Buffer Allocation size (KB) */
   17970 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17971 
   17972 		/*
   17973 		 * Determine the maximum latency tolerated by the device.
   17974 		 *
   17975 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17976 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17977 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17978 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17979 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17980 		 */
   17981 		lat_ns = ((int64_t)rxa * 1024 -
   17982 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17983 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17984 		if (lat_ns < 0)
   17985 			lat_ns = 0;
   17986 		else
   17987 			lat_ns /= speed;
   17988 		value = lat_ns;
   17989 
   17990 		while (value > LTRV_VALUE) {
    17991 			scale++;
   17992 			value = howmany(value, __BIT(5));
   17993 		}
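          		/*
          		 * Worked example (hypothetical numbers): lat_ns =
          		 * 66,000,000 encodes as scale = 4, value = 63, since
          		 * ceil-dividing by 2^5 gives 66,000,000 -> 2,062,500 ->
          		 * 64,454 -> 2,015 -> 63, and 63 * 2^(5*4) ns is about
          		 * 66ms.
          		 */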
   17994 		if (scale > LTRV_SCALE_MAX) {
   17995 			device_printf(sc->sc_dev,
   17996 			    "Invalid LTR latency scale %d\n", scale);
   17997 			return -1;
   17998 		}
   17999 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   18000 
   18001 		/* Determine the maximum latency tolerated by the platform */
   18002 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18003 		    WM_PCI_LTR_CAP_LPT);
   18004 		max_snoop = preg & 0xffff;
   18005 		max_nosnoop = preg >> 16;
   18006 
   18007 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18008 
   18009 		if (lat_enc > max_ltr_enc) {
   18010 			lat_enc = max_ltr_enc;
   18011 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18012 			    * PCI_LTR_SCALETONS(
   18013 				    __SHIFTOUT(lat_enc,
   18014 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18015 		}
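          		/*
          		 * Convert the tolerated latency back into the amount of
          		 * Rx buffer (in KB) that fills in that time at line
          		 * rate; whatever remains of the Rx allocation becomes
          		 * the OBFF high water mark.
          		 */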
   18016 
   18017 		if (lat_ns) {
   18018 			lat_ns *= speed * 1000;
   18019 			lat_ns /= 8;
   18020 			lat_ns /= 1000000000;
   18021 			obff_hwm = (int32_t)(rxa - lat_ns);
   18022 		}
   18023 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   18024 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    18025 			    " (rxa = %d, lat_ns = %d)\n",
   18026 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18027 			return -1;
   18028 		}
   18029 	}
    18030 	/* Snoop and No-Snoop latencies are set to the same value */
   18031 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18032 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18033 
   18034 	/* Set OBFF high water mark */
   18035 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18036 	reg |= obff_hwm;
   18037 	CSR_WRITE(sc, WMREG_SVT, reg);
   18038 
   18039 	/* Enable OBFF */
   18040 	reg = CSR_READ(sc, WMREG_SVCR);
   18041 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18042 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18043 
   18044 	return 0;
   18045 }
   18046 
   18047 /*
   18048  * I210 Errata 25 and I211 Errata 10
   18049  * Slow System Clock.
   18050  *
    18051  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   18052  */
   18053 static int
   18054 wm_pll_workaround_i210(struct wm_softc *sc)
   18055 {
   18056 	uint32_t mdicnfg, wuc;
   18057 	uint32_t reg;
   18058 	pcireg_t pcireg;
   18059 	uint32_t pmreg;
   18060 	uint16_t nvmword, tmp_nvmword;
   18061 	uint16_t phyval;
   18062 	bool wa_done = false;
   18063 	int i, rv = 0;
   18064 
   18065 	/* Get Power Management cap offset */
   18066 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18067 	    &pmreg, NULL) == 0)
   18068 		return -1;
   18069 
   18070 	/* Save WUC and MDICNFG registers */
   18071 	wuc = CSR_READ(sc, WMREG_WUC);
   18072 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18073 
   18074 	reg = mdicnfg & ~MDICNFG_DEST;
   18075 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18076 
   18077 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18078 		/*
   18079 		 * The default value of the Initialization Control Word 1
   18080 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18081 		 */
   18082 		nvmword = INVM_DEFAULT_AL;
   18083 	}
   18084 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18085 
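          	/*
          	 * Retry until the PHY's PLL frequency register no longer reads
          	 * back as unconfigured, resetting the internal PHY and power
          	 * cycling the function on each failed attempt.
          	 */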
   18086 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18087 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18088 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18089 
   18090 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18091 			rv = 0;
   18092 			break; /* OK */
   18093 		} else
   18094 			rv = -1;
   18095 
   18096 		wa_done = true;
   18097 		/* Directly reset the internal PHY */
   18098 		reg = CSR_READ(sc, WMREG_CTRL);
   18099 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18100 
   18101 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18102 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18103 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18104 
   18105 		CSR_WRITE(sc, WMREG_WUC, 0);
   18106 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18107 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18108 
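          		/*
          		 * Bounce the function through D3hot and back to D0
          		 * while the PLL workaround value is latched in EEARBC;
          		 * the original autoload word is restored below.
          		 */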
   18109 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18110 		    pmreg + PCI_PMCSR);
   18111 		pcireg |= PCI_PMCSR_STATE_D3;
   18112 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18113 		    pmreg + PCI_PMCSR, pcireg);
   18114 		delay(1000);
   18115 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18116 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18117 		    pmreg + PCI_PMCSR, pcireg);
   18118 
   18119 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18120 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18121 
   18122 		/* Restore WUC register */
   18123 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18124 	}
   18125 
   18126 	/* Restore MDICNFG setting */
   18127 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18128 	if (wa_done)
   18129 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18130 	return rv;
   18131 }
   18132 
   18133 static void
   18134 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18135 {
   18136 	uint32_t reg;
   18137 
   18138 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18139 		device_xname(sc->sc_dev), __func__));
   18140 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18141 	    || (sc->sc_type == WM_T_PCH_CNP));
   18142 
   18143 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18144 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18145 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18146 
   18147 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18148 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18149 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18150 }
   18151 
   18152 /* Sysctl functions */
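          /*
           * The tdh/tdt handlers below read the hardware Tx descriptor head/tail
           * registers afresh on each access, so the reported values reflect the
           * ring state at the time of the query.
           */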
   18153 static int
   18154 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18155 {
   18156 	struct sysctlnode node = *rnode;
   18157 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18158 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18159 	struct wm_softc *sc = txq->txq_sc;
   18160 	uint32_t reg;
   18161 
   18162 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18163 	node.sysctl_data = &reg;
   18164 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18165 }
   18166 
   18167 static int
   18168 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18169 {
   18170 	struct sysctlnode node = *rnode;
   18171 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18172 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18173 	struct wm_softc *sc = txq->txq_sc;
   18174 	uint32_t reg;
   18175 
   18176 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18177 	node.sysctl_data = &reg;
   18178 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18179 }
   18180 
   18181 #ifdef WM_DEBUG
   18182 static int
   18183 wm_sysctl_debug(SYSCTLFN_ARGS)
   18184 {
   18185 	struct sysctlnode node = *rnode;
   18186 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18187 	uint32_t dflags;
   18188 	int error;
   18189 
   18190 	dflags = sc->sc_debug;
   18191 	node.sysctl_data = &dflags;
   18192 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18193 
   18194 	if (error || newp == NULL)
   18195 		return error;
   18196 
   18197 	sc->sc_debug = dflags;
   18198 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18199 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18200 
   18201 	return 0;
   18202 }
   18203 #endif
   18204