      1 /*	$NetBSD: if_wm.c,v 1.785 2023/09/25 06:18:09 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.785 2023/09/25 06:18:09 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_if_wm.h"
     89 #endif
     90 
     91 #include <sys/param.h>
     92 
     93 #include <sys/atomic.h>
     94 #include <sys/callout.h>
     95 #include <sys/cpu.h>
     96 #include <sys/device.h>
     97 #include <sys/errno.h>
     98 #include <sys/interrupt.h>
     99 #include <sys/ioctl.h>
    100 #include <sys/kernel.h>
    101 #include <sys/kmem.h>
    102 #include <sys/mbuf.h>
    103 #include <sys/pcq.h>
    104 #include <sys/queue.h>
    105 #include <sys/rndsource.h>
    106 #include <sys/socket.h>
    107 #include <sys/sysctl.h>
    108 #include <sys/syslog.h>
    109 #include <sys/systm.h>
    110 #include <sys/workqueue.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/mdio.h>
    133 #include <dev/mii/miivar.h>
    134 #include <dev/mii/miidevs.h>
    135 #include <dev/mii/mii_bitbang.h>
    136 #include <dev/mii/ikphyreg.h>
    137 #include <dev/mii/igphyreg.h>
    138 #include <dev/mii/igphyvar.h>
    139 #include <dev/mii/inbmphyreg.h>
    140 #include <dev/mii/ihphyreg.h>
    141 #include <dev/mii/makphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 
    160 #if 0
    161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    163 	WM_DEBUG_LOCK
    164 #endif
    165 
    166 #define	DPRINTF(sc, x, y)			  \
    167 	do {					  \
    168 		if ((sc)->sc_debug & (x))	  \
    169 			printf y;		  \
    170 	} while (0)
    171 #else
    172 #define	DPRINTF(sc, x, y)	__nothing
    173 #endif /* WM_DEBUG */
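/*
 * Illustrative sketch (not part of the driver proper): DPRINTF()'s third
 * argument carries the whole printf() argument list in its own set of
 * parentheses, so the macro body can expand it into a plain printf() call.
 * The debug flag and message below are examples only.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif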
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
     178  * Maximum number of interrupts used by this device driver.
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
     207  * situations with jumbo frames. If an mbuf chain has more than 64 DMA segments,
    208  * m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
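/*
 * Example (illustrative only): because both the descriptor ring size and
 * the job queue size are powers of two, the WM_NEXT*() macros wrap with a
 * cheap AND instead of a modulo; with txq_ndesc == 4096,
 * WM_NEXTTX(txq, 4095) yields 0.  A loop that visits every descriptor
 * once might look like:
 */
#if 0
	int i, n;

	for (i = 0, n = 0; n < WM_NTXDESC(txq); i = WM_NEXTTX(txq, i), n++)
		/* process descriptor i */;
#endif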
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
    237  * Receive descriptor list size.  We have one Rx buffer for normal
    238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    239  * packet.  We allocate 256 receive descriptors, each with a 2k
    240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
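/*
 * Sketch (an assumption drawn from the table above, not a quote from the
 * driver): the SW/FW PHY semaphore bit is selected by the chip's function
 * number, one table entry per port.
 */
#if 0
	uint16_t sema = swfwphysem[sc->sc_funcid];	/* sc_funcid is 0..3 */
#endif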
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    305 #if !defined(WM_EVENT_COUNTERS)
    306 #define WM_EVENT_COUNTERS 1
    307 #endif
    308 #endif
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
    312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    313 	struct evcnt qname##_ev_##evname
    314 
    315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    316 	do {								\
    317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    319 		    "%s%02d%s", #qname, (qnum), #evname);		\
    320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    321 		    (evtype), NULL, (xname),				\
    322 		    (q)->qname##_##evname##_evcnt_name);		\
    323 	} while (0)
    324 
    325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    327 
    328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    330 
    331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    332 	evcnt_detach(&(q)->qname##_ev_##evname)
    333 #endif /* WM_EVENT_COUNTERS */
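/*
 * How the WM_Q_EVCNT_* macros pair up (illustrative sketch; the "rxq"
 * queue and "intr" event names mirror definitions that appear later in
 * this file): DEFINE declares the counter storage inside the queue
 * structure, ATTACH registers it with evcnt(9) under a per-queue name
 * such as "rxq00intr", and DETACH unregisters it again.
 */
#if 0
	/* In the queue structure: */
	WM_Q_EVCNT_DEFINE(rxq, intr);		/* declares rxq_ev_intr */

	/* At attach time, for queue number qnum on device xname: */
	WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, qnum, xname);

	/* At detach time: */
	WM_Q_EVCNT_DETACH(rxq, intr, rxq, qnum);
#endif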
    334 
    335 struct wm_txqueue {
    336 	kmutex_t *txq_lock;		/* lock for tx operations */
    337 
    338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    339 
    340 	/* Software state for the transmit descriptors. */
    341 	int txq_num;			/* must be a power of two */
    342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    343 
    344 	/* TX control data structures. */
    345 	int txq_ndesc;			/* must be a power of two */
     346 	size_t txq_descsize;		/* size of a Tx descriptor */
    347 	txdescs_t *txq_descs_u;
    348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     350 	int txq_desc_rseg;		/* real number of control segments */
    351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    352 #define	txq_descs	txq_descs_u->sctxu_txdescs
    353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    354 
    355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    356 
    357 	int txq_free;			/* number of free Tx descriptors */
    358 	int txq_next;			/* next ready Tx descriptor */
    359 
    360 	int txq_sfree;			/* number of free Tx jobs */
    361 	int txq_snext;			/* next free Tx job */
    362 	int txq_sdirty;			/* dirty Tx jobs */
    363 
    364 	/* These 4 variables are used only on the 82547. */
    365 	int txq_fifo_size;		/* Tx FIFO size */
    366 	int txq_fifo_head;		/* current head of FIFO */
    367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    369 
    370 	/*
    371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     372 	 * CPUs. This queue mediates between them without blocking.
    373 	 */
    374 	pcq_t *txq_interq;
    375 
    376 	/*
     377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     378 	 * to manage the Tx H/W queue's busy flag.
    379 	 */
    380 	int txq_flags;			/* flags for H/W queue, see below */
    381 #define	WM_TXQ_NO_SPACE		0x1
    382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    383 
    384 	bool txq_stopping;
    385 
    386 	bool txq_sending;
    387 	time_t txq_lastsent;
    388 
    389 	/* Checksum flags used for previous packet */
    390 	uint32_t	txq_last_hw_cmd;
    391 	uint8_t		txq_last_hw_fields;
    392 	uint16_t	txq_last_hw_ipcs;
    393 	uint16_t	txq_last_hw_tucs;
    394 
    395 	uint32_t txq_packets;		/* for AIM */
    396 	uint32_t txq_bytes;		/* for AIM */
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* TX event counters */
    399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
    400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
    401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
    402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
    403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
    404 					    /* XXX not used? */
    405 
    406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
    407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
    408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
    409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
    410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
    411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
    412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
    413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
    414 					    /* other than toomanyseg */
    415 
    416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
    417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
    418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
    419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
    420 
    421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    423 #endif /* WM_EVENT_COUNTERS */
    424 };
    425 
    426 struct wm_rxqueue {
    427 	kmutex_t *rxq_lock;		/* lock for rx operations */
    428 
    429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    430 
    431 	/* Software state for the receive descriptors. */
    432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    433 
    434 	/* RX control data structures. */
    435 	int rxq_ndesc;			/* must be a power of two */
     436 	size_t rxq_descsize;		/* size of an Rx descriptor */
    437 	rxdescs_t *rxq_descs_u;
    438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     440 	int rxq_desc_rseg;		/* real number of control segments */
    441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    445 
    446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    447 
    448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    449 	int rxq_discard;
    450 	int rxq_len;
    451 	struct mbuf *rxq_head;
    452 	struct mbuf *rxq_tail;
    453 	struct mbuf **rxq_tailp;
    454 
    455 	bool rxq_stopping;
    456 
    457 	uint32_t rxq_packets;		/* for AIM */
    458 	uint32_t rxq_bytes;		/* for AIM */
    459 #ifdef WM_EVENT_COUNTERS
    460 	/* RX event counters */
    461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    463 
    464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    466 #endif
    467 };
    468 
    469 struct wm_queue {
    470 	int wmq_id;			/* index of TX/RX queues */
    471 	int wmq_intr_idx;		/* index of MSI-X tables */
    472 
    473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    474 	bool wmq_set_itr;
    475 
    476 	struct wm_txqueue wmq_txq;
    477 	struct wm_rxqueue wmq_rxq;
    478 	char sysctlname[32];		/* Name for sysctl */
    479 
    480 	bool wmq_txrx_use_workqueue;
    481 	bool wmq_wq_enqueued;
    482 	struct work wmq_cookie;
    483 	void *wmq_si;
    484 };
    485 
    486 struct wm_phyop {
    487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    488 	void (*release)(struct wm_softc *);
    489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    490 	int (*writereg_locked)(device_t, int, int, uint16_t);
    491 	int reset_delay_us;
    492 	bool no_errprint;
    493 };
    494 
    495 struct wm_nvmop {
    496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    497 	void (*release)(struct wm_softc *);
    498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    499 };
    500 
    501 /*
    502  * Software state per device.
    503  */
    504 struct wm_softc {
    505 	device_t sc_dev;		/* generic device information */
    506 	bus_space_tag_t sc_st;		/* bus space tag */
    507 	bus_space_handle_t sc_sh;	/* bus space handle */
    508 	bus_size_t sc_ss;		/* bus space size */
    509 	bus_space_tag_t sc_iot;		/* I/O space tag */
    510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    511 	bus_size_t sc_ios;		/* I/O space size */
    512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    514 	bus_size_t sc_flashs;		/* flash registers space size */
    515 	off_t sc_flashreg_offset;	/*
    516 					 * offset to flash registers from
    517 					 * start of BAR
    518 					 */
    519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    520 
    521 	struct ethercom sc_ethercom;	/* Ethernet common data */
    522 	struct mii_data sc_mii;		/* MII/media information */
    523 
    524 	pci_chipset_tag_t sc_pc;
    525 	pcitag_t sc_pcitag;
    526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    528 
    529 	uint16_t sc_pcidevid;		/* PCI device ID */
    530 	wm_chip_type sc_type;		/* MAC type */
    531 	int sc_rev;			/* MAC revision */
    532 	wm_phy_type sc_phytype;		/* PHY type */
    533 	uint8_t sc_sfptype;		/* SFP type */
    534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    535 #define	WM_MEDIATYPE_UNKNOWN		0x00
    536 #define	WM_MEDIATYPE_FIBER		0x01
    537 #define	WM_MEDIATYPE_COPPER		0x02
    538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    540 	u_int sc_flags;			/* flags; see below */
    541 	u_short sc_if_flags;		/* last if_flags */
    542 	int sc_ec_capenable;		/* last ec_capenable */
    543 	int sc_flowflags;		/* 802.3x flow control flags */
    544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    545 	int sc_align_tweak;
    546 
    547 	void *sc_ihs[WM_MAX_NINTR];	/*
    548 					 * interrupt cookie.
    549 					 * - legacy and msi use sc_ihs[0] only
    550 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    551 					 */
    552 	pci_intr_handle_t *sc_intrs;	/*
    553 					 * legacy and msi use sc_intrs[0] only
     554 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    555 					 */
    556 	int sc_nintrs;			/* number of interrupts */
    557 
    558 	int sc_link_intr_idx;		/* index of MSI-X tables */
    559 
    560 	callout_t sc_tick_ch;		/* tick callout */
    561 	bool sc_core_stopping;
    562 
    563 	int sc_nvm_ver_major;
    564 	int sc_nvm_ver_minor;
    565 	int sc_nvm_ver_build;
    566 	int sc_nvm_addrbits;		/* NVM address bits */
    567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    568 	int sc_ich8_flash_base;
    569 	int sc_ich8_flash_bank_size;
    570 	int sc_nvm_k1_enabled;
    571 
    572 	int sc_nqueues;
    573 	struct wm_queue *sc_queue;
    574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    578 	struct workqueue *sc_queue_wq;
    579 	bool sc_txrx_use_workqueue;
    580 
    581 	int sc_affinity_offset;
    582 
    583 #ifdef WM_EVENT_COUNTERS
    584 	/* Event counters. */
    585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    586 
    587 	/* >= WM_T_82542_2_1 */
    588 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    591 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    592 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    593 
    594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
    595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
    596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
    597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
    598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
    599 	struct evcnt sc_ev_scc;		/* Single Collision */
    600 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
    601 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
    602 	struct evcnt sc_ev_latecol;	/* Late Collision */
    603 	struct evcnt sc_ev_colc;	/* Collision */
    604 	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
    605 	struct evcnt sc_ev_dc;		/* Defer */
    606 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
    607 	struct evcnt sc_ev_sec;		/* Sequence Error */
    608 
    609 	/* Old */
    610 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
    611 	/* New */
    612 	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */
    613 
    614 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
    615 	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
    616 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
    617 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
    618 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
    619 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
    620 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
    621 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
    622 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
    623 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
    624 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
    625 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
    626 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
    627 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
    628 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
    629 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
    630 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
    631 	struct evcnt sc_ev_roc;		/* Rx Oversize */
    632 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
    633 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
    634 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
    635 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
    636 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
    637 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
    638 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
    639 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
    640 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
    641 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
    642 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
    643 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
    644 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
    645 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
    646 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
    647 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
    648 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
    649 
    650 	/* Old */
    651 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
    652 	/* New */
    653 	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */
    654 
    655 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
    656 
    657 	/* Old */
    658 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
    659 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
    660 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
    661 	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
    662 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
    663 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
    664 	/*
    665 	 * sc_ev_rxdmtc is shared with both "Intr. cause" and
    666 	 * non "Intr. cause" register.
    667 	 */
    668 	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
    669 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
    670 	/* New */
    671 	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
    672 	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
    673 	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
    674 	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
    675 	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
    676 	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
    677 	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
    678 	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
    679 	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
    680 	struct evcnt sc_ev_lenerrs;	/* Length Error */
    681 	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
    682 	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
    683 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
    684 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
    685 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
    686 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
    687 	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
    688 	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
    689 #endif /* WM_EVENT_COUNTERS */
    690 
    691 	struct sysctllog *sc_sysctllog;
    692 
     693 	/* This variable is used only on the 82547. */
    694 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    695 
    696 	uint32_t sc_ctrl;		/* prototype CTRL register */
    697 #if 0
    698 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    699 #endif
    700 	uint32_t sc_icr;		/* prototype interrupt bits */
    701 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    702 	uint32_t sc_tctl;		/* prototype TCTL register */
    703 	uint32_t sc_rctl;		/* prototype RCTL register */
    704 	uint32_t sc_txcw;		/* prototype TXCW register */
    705 	uint32_t sc_tipg;		/* prototype TIPG register */
    706 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    707 	uint32_t sc_pba;		/* prototype PBA register */
    708 
    709 	int sc_tbi_linkup;		/* TBI link status */
    710 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    711 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    712 	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */
    713 
    714 	int sc_mchash_type;		/* multicast filter offset */
    715 
    716 	krndsource_t rnd_source;	/* random source */
    717 
    718 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    719 
    720 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    721 	kmutex_t *sc_ich_phymtx;	/*
    722 					 * 82574/82583/ICH/PCH specific PHY
    723 					 * mutex. For 82574/82583, the mutex
    724 					 * is used for both PHY and NVM.
    725 					 */
    726 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    727 
    728 	struct wm_phyop phy;
    729 	struct wm_nvmop nvm;
    730 
    731 	struct workqueue *sc_reset_wq;
    732 	struct work sc_reset_work;
    733 	volatile unsigned sc_reset_pending;
    734 
    735 	bool sc_dying;
    736 
    737 #ifdef WM_DEBUG
    738 	uint32_t sc_debug;
    739 	bool sc_trigger_reset;
    740 #endif
    741 };
    742 
    743 #define	WM_RXCHAIN_RESET(rxq)						\
    744 do {									\
    745 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    746 	*(rxq)->rxq_tailp = NULL;					\
    747 	(rxq)->rxq_len = 0;						\
    748 } while (/*CONSTCOND*/0)
    749 
    750 #define	WM_RXCHAIN_LINK(rxq, m)						\
    751 do {									\
    752 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    753 	(rxq)->rxq_tailp = &(m)->m_next;				\
    754 } while (/*CONSTCOND*/0)
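/*
 * Illustrative use of the Rx chain macros (a sketch, not driver code;
 * "m" stands for a freshly received mbuf and the loop condition is
 * hypothetical): RESET empties the chain, then each LINK appends an
 * mbuf through the tail pointer so a packet that spans several Rx
 * buffers is stitched together in order.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	while (more_buffers_in_this_packet) {
		WM_RXCHAIN_LINK(rxq, m);
		rxq->rxq_len++;
	}
	/* rxq->rxq_head now points at the whole packet. */
#endif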
    755 
    756 #ifdef WM_EVENT_COUNTERS
    757 #ifdef __HAVE_ATOMIC64_LOADSTORE
    758 #define	WM_EVCNT_INCR(ev)						\
    759 	atomic_store_relaxed(&((ev)->ev_count),				\
    760 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    761 #define	WM_EVCNT_STORE(ev, val)						\
    762 	atomic_store_relaxed(&((ev)->ev_count), (val))
    763 #define	WM_EVCNT_ADD(ev, val)						\
    764 	atomic_store_relaxed(&((ev)->ev_count),				\
    765 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    766 #else
    767 #define	WM_EVCNT_INCR(ev)						\
    768 	((ev)->ev_count)++
    769 #define	WM_EVCNT_STORE(ev, val)						\
    770 	((ev)->ev_count = (val))
    771 #define	WM_EVCNT_ADD(ev, val)						\
    772 	(ev)->ev_count += (val)
    773 #endif
    774 
    775 #define WM_Q_EVCNT_INCR(qname, evname)			\
    776 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    777 #define WM_Q_EVCNT_STORE(qname, evname, val)		\
    778 	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
    779 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    780 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    781 #else /* !WM_EVENT_COUNTERS */
    782 #define	WM_EVCNT_INCR(ev)	/* nothing */
    783 #define	WM_EVCNT_STORE(ev, val)	/* nothing */
    784 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    785 
    786 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    787 #define WM_Q_EVCNT_STORE(qname, evname, val)	/* nothing */
    788 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    789 #endif /* !WM_EVENT_COUNTERS */
    790 
    791 #define	CSR_READ(sc, reg)						\
    792 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    793 #define	CSR_WRITE(sc, reg, val)						\
    794 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    795 #define	CSR_WRITE_FLUSH(sc)						\
    796 	(void)CSR_READ((sc), WMREG_STATUS)
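/*
 * Typical posted-write flush pattern (a sketch; the CTRL reset write is
 * just an example): reading STATUS back forces the preceding write out
 * of any intermediate bridge before the delay starts counting.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
#endif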
    797 
    798 #define ICH8_FLASH_READ32(sc, reg)					\
    799 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    800 	    (reg) + sc->sc_flashreg_offset)
    801 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    802 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    803 	    (reg) + sc->sc_flashreg_offset, (data))
    804 
    805 #define ICH8_FLASH_READ16(sc, reg)					\
    806 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    807 	    (reg) + sc->sc_flashreg_offset)
    808 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    809 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    810 	    (reg) + sc->sc_flashreg_offset, (data))
    811 
    812 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    813 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    814 
    815 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    816 #define	WM_CDTXADDR_HI(txq, x)						\
    817 	(sizeof(bus_addr_t) == 8 ?					\
    818 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    819 
    820 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    821 #define	WM_CDRXADDR_HI(rxq, x)						\
    822 	(sizeof(bus_addr_t) == 8 ?					\
    823 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
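/*
 * Sketch of how the LO/HI halves are consumed (illustrative; TDBAL/TDBAH
 * are the classic i8254x Tx descriptor base registers): the ring's
 * physical base address is programmed as two 32-bit halves, and on
 * systems with a 32-bit bus_addr_t the high half is simply 0.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
#endif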
    824 
    825 /*
     826  * Register read/write functions,
     827  * other than CSR_{READ|WRITE}().
    828  */
    829 #if 0
    830 static inline uint32_t wm_io_read(struct wm_softc *, int);
    831 #endif
    832 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    833 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    834     uint32_t, uint32_t);
    835 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    836 
    837 /*
    838  * Descriptor sync/init functions.
    839  */
    840 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    841 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    842 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    843 
    844 /*
    845  * Device driver interface functions and commonly used functions.
    846  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    847  */
    848 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    849 static int	wm_match(device_t, cfdata_t, void *);
    850 static void	wm_attach(device_t, device_t, void *);
    851 static int	wm_detach(device_t, int);
    852 static bool	wm_suspend(device_t, const pmf_qual_t *);
    853 static bool	wm_resume(device_t, const pmf_qual_t *);
    854 static bool	wm_watchdog(struct ifnet *);
    855 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    856     uint16_t *);
    857 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    858     uint16_t *);
    859 static void	wm_tick(void *);
    860 static int	wm_ifflags_cb(struct ethercom *);
    861 static int	wm_ioctl(struct ifnet *, u_long, void *);
    862 /* MAC address related */
    863 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    864 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    865 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    866 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    867 static int	wm_rar_count(struct wm_softc *);
    868 static void	wm_set_filter(struct wm_softc *);
    869 /* Reset and init related */
    870 static void	wm_set_vlan(struct wm_softc *);
    871 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    872 static void	wm_get_auto_rd_done(struct wm_softc *);
    873 static void	wm_lan_init_done(struct wm_softc *);
    874 static void	wm_get_cfg_done(struct wm_softc *);
    875 static int	wm_phy_post_reset(struct wm_softc *);
    876 static int	wm_write_smbus_addr(struct wm_softc *);
    877 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    878 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    879 static void	wm_initialize_hardware_bits(struct wm_softc *);
    880 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    881 static int	wm_reset_phy(struct wm_softc *);
    882 static void	wm_flush_desc_rings(struct wm_softc *);
    883 static void	wm_reset(struct wm_softc *);
    884 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    885 static void	wm_rxdrain(struct wm_rxqueue *);
    886 static void	wm_init_rss(struct wm_softc *);
    887 static void	wm_adjust_qnum(struct wm_softc *, int);
    888 static inline bool	wm_is_using_msix(struct wm_softc *);
    889 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    890 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    891 static int	wm_setup_legacy(struct wm_softc *);
    892 static int	wm_setup_msix(struct wm_softc *);
    893 static int	wm_init(struct ifnet *);
    894 static int	wm_init_locked(struct ifnet *);
    895 static void	wm_init_sysctls(struct wm_softc *);
    896 static void	wm_update_stats(struct wm_softc *);
    897 static void	wm_clear_evcnt(struct wm_softc *);
    898 static void	wm_unset_stopping_flags(struct wm_softc *);
    899 static void	wm_set_stopping_flags(struct wm_softc *);
    900 static void	wm_stop(struct ifnet *, int);
    901 static void	wm_stop_locked(struct ifnet *, bool, bool);
    902 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    903 static void	wm_82547_txfifo_stall(void *);
    904 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    905 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    906 /* DMA related */
    907 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    908 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    909 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    910 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    911     struct wm_txqueue *);
    912 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    913 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    914 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    915     struct wm_rxqueue *);
    916 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    917 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    918 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    919 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    920 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    921 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    922 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    923     struct wm_txqueue *);
    924 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    925     struct wm_rxqueue *);
    926 static int	wm_alloc_txrx_queues(struct wm_softc *);
    927 static void	wm_free_txrx_queues(struct wm_softc *);
    928 static int	wm_init_txrx_queues(struct wm_softc *);
    929 /* Start */
    930 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    931     struct wm_txsoft *, uint32_t *, uint8_t *);
    932 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    933 static void	wm_start(struct ifnet *);
    934 static void	wm_start_locked(struct ifnet *);
    935 static int	wm_transmit(struct ifnet *, struct mbuf *);
    936 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    937 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    938     bool);
    939 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    940     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    941 static void	wm_nq_start(struct ifnet *);
    942 static void	wm_nq_start_locked(struct ifnet *);
    943 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    944 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    945 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    946     bool);
    947 static void	wm_deferred_start_locked(struct wm_txqueue *);
    948 static void	wm_handle_queue(void *);
    949 static void	wm_handle_queue_work(struct work *, void *);
    950 static void	wm_handle_reset_work(struct work *, void *);
    951 /* Interrupt */
    952 static bool	wm_txeof(struct wm_txqueue *, u_int);
    953 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    954 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    955 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    956 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    957 static void	wm_linkintr(struct wm_softc *, uint32_t);
    958 static int	wm_intr_legacy(void *);
    959 static inline void	wm_txrxintr_disable(struct wm_queue *);
    960 static inline void	wm_txrxintr_enable(struct wm_queue *);
    961 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    962 static int	wm_txrxintr_msix(void *);
    963 static int	wm_linkintr_msix(void *);
    964 
    965 /*
    966  * Media related.
    967  * GMII, SGMII, TBI, SERDES and SFP.
    968  */
    969 /* Common */
    970 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    971 /* GMII related */
    972 static void	wm_gmii_reset(struct wm_softc *);
    973 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    974 static int	wm_get_phy_id_82575(struct wm_softc *);
    975 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    976 static int	wm_gmii_mediachange(struct ifnet *);
    977 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    978 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    979 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    980 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    981 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    982 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    983 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    984 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    985 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    986 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    987 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    988 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    989 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    990 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    991 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    992 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    993 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    994 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    995 	bool);
    996 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    997 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    998 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    999 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
   1000 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
   1001 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
   1002 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
   1003 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
   1004 static void	wm_gmii_statchg(struct ifnet *);
   1005 /*
    1006  * Kumeran related (80003, ICH* and PCH*).
    1007  * These functions are not for accessing MII registers but for accessing
    1008  * Kumeran-specific registers.
   1009  */
   1010 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
   1011 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
   1012 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
   1013 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
   1014 /* EMI register related */
   1015 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
   1016 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
   1017 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
   1018 /* SGMII */
   1019 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
   1020 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
   1021 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
   1022 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
   1023 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
   1024 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
   1025 /* TBI related */
   1026 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
   1027 static void	wm_tbi_mediainit(struct wm_softc *);
   1028 static int	wm_tbi_mediachange(struct ifnet *);
   1029 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
   1030 static int	wm_check_for_link(struct wm_softc *);
   1031 static void	wm_tbi_tick(struct wm_softc *);
   1032 /* SERDES related */
   1033 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
   1034 static int	wm_serdes_mediachange(struct ifnet *);
   1035 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
   1036 static void	wm_serdes_tick(struct wm_softc *);
   1037 /* SFP related */
   1038 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
   1039 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
   1040 
   1041 /*
   1042  * NVM related.
   1043  * Microwire, SPI (w/wo EERD) and Flash.
   1044  */
   1045 /* Misc functions */
   1046 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
   1047 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
   1048 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
   1049 /* Microwire */
   1050 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
   1051 /* SPI */
   1052 static int	wm_nvm_ready_spi(struct wm_softc *);
   1053 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   1054 /* Using with EERD */
   1055 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
   1056 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
   1057 /* Flash */
   1058 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
   1059     unsigned int *);
   1060 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
   1061 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
   1062 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
   1063     uint32_t *);
   1064 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
   1065 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
   1066 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
   1067 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
   1068 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
   1069 /* iNVM */
   1070 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
   1071 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   1072 /* Lock, detecting NVM type, validate checksum and read */
   1073 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
   1074 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
   1075 static int	wm_nvm_validate_checksum(struct wm_softc *);
   1076 static void	wm_nvm_version_invm(struct wm_softc *);
   1077 static void	wm_nvm_version(struct wm_softc *);
   1078 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
   1079 
   1080 /*
   1081  * Hardware semaphores.
    1082  * Very complex...
   1083  */
   1084 static int	wm_get_null(struct wm_softc *);
   1085 static void	wm_put_null(struct wm_softc *);
   1086 static int	wm_get_eecd(struct wm_softc *);
   1087 static void	wm_put_eecd(struct wm_softc *);
   1088 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
   1089 static void	wm_put_swsm_semaphore(struct wm_softc *);
   1090 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
   1091 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
   1092 static int	wm_get_nvm_80003(struct wm_softc *);
   1093 static void	wm_put_nvm_80003(struct wm_softc *);
   1094 static int	wm_get_nvm_82571(struct wm_softc *);
   1095 static void	wm_put_nvm_82571(struct wm_softc *);
   1096 static int	wm_get_phy_82575(struct wm_softc *);
   1097 static void	wm_put_phy_82575(struct wm_softc *);
   1098 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1099 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1100 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1101 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1102 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1103 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1104 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1105 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1106 
   1107 /*
   1108  * Management mode and power management related subroutines.
   1109  * BMC, AMT, suspend/resume and EEE.
   1110  */
   1111 #if 0
   1112 static int	wm_check_mng_mode(struct wm_softc *);
   1113 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1114 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1115 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1116 #endif
   1117 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1118 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1119 static void	wm_get_hw_control(struct wm_softc *);
   1120 static void	wm_release_hw_control(struct wm_softc *);
   1121 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1122 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1123 static void	wm_init_manageability(struct wm_softc *);
   1124 static void	wm_release_manageability(struct wm_softc *);
   1125 static void	wm_get_wakeup(struct wm_softc *);
   1126 static int	wm_ulp_disable(struct wm_softc *);
   1127 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1128 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1129 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1130 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1131 static void	wm_enable_wakeup(struct wm_softc *);
   1132 static void	wm_disable_aspm(struct wm_softc *);
   1133 /* LPLU (Low Power Link Up) */
   1134 static void	wm_lplu_d0_disable(struct wm_softc *);
   1135 /* EEE */
   1136 static int	wm_set_eee_i350(struct wm_softc *);
   1137 static int	wm_set_eee_pchlan(struct wm_softc *);
   1138 static int	wm_set_eee(struct wm_softc *);
   1139 
   1140 /*
   1141  * Workarounds (mainly PHY related).
    1142  * Basically, PHY workarounds are in the PHY drivers.
   1143  */
   1144 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1145 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1146 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1147 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1148 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1149 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1150 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1151 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1152 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1153 static int	wm_k1_workaround_lv(struct wm_softc *);
   1154 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1155 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1156 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
   1157 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1158 static void	wm_reset_init_script_82575(struct wm_softc *);
   1159 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1160 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1161 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1162 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1163 static int	wm_pll_workaround_i210(struct wm_softc *);
   1164 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1165 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1166 static void	wm_set_linkdown_discard(struct wm_softc *);
   1167 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1168 
   1169 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1170 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1171 #ifdef WM_DEBUG
   1172 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1173 #endif
   1174 
   1175 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1176     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1177 
   1178 /*
   1179  * Devices supported by this driver.
   1180  */
   1181 static const struct wm_product {
   1182 	pci_vendor_id_t		wmp_vendor;
   1183 	pci_product_id_t	wmp_product;
   1184 	const char		*wmp_name;
   1185 	wm_chip_type		wmp_type;
   1186 	uint32_t		wmp_flags;
   1187 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1188 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1189 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1190 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1191 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1192 } wm_products[] = {
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1194 	  "Intel i82542 1000BASE-X Ethernet",
   1195 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1198 	  "Intel i82543GC 1000BASE-X Ethernet",
   1199 	  WM_T_82543,		WMP_F_FIBER },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1202 	  "Intel i82543GC 1000BASE-T Ethernet",
   1203 	  WM_T_82543,		WMP_F_COPPER },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1206 	  "Intel i82544EI 1000BASE-T Ethernet",
   1207 	  WM_T_82544,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1210 	  "Intel i82544EI 1000BASE-X Ethernet",
   1211 	  WM_T_82544,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1214 	  "Intel i82544GC 1000BASE-T Ethernet",
   1215 	  WM_T_82544,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1218 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1219 	  WM_T_82544,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1222 	  "Intel i82540EM 1000BASE-T Ethernet",
   1223 	  WM_T_82540,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1226 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1227 	  WM_T_82540,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1230 	  "Intel i82540EP 1000BASE-T Ethernet",
   1231 	  WM_T_82540,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1234 	  "Intel i82540EP 1000BASE-T Ethernet",
   1235 	  WM_T_82540,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1238 	  "Intel i82540EP 1000BASE-T Ethernet",
   1239 	  WM_T_82540,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1242 	  "Intel i82545EM 1000BASE-T Ethernet",
   1243 	  WM_T_82545,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1246 	  "Intel i82545GM 1000BASE-T Ethernet",
   1247 	  WM_T_82545_3,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1250 	  "Intel i82545GM 1000BASE-X Ethernet",
   1251 	  WM_T_82545_3,		WMP_F_FIBER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1254 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1255 	  WM_T_82545_3,		WMP_F_SERDES },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1258 	  "Intel i82546EB 1000BASE-T Ethernet",
   1259 	  WM_T_82546,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1262 	  "Intel i82546EB 1000BASE-T Ethernet",
   1263 	  WM_T_82546,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1266 	  "Intel i82545EM 1000BASE-X Ethernet",
   1267 	  WM_T_82545,		WMP_F_FIBER },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1270 	  "Intel i82546EB 1000BASE-X Ethernet",
   1271 	  WM_T_82546,		WMP_F_FIBER },
   1272 
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1274 	  "Intel i82546GB 1000BASE-T Ethernet",
   1275 	  WM_T_82546_3,		WMP_F_COPPER },
   1276 
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1278 	  "Intel i82546GB 1000BASE-X Ethernet",
   1279 	  WM_T_82546_3,		WMP_F_FIBER },
   1280 
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1282 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1283 	  WM_T_82546_3,		WMP_F_SERDES },
   1284 
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1286 	  "i82546GB quad-port Gigabit Ethernet",
   1287 	  WM_T_82546_3,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1290 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1291 	  WM_T_82546_3,		WMP_F_COPPER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1294 	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1756 	  "I219 V Ethernet Connection",
   1757 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1759 	  "I219 V (2) Ethernet Connection",
   1760 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1762 	  "I219 V (4) Ethernet Connection",
   1763 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1765 	  "I219 V (5) Ethernet Connection",
   1766 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1768 	  "I219 V (6) Ethernet Connection",
   1769 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1771 	  "I219 V (7) Ethernet Connection",
   1772 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1774 	  "I219 V (8) Ethernet Connection",
   1775 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1777 	  "I219 V (9) Ethernet Connection",
   1778 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1780 	  "I219 V (10) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1783 	  "I219 V (11) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1786 	  "I219 V (12) Ethernet Connection",
   1787 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1789 	  "I219 V (13) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1792 	  "I219 V (14) Ethernet Connection",
   1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1795 	  "I219 V (15) Ethernet Connection",
   1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1798 	  "I219 V (16) Ethernet Connection",
   1799 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1801 	  "I219 V (17) Ethernet Connection",
   1802 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1804 	  "I219 V (18) Ethernet Connection",
   1805 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1807 	  "I219 V (19) Ethernet Connection",
   1808 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1809 	{ 0,			0,
   1810 	  NULL,
   1811 	  0,			0 },
   1812 };
   1813 
   1814 /*
    1815  * Register read/write functions other than CSR_{READ|WRITE}().
   1817  */
   1818 
   1819 #if 0 /* Not currently used */
   1820 static inline uint32_t
   1821 wm_io_read(struct wm_softc *sc, int reg)
   1822 {
   1823 
   1824 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1825 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1826 }
   1827 #endif
   1828 
   1829 static inline void
   1830 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1831 {
   1832 
   1833 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1834 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1835 }
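
/*
 * Illustrative note (not driver code): the I/O BAR is a two-register
 * indirect window -- the target register offset is written at offset 0
 * (the address register) and the data moves through offset 4 (the data
 * register).  A sketch of issuing a chip reset through this window,
 * assuming the usual WMREG_CTRL and CTRL_RST definitions:
 */
#if 0 /* Example only */
	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
#endif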
   1836 
   1837 static inline void
   1838 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1839     uint32_t data)
   1840 {
   1841 	uint32_t regval;
   1842 	int i;
   1843 
   1844 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1845 
   1846 	CSR_WRITE(sc, reg, regval);
   1847 
   1848 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1849 		delay(5);
   1850 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1851 			break;
   1852 	}
   1853 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1854 		aprint_error("%s: WARNING:"
   1855 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1856 		    device_xname(sc->sc_dev), reg);
   1857 	}
   1858 }
   1859 
   1860 static inline void
   1861 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1862 {
   1863 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1864 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1865 }
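
/*
 * Worked example (sketch): for v = 0x0000000123456789, wa_low becomes
 * htole32(0x23456789) and wa_high becomes htole32(0x00000001), matching
 * the hardware's 64-bit little-endian descriptor address layout.
 */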
   1866 
   1867 /*
   1868  * Descriptor sync/init functions.
   1869  */
   1870 static inline void
   1871 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1872 {
   1873 	struct wm_softc *sc = txq->txq_sc;
   1874 
   1875 	/* If it will wrap around, sync to the end of the ring. */
   1876 	if ((start + num) > WM_NTXDESC(txq)) {
   1877 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1878 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1879 		    (WM_NTXDESC(txq) - start), ops);
   1880 		num -= (WM_NTXDESC(txq) - start);
   1881 		start = 0;
   1882 	}
   1883 
   1884 	/* Now sync whatever is left. */
   1885 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1886 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1887 }
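
/*
 * Worked example (sketch): on a 256-descriptor ring, syncing 8
 * descriptors starting at index 252 becomes two bus_dmamap_sync()
 * calls -- one for descriptors 252..255 and one for 0..3 -- because
 * the DMA map covers the ring as a single linear region.
 */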
   1888 
   1889 static inline void
   1890 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1891 {
   1892 	struct wm_softc *sc = rxq->rxq_sc;
   1893 
   1894 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1895 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1896 }
   1897 
   1898 static inline void
   1899 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1900 {
   1901 	struct wm_softc *sc = rxq->rxq_sc;
   1902 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1903 	struct mbuf *m = rxs->rxs_mbuf;
   1904 
   1905 	/*
   1906 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1907 	 * so that the payload after the Ethernet header is aligned
   1908 	 * to a 4-byte boundary.
    1909 	 *
   1910 	 * XXX BRAINDAMAGE ALERT!
   1911 	 * The stupid chip uses the same size for every buffer, which
   1912 	 * is set in the Receive Control register.  We are using the 2K
   1913 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1914 	 * reason, we can't "scoot" packets longer than the standard
   1915 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1916 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1917 	 * the upper layer copy the headers.
   1918 	 */
   1919 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1920 
   1921 	if (sc->sc_type == WM_T_82574) {
   1922 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1923 		rxd->erx_data.erxd_addr =
   1924 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1925 		rxd->erx_data.erxd_dd = 0;
   1926 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1927 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1928 
   1929 		rxd->nqrx_data.nrxd_paddr =
   1930 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
    1931 		/* Currently, header split is not supported. */
   1932 		rxd->nqrx_data.nrxd_haddr = 0;
   1933 	} else {
   1934 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1935 
   1936 		wm_set_dma_addr(&rxd->wrx_addr,
   1937 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1938 		rxd->wrx_len = 0;
   1939 		rxd->wrx_cksum = 0;
   1940 		rxd->wrx_status = 0;
   1941 		rxd->wrx_errors = 0;
   1942 		rxd->wrx_special = 0;
   1943 	}
   1944 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1945 
   1946 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1947 }
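
/*
 * Worked example for the align_tweak logic above (sketch): with the 2K
 * receive buffers and a standard 1500-byte MTU, a maximal frame plus the
 * 2-byte tweak is 1514 + 2 = 1516 bytes, which still fits in 2048 bytes,
 * so sc_align_tweak can be 2 and the IP header lands on a 4-byte
 * boundary.  With jumbo frames on a strict-alignment platform the total
 * would exceed (2K - 2), so the tweak must be 0 as described above.
 */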
   1948 
   1949 /*
   1950  * Device driver interface functions and commonly used functions.
   1951  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1952  */
   1953 
   1954 /* Lookup supported device table */
   1955 static const struct wm_product *
   1956 wm_lookup(const struct pci_attach_args *pa)
   1957 {
   1958 	const struct wm_product *wmp;
   1959 
   1960 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1961 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1962 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1963 			return wmp;
   1964 	}
   1965 	return NULL;
   1966 }
   1967 
   1968 /* The match function (ca_match) */
   1969 static int
   1970 wm_match(device_t parent, cfdata_t cf, void *aux)
   1971 {
   1972 	struct pci_attach_args *pa = aux;
   1973 
   1974 	if (wm_lookup(pa) != NULL)
   1975 		return 1;
   1976 
   1977 	return 0;
   1978 }
   1979 
   1980 /* The attach function (ca_attach) */
   1981 static void
   1982 wm_attach(device_t parent, device_t self, void *aux)
   1983 {
   1984 	struct wm_softc *sc = device_private(self);
   1985 	struct pci_attach_args *pa = aux;
   1986 	prop_dictionary_t dict;
   1987 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1988 	pci_chipset_tag_t pc = pa->pa_pc;
   1989 	int counts[PCI_INTR_TYPE_SIZE];
   1990 	pci_intr_type_t max_type;
   1991 	const char *eetype, *xname;
   1992 	bus_space_tag_t memt;
   1993 	bus_space_handle_t memh;
   1994 	bus_size_t memsize;
   1995 	int memh_valid;
   1996 	int i, error;
   1997 	const struct wm_product *wmp;
   1998 	prop_data_t ea;
   1999 	prop_number_t pn;
   2000 	uint8_t enaddr[ETHER_ADDR_LEN];
   2001 	char buf[256];
   2002 	char wqname[MAXCOMLEN];
   2003 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2004 	pcireg_t preg, memtype;
   2005 	uint16_t eeprom_data, apme_mask;
   2006 	bool force_clear_smbi;
   2007 	uint32_t link_mode;
   2008 	uint32_t reg;
   2009 
   2010 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2011 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2012 #endif
   2013 	sc->sc_dev = self;
   2014 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2015 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2016 	sc->sc_core_stopping = false;
   2017 
   2018 	wmp = wm_lookup(pa);
   2019 #ifdef DIAGNOSTIC
   2020 	if (wmp == NULL) {
   2021 		printf("\n");
   2022 		panic("wm_attach: impossible");
   2023 	}
   2024 #endif
   2025 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2026 
   2027 	sc->sc_pc = pa->pa_pc;
   2028 	sc->sc_pcitag = pa->pa_tag;
   2029 
   2030 	if (pci_dma64_available(pa)) {
   2031 		aprint_verbose(", 64-bit DMA");
   2032 		sc->sc_dmat = pa->pa_dmat64;
   2033 	} else {
   2034 		aprint_verbose(", 32-bit DMA");
   2035 		sc->sc_dmat = pa->pa_dmat;
   2036 	}
   2037 
   2038 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2039 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2040 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2041 
   2042 	sc->sc_type = wmp->wmp_type;
   2043 
   2044 	/* Set default function pointers */
   2045 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2046 	sc->phy.release = sc->nvm.release = wm_put_null;
   2047 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2048 
   2049 	if (sc->sc_type < WM_T_82543) {
   2050 		if (sc->sc_rev < 2) {
   2051 			aprint_error_dev(sc->sc_dev,
   2052 			    "i82542 must be at least rev. 2\n");
   2053 			return;
   2054 		}
   2055 		if (sc->sc_rev < 3)
   2056 			sc->sc_type = WM_T_82542_2_0;
   2057 	}
   2058 
   2059 	/*
   2060 	 * Disable MSI for Errata:
   2061 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2062 	 *
   2063 	 *  82544: Errata 25
   2064 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2065 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2066 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2067 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2068 	 *
   2069 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2070 	 *
   2071 	 *  82571 & 82572: Errata 63
   2072 	 */
   2073 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2074 	    || (sc->sc_type == WM_T_82572))
   2075 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2076 
   2077 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2078 	    || (sc->sc_type == WM_T_82580)
   2079 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2080 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2081 		sc->sc_flags |= WM_F_NEWQUEUE;
   2082 
   2083 	/* Set device properties (mactype) */
   2084 	dict = device_properties(sc->sc_dev);
   2085 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2086 
   2087 	/*
    2088 	 * Map the device.  All devices support memory-mapped access,
   2089 	 * and it is really required for normal operation.
   2090 	 */
   2091 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2092 	switch (memtype) {
   2093 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2094 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2095 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2096 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2097 		break;
   2098 	default:
   2099 		memh_valid = 0;
   2100 		break;
   2101 	}
   2102 
   2103 	if (memh_valid) {
   2104 		sc->sc_st = memt;
   2105 		sc->sc_sh = memh;
   2106 		sc->sc_ss = memsize;
   2107 	} else {
   2108 		aprint_error_dev(sc->sc_dev,
   2109 		    "unable to map device registers\n");
   2110 		return;
   2111 	}
   2112 
   2113 	/*
   2114 	 * In addition, i82544 and later support I/O mapped indirect
   2115 	 * register access.  It is not desirable (nor supported in
   2116 	 * this driver) to use it for normal operation, though it is
   2117 	 * required to work around bugs in some chip versions.
   2118 	 */
   2119 	switch (sc->sc_type) {
   2120 	case WM_T_82544:
   2121 	case WM_T_82541:
   2122 	case WM_T_82541_2:
   2123 	case WM_T_82547:
   2124 	case WM_T_82547_2:
   2125 		/* First we have to find the I/O BAR. */
   2126 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2127 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2128 			if (memtype == PCI_MAPREG_TYPE_IO)
   2129 				break;
   2130 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2131 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2132 				i += 4;	/* skip high bits, too */
   2133 		}
   2134 		if (i < PCI_MAPREG_END) {
   2135 			/*
    2136 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2137 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2138 			 * That's no problem, because newer chips don't have
    2139 			 * this bug.
    2140 			 *
    2141 			 * The i8254x apparently doesn't respond when the
    2142 			 * I/O BAR is 0, which looks as if it has not been
    2143 			 * configured.
   2144 			 */
   2145 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2146 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2147 				aprint_error_dev(sc->sc_dev,
   2148 				    "WARNING: I/O BAR at zero.\n");
   2149 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2150 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2151 			    == 0) {
   2152 				sc->sc_flags |= WM_F_IOH_VALID;
   2153 			} else
   2154 				aprint_error_dev(sc->sc_dev,
   2155 				    "WARNING: unable to map I/O space\n");
   2156 		}
   2157 		break;
   2158 	default:
   2159 		break;
   2160 	}
   2161 
   2162 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2163 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2164 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2165 	if (sc->sc_type < WM_T_82542_2_1)
   2166 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2167 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2168 
   2169 	/* Power up chip */
   2170 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2171 	    && error != EOPNOTSUPP) {
   2172 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2173 		return;
   2174 	}
   2175 
   2176 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2177 	/*
    2178 	 *  If we can use only one queue, don't use MSI-X; this saves
    2179 	 * interrupt resources.
   2180 	 */
   2181 	if (sc->sc_nqueues > 1) {
   2182 		max_type = PCI_INTR_TYPE_MSIX;
   2183 		/*
    2184 		 *  The 82583 advertises an MSI-X capability in its PCI
    2185 		 * configuration space but doesn't actually support it; at
    2186 		 * least the documentation says nothing about MSI-X.
   2187 		 */
   2188 		counts[PCI_INTR_TYPE_MSIX]
   2189 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2190 	} else {
   2191 		max_type = PCI_INTR_TYPE_MSI;
   2192 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2193 	}
   2194 
   2195 	/* Allocation settings */
   2196 	counts[PCI_INTR_TYPE_MSI] = 1;
   2197 	counts[PCI_INTR_TYPE_INTX] = 1;
   2198 	/* overridden by disable flags */
   2199 	if (wm_disable_msi != 0) {
   2200 		counts[PCI_INTR_TYPE_MSI] = 0;
   2201 		if (wm_disable_msix != 0) {
   2202 			max_type = PCI_INTR_TYPE_INTX;
   2203 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2204 		}
   2205 	} else if (wm_disable_msix != 0) {
   2206 		max_type = PCI_INTR_TYPE_MSI;
   2207 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2208 	}
   2209 
   2210 alloc_retry:
   2211 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2212 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2213 		return;
   2214 	}
   2215 
   2216 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2217 		error = wm_setup_msix(sc);
   2218 		if (error) {
   2219 			pci_intr_release(pc, sc->sc_intrs,
   2220 			    counts[PCI_INTR_TYPE_MSIX]);
   2221 
   2222 			/* Setup for MSI: Disable MSI-X */
   2223 			max_type = PCI_INTR_TYPE_MSI;
   2224 			counts[PCI_INTR_TYPE_MSI] = 1;
   2225 			counts[PCI_INTR_TYPE_INTX] = 1;
   2226 			goto alloc_retry;
   2227 		}
   2228 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2229 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2230 		error = wm_setup_legacy(sc);
   2231 		if (error) {
   2232 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2233 			    counts[PCI_INTR_TYPE_MSI]);
   2234 
   2235 			/* The next try is for INTx: Disable MSI */
   2236 			max_type = PCI_INTR_TYPE_INTX;
   2237 			counts[PCI_INTR_TYPE_INTX] = 1;
   2238 			goto alloc_retry;
   2239 		}
   2240 	} else {
   2241 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2242 		error = wm_setup_legacy(sc);
   2243 		if (error) {
   2244 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2245 			    counts[PCI_INTR_TYPE_INTX]);
   2246 			return;
   2247 		}
   2248 	}
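
	/*
	 * Summary of the fallback ladder above (illustrative): MSI-X is
	 * tried first with nqueues + 1 vectors; if wm_setup_msix() fails,
	 * the vectors are released and a single MSI is tried; if that
	 * fails too, a single INTx line is used.  Each retry re-enters
	 * pci_intr_alloc() through the alloc_retry label.
	 */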
   2249 
   2250 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2251 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2252 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2253 	    WQ_PERCPU | WQ_MPSAFE);
   2254 	if (error) {
   2255 		aprint_error_dev(sc->sc_dev,
   2256 		    "unable to create TxRx workqueue\n");
   2257 		goto out;
   2258 	}
   2259 
   2260 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2261 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2262 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2263 	    WQ_MPSAFE);
   2264 	if (error) {
   2265 		workqueue_destroy(sc->sc_queue_wq);
   2266 		aprint_error_dev(sc->sc_dev,
   2267 		    "unable to create reset workqueue\n");
   2268 		goto out;
   2269 	}
   2270 
   2271 	/*
   2272 	 * Check the function ID (unit number of the chip).
   2273 	 */
   2274 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2275 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2276 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2277 	    || (sc->sc_type == WM_T_82580)
   2278 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2279 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2280 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2281 	else
   2282 		sc->sc_funcid = 0;
   2283 
   2284 	/*
   2285 	 * Determine a few things about the bus we're connected to.
   2286 	 */
   2287 	if (sc->sc_type < WM_T_82543) {
   2288 		/* We don't really know the bus characteristics here. */
   2289 		sc->sc_bus_speed = 33;
   2290 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2291 		/*
    2292 		 * CSA (Communication Streaming Architecture) is about as fast
    2293 		 * as a 32-bit 66MHz PCI bus.
   2294 		 */
   2295 		sc->sc_flags |= WM_F_CSA;
   2296 		sc->sc_bus_speed = 66;
   2297 		aprint_verbose_dev(sc->sc_dev,
   2298 		    "Communication Streaming Architecture\n");
   2299 		if (sc->sc_type == WM_T_82547) {
   2300 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2301 			callout_setfunc(&sc->sc_txfifo_ch,
   2302 			    wm_82547_txfifo_stall, sc);
   2303 			aprint_verbose_dev(sc->sc_dev,
   2304 			    "using 82547 Tx FIFO stall work-around\n");
   2305 		}
   2306 	} else if (sc->sc_type >= WM_T_82571) {
   2307 		sc->sc_flags |= WM_F_PCIE;
   2308 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2309 		    && (sc->sc_type != WM_T_ICH10)
   2310 		    && (sc->sc_type != WM_T_PCH)
   2311 		    && (sc->sc_type != WM_T_PCH2)
   2312 		    && (sc->sc_type != WM_T_PCH_LPT)
   2313 		    && (sc->sc_type != WM_T_PCH_SPT)
   2314 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2315 			/* ICH* and PCH* have no PCIe capability registers */
   2316 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2317 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2318 				NULL) == 0)
   2319 				aprint_error_dev(sc->sc_dev,
   2320 				    "unable to find PCIe capability\n");
   2321 		}
   2322 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2323 	} else {
   2324 		reg = CSR_READ(sc, WMREG_STATUS);
   2325 		if (reg & STATUS_BUS64)
   2326 			sc->sc_flags |= WM_F_BUS64;
   2327 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2328 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2329 
   2330 			sc->sc_flags |= WM_F_PCIX;
   2331 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2332 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2333 				aprint_error_dev(sc->sc_dev,
   2334 				    "unable to find PCIX capability\n");
   2335 			else if (sc->sc_type != WM_T_82545_3 &&
   2336 			    sc->sc_type != WM_T_82546_3) {
   2337 				/*
   2338 				 * Work around a problem caused by the BIOS
   2339 				 * setting the max memory read byte count
   2340 				 * incorrectly.
   2341 				 */
   2342 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2343 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2344 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2345 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2346 
   2347 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2348 				    PCIX_CMD_BYTECNT_SHIFT;
   2349 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2350 				    PCIX_STATUS_MAXB_SHIFT;
   2351 				if (bytecnt > maxb) {
   2352 					aprint_verbose_dev(sc->sc_dev,
   2353 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2354 					    512 << bytecnt, 512 << maxb);
   2355 					pcix_cmd = (pcix_cmd &
   2356 					    ~PCIX_CMD_BYTECNT_MASK) |
   2357 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2358 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2359 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2360 					    pcix_cmd);
   2361 				}
   2362 			}
   2363 		}
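
		/*
		 * Worked example for the MMRBC fix above (sketch): the
		 * PCI-X byte-count fields encode 512 << n, so bytecnt = 3
		 * requests a 4096-byte MMRBC while maxb = 2 caps the
		 * device at 2048 bytes; the command register is then
		 * rewritten with the 2048-byte encoding.
		 */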
   2364 		/*
   2365 		 * The quad port adapter is special; it has a PCIX-PCIX
   2366 		 * bridge on the board, and can run the secondary bus at
   2367 		 * a higher speed.
   2368 		 */
   2369 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2370 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2371 								      : 66;
   2372 		} else if (sc->sc_flags & WM_F_PCIX) {
   2373 			switch (reg & STATUS_PCIXSPD_MASK) {
   2374 			case STATUS_PCIXSPD_50_66:
   2375 				sc->sc_bus_speed = 66;
   2376 				break;
   2377 			case STATUS_PCIXSPD_66_100:
   2378 				sc->sc_bus_speed = 100;
   2379 				break;
   2380 			case STATUS_PCIXSPD_100_133:
   2381 				sc->sc_bus_speed = 133;
   2382 				break;
   2383 			default:
   2384 				aprint_error_dev(sc->sc_dev,
   2385 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2386 				    reg & STATUS_PCIXSPD_MASK);
   2387 				sc->sc_bus_speed = 66;
   2388 				break;
   2389 			}
   2390 		} else
   2391 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2392 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2393 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2394 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2395 	}
   2396 
   2397 	/* clear interesting stat counters */
   2398 	CSR_READ(sc, WMREG_COLC);
   2399 	CSR_READ(sc, WMREG_RXERRC);
   2400 
   2401 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2402 	    || (sc->sc_type >= WM_T_ICH8))
   2403 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2404 	if (sc->sc_type >= WM_T_ICH8)
   2405 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2406 
   2407 	/* Set PHY, NVM mutex related stuff */
   2408 	switch (sc->sc_type) {
   2409 	case WM_T_82542_2_0:
   2410 	case WM_T_82542_2_1:
   2411 	case WM_T_82543:
   2412 	case WM_T_82544:
   2413 		/* Microwire */
   2414 		sc->nvm.read = wm_nvm_read_uwire;
   2415 		sc->sc_nvm_wordsize = 64;
   2416 		sc->sc_nvm_addrbits = 6;
   2417 		break;
   2418 	case WM_T_82540:
   2419 	case WM_T_82545:
   2420 	case WM_T_82545_3:
   2421 	case WM_T_82546:
   2422 	case WM_T_82546_3:
   2423 		/* Microwire */
   2424 		sc->nvm.read = wm_nvm_read_uwire;
   2425 		reg = CSR_READ(sc, WMREG_EECD);
   2426 		if (reg & EECD_EE_SIZE) {
   2427 			sc->sc_nvm_wordsize = 256;
   2428 			sc->sc_nvm_addrbits = 8;
   2429 		} else {
   2430 			sc->sc_nvm_wordsize = 64;
   2431 			sc->sc_nvm_addrbits = 6;
   2432 		}
   2433 		sc->sc_flags |= WM_F_LOCK_EECD;
   2434 		sc->nvm.acquire = wm_get_eecd;
   2435 		sc->nvm.release = wm_put_eecd;
   2436 		break;
   2437 	case WM_T_82541:
   2438 	case WM_T_82541_2:
   2439 	case WM_T_82547:
   2440 	case WM_T_82547_2:
   2441 		reg = CSR_READ(sc, WMREG_EECD);
   2442 		/*
    2443 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2444 		 * 8254[17], so set flags and functions before calling it.
   2445 		 */
   2446 		sc->sc_flags |= WM_F_LOCK_EECD;
   2447 		sc->nvm.acquire = wm_get_eecd;
   2448 		sc->nvm.release = wm_put_eecd;
   2449 		if (reg & EECD_EE_TYPE) {
   2450 			/* SPI */
   2451 			sc->nvm.read = wm_nvm_read_spi;
   2452 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2453 			wm_nvm_set_addrbits_size_eecd(sc);
   2454 		} else {
   2455 			/* Microwire */
   2456 			sc->nvm.read = wm_nvm_read_uwire;
   2457 			if ((reg & EECD_EE_ABITS) != 0) {
   2458 				sc->sc_nvm_wordsize = 256;
   2459 				sc->sc_nvm_addrbits = 8;
   2460 			} else {
   2461 				sc->sc_nvm_wordsize = 64;
   2462 				sc->sc_nvm_addrbits = 6;
   2463 			}
   2464 		}
   2465 		break;
   2466 	case WM_T_82571:
   2467 	case WM_T_82572:
   2468 		/* SPI */
   2469 		sc->nvm.read = wm_nvm_read_eerd;
    2470 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2471 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2472 		wm_nvm_set_addrbits_size_eecd(sc);
   2473 		sc->phy.acquire = wm_get_swsm_semaphore;
   2474 		sc->phy.release = wm_put_swsm_semaphore;
   2475 		sc->nvm.acquire = wm_get_nvm_82571;
   2476 		sc->nvm.release = wm_put_nvm_82571;
   2477 		break;
   2478 	case WM_T_82573:
   2479 	case WM_T_82574:
   2480 	case WM_T_82583:
   2481 		sc->nvm.read = wm_nvm_read_eerd;
    2482 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2483 		if (sc->sc_type == WM_T_82573) {
   2484 			sc->phy.acquire = wm_get_swsm_semaphore;
   2485 			sc->phy.release = wm_put_swsm_semaphore;
   2486 			sc->nvm.acquire = wm_get_nvm_82571;
   2487 			sc->nvm.release = wm_put_nvm_82571;
   2488 		} else {
   2489 			/* Both PHY and NVM use the same semaphore. */
   2490 			sc->phy.acquire = sc->nvm.acquire
   2491 			    = wm_get_swfwhw_semaphore;
   2492 			sc->phy.release = sc->nvm.release
   2493 			    = wm_put_swfwhw_semaphore;
   2494 		}
   2495 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2496 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2497 			sc->sc_nvm_wordsize = 2048;
   2498 		} else {
   2499 			/* SPI */
   2500 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2501 			wm_nvm_set_addrbits_size_eecd(sc);
   2502 		}
   2503 		break;
   2504 	case WM_T_82575:
   2505 	case WM_T_82576:
   2506 	case WM_T_82580:
   2507 	case WM_T_I350:
   2508 	case WM_T_I354:
   2509 	case WM_T_80003:
   2510 		/* SPI */
   2511 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2512 		wm_nvm_set_addrbits_size_eecd(sc);
   2513 		if ((sc->sc_type == WM_T_80003)
   2514 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2515 			sc->nvm.read = wm_nvm_read_eerd;
   2516 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2517 		} else {
   2518 			sc->nvm.read = wm_nvm_read_spi;
   2519 			sc->sc_flags |= WM_F_LOCK_EECD;
   2520 		}
   2521 		sc->phy.acquire = wm_get_phy_82575;
   2522 		sc->phy.release = wm_put_phy_82575;
   2523 		sc->nvm.acquire = wm_get_nvm_80003;
   2524 		sc->nvm.release = wm_put_nvm_80003;
   2525 		break;
   2526 	case WM_T_ICH8:
   2527 	case WM_T_ICH9:
   2528 	case WM_T_ICH10:
   2529 	case WM_T_PCH:
   2530 	case WM_T_PCH2:
   2531 	case WM_T_PCH_LPT:
   2532 		sc->nvm.read = wm_nvm_read_ich8;
   2533 		/* FLASH */
   2534 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2535 		sc->sc_nvm_wordsize = 2048;
   2536 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2537 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2538 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2539 			aprint_error_dev(sc->sc_dev,
   2540 			    "can't map FLASH registers\n");
   2541 			goto out;
   2542 		}
   2543 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2544 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2545 		    ICH_FLASH_SECTOR_SIZE;
   2546 		sc->sc_ich8_flash_bank_size =
   2547 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2548 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2549 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2550 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
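		/*
		 * Worked example (sketch, assuming 4 KB flash sectors):
		 * if GFPREG's base field is 1 and its limit field is 2,
		 * the NVM region starts at 4096 bytes and spans
		 * (2 + 1 - 1) * 4096 = 8192 bytes; halving for the two
		 * banks and converting bytes to 16-bit words gives 2048
		 * words per bank.
		 */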
   2551 		sc->sc_flashreg_offset = 0;
   2552 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2553 		sc->phy.release = wm_put_swflag_ich8lan;
   2554 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2555 		sc->nvm.release = wm_put_nvm_ich8lan;
   2556 		break;
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
   2559 		sc->nvm.read = wm_nvm_read_spt;
   2560 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2561 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2562 		sc->sc_flasht = sc->sc_st;
   2563 		sc->sc_flashh = sc->sc_sh;
   2564 		sc->sc_ich8_flash_base = 0;
   2565 		sc->sc_nvm_wordsize =
   2566 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2567 		    * NVM_SIZE_MULTIPLIER;
    2568 		/* It's the size in bytes; we want words */
   2569 		sc->sc_nvm_wordsize /= 2;
   2570 		/* Assume 2 banks */
   2571 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
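		/*
		 * Worked example (sketch): a STRAP field value of 7 gives
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes; assuming the usual
		 * 4096-byte multiplier, that is 32768 bytes = 16384 words,
		 * i.e. 8192 words per bank.
		 */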
   2572 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2573 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2574 		sc->phy.release = wm_put_swflag_ich8lan;
   2575 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2576 		sc->nvm.release = wm_put_nvm_ich8lan;
   2577 		break;
   2578 	case WM_T_I210:
   2579 	case WM_T_I211:
    2580 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2581 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2582 		if (wm_nvm_flash_presence_i210(sc)) {
   2583 			sc->nvm.read = wm_nvm_read_eerd;
   2584 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2585 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2586 			wm_nvm_set_addrbits_size_eecd(sc);
   2587 		} else {
   2588 			sc->nvm.read = wm_nvm_read_invm;
   2589 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2590 			sc->sc_nvm_wordsize = INVM_SIZE;
   2591 		}
   2592 		sc->phy.acquire = wm_get_phy_82575;
   2593 		sc->phy.release = wm_put_phy_82575;
   2594 		sc->nvm.acquire = wm_get_nvm_80003;
   2595 		sc->nvm.release = wm_put_nvm_80003;
   2596 		break;
   2597 	default:
   2598 		break;
   2599 	}
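
	/*
	 * Illustrative use of the accessors configured above (sketch, not
	 * driver code): NVM reads go through acquire/read/release so the
	 * chip-specific semaphore protocol is honored regardless of the
	 * NVM type.  NVM_OFF_CFG1 is just an example offset.
	 */
#if 0 /* Example only */
	{
		uint16_t word;

		if (sc->nvm.acquire(sc) == 0) {
			sc->nvm.read(sc, NVM_OFF_CFG1, 1, &word);
			sc->nvm.release(sc);
		}
	}
#endif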
   2600 
   2601 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2602 	switch (sc->sc_type) {
   2603 	case WM_T_82571:
   2604 	case WM_T_82572:
   2605 		reg = CSR_READ(sc, WMREG_SWSM2);
   2606 		if ((reg & SWSM2_LOCK) == 0) {
   2607 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2608 			force_clear_smbi = true;
   2609 		} else
   2610 			force_clear_smbi = false;
   2611 		break;
   2612 	case WM_T_82573:
   2613 	case WM_T_82574:
   2614 	case WM_T_82583:
   2615 		force_clear_smbi = true;
   2616 		break;
   2617 	default:
   2618 		force_clear_smbi = false;
   2619 		break;
   2620 	}
   2621 	if (force_clear_smbi) {
   2622 		reg = CSR_READ(sc, WMREG_SWSM);
   2623 		if ((reg & SWSM_SMBI) != 0)
   2624 			aprint_error_dev(sc->sc_dev,
   2625 			    "Please update the Bootagent\n");
   2626 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2627 	}
   2628 
   2629 	/*
    2630 	 * Defer printing the EEPROM type until after verifying the checksum.
   2631 	 * This allows the EEPROM type to be printed correctly in the case
   2632 	 * that no EEPROM is attached.
   2633 	 */
   2634 	/*
   2635 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2636 	 * this for later, so we can fail future reads from the EEPROM.
   2637 	 */
   2638 	if (wm_nvm_validate_checksum(sc)) {
   2639 		/*
    2640 		 * Validate a second time, because some PCI-e parts fail the
    2641 		 * first check due to the link being in a sleep state.
   2642 		 */
   2643 		if (wm_nvm_validate_checksum(sc))
   2644 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2645 	}
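
         	/*
         	 * For reference, the check above follows the usual Intel NVM
         	 * checksum scheme (a sketch; the real code lives in
         	 * wm_nvm_validate_checksum(), and NVM_SUM is assumed to be
         	 * the customary 0xbaba signature):
         	 *
         	 *	uint16_t sum = 0, word;
         	 *	for (int i = 0; i < NVM_SIZE; i++) {
         	 *		if (wm_nvm_read(sc, i, 1, &word) != 0)
         	 *			return 1;
         	 *		sum += word;
         	 *	}
         	 *	return (sum == NVM_SUM) ? 0 : 1;
         	 */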
   2646 
   2647 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2648 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2649 	else {
   2650 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2651 		    sc->sc_nvm_wordsize);
   2652 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2653 			aprint_verbose("iNVM");
   2654 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2655 			aprint_verbose("FLASH(HW)");
   2656 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2657 			aprint_verbose("FLASH");
   2658 		else {
   2659 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2660 				eetype = "SPI";
   2661 			else
   2662 				eetype = "MicroWire";
   2663 			aprint_verbose("(%d address bits) %s EEPROM",
   2664 			    sc->sc_nvm_addrbits, eetype);
   2665 		}
   2666 	}
   2667 	wm_nvm_version(sc);
   2668 	aprint_verbose("\n");
   2669 
   2670 	/*
    2671 	 * XXX First call of wm_gmii_setup_phytype(); the result might
    2672 	 * be incorrect. It is called again below once the media is known.
   2673 	 */
   2674 	wm_gmii_setup_phytype(sc, 0, 0);
   2675 
   2676 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2677 	switch (sc->sc_type) {
   2678 	case WM_T_ICH8:
   2679 	case WM_T_ICH9:
   2680 	case WM_T_ICH10:
   2681 	case WM_T_PCH:
   2682 	case WM_T_PCH2:
   2683 	case WM_T_PCH_LPT:
   2684 	case WM_T_PCH_SPT:
   2685 	case WM_T_PCH_CNP:
   2686 		apme_mask = WUC_APME;
   2687 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2688 		if ((eeprom_data & apme_mask) != 0)
   2689 			sc->sc_flags |= WM_F_WOL;
   2690 		break;
   2691 	default:
   2692 		break;
   2693 	}
   2694 
   2695 	/* Reset the chip to a known state. */
   2696 	wm_reset(sc);
   2697 
   2698 	/*
   2699 	 * Check for I21[01] PLL workaround.
   2700 	 *
   2701 	 * Three cases:
   2702 	 * a) Chip is I211.
   2703 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2704 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2705 	 */
   2706 	if (sc->sc_type == WM_T_I211)
   2707 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2708 	if (sc->sc_type == WM_T_I210) {
   2709 		if (!wm_nvm_flash_presence_i210(sc))
   2710 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2711 		else if ((sc->sc_nvm_ver_major < 3)
   2712 		    || ((sc->sc_nvm_ver_major == 3)
   2713 			&& (sc->sc_nvm_ver_minor < 25))) {
   2714 			aprint_verbose_dev(sc->sc_dev,
   2715 			    "ROM image version %d.%d is older than 3.25\n",
   2716 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2717 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2718 		}
   2719 	}
   2720 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2721 		wm_pll_workaround_i210(sc);
   2722 
   2723 	wm_get_wakeup(sc);
   2724 
   2725 	/* Non-AMT based hardware can now take control from firmware */
   2726 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2727 		wm_get_hw_control(sc);
   2728 
   2729 	/*
   2730 	 * Read the Ethernet address from the EEPROM, if not first found
   2731 	 * in device properties.
   2732 	 */
   2733 	ea = prop_dictionary_get(dict, "mac-address");
   2734 	if (ea != NULL) {
   2735 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2736 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2737 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2738 	} else {
   2739 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2740 			aprint_error_dev(sc->sc_dev,
   2741 			    "unable to read Ethernet address\n");
   2742 			goto out;
   2743 		}
   2744 	}
   2745 
   2746 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2747 	    ether_sprintf(enaddr));
   2748 
   2749 	/*
   2750 	 * Read the config info from the EEPROM, and set up various
   2751 	 * bits in the control registers based on their contents.
   2752 	 */
   2753 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2754 	if (pn != NULL) {
   2755 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2756 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2757 	} else {
   2758 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2759 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2760 			goto out;
   2761 		}
   2762 	}
   2763 
   2764 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2765 	if (pn != NULL) {
   2766 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2767 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2768 	} else {
   2769 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2770 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2771 			goto out;
   2772 		}
   2773 	}
   2774 
    2775 	/* Check for WM_F_WOL. */
   2776 	switch (sc->sc_type) {
   2777 	case WM_T_82542_2_0:
   2778 	case WM_T_82542_2_1:
   2779 	case WM_T_82543:
   2780 		/* dummy? */
   2781 		eeprom_data = 0;
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		break;
   2784 	case WM_T_82544:
   2785 		apme_mask = NVM_CFG2_82544_APM_EN;
   2786 		eeprom_data = cfg2;
   2787 		break;
   2788 	case WM_T_82546:
   2789 	case WM_T_82546_3:
   2790 	case WM_T_82571:
   2791 	case WM_T_82572:
   2792 	case WM_T_82573:
   2793 	case WM_T_82574:
   2794 	case WM_T_82583:
   2795 	case WM_T_80003:
   2796 	case WM_T_82575:
   2797 	case WM_T_82576:
   2798 		apme_mask = NVM_CFG3_APME;
   2799 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2800 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2801 		break;
   2802 	case WM_T_82580:
   2803 	case WM_T_I350:
   2804 	case WM_T_I354:
   2805 	case WM_T_I210:
   2806 	case WM_T_I211:
   2807 		apme_mask = NVM_CFG3_APME;
   2808 		wm_nvm_read(sc,
   2809 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2810 		    1, &eeprom_data);
   2811 		break;
   2812 	case WM_T_ICH8:
   2813 	case WM_T_ICH9:
   2814 	case WM_T_ICH10:
   2815 	case WM_T_PCH:
   2816 	case WM_T_PCH2:
   2817 	case WM_T_PCH_LPT:
   2818 	case WM_T_PCH_SPT:
   2819 	case WM_T_PCH_CNP:
    2820 		/* Already checked before wm_reset(). */
   2821 		apme_mask = eeprom_data = 0;
   2822 		break;
   2823 	default: /* XXX 82540 */
   2824 		apme_mask = NVM_CFG3_APME;
   2825 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2826 		break;
   2827 	}
    2828 	/* Check for the WM_F_WOL flag after reading the EEPROM settings. */
   2829 	if ((eeprom_data & apme_mask) != 0)
   2830 		sc->sc_flags |= WM_F_WOL;
   2831 
   2832 	/*
    2833 	 * We have the EEPROM settings; now apply the special cases
    2834 	 * where the EEPROM may be wrong or the board won't support
    2835 	 * wake-on-LAN on a particular port.
   2836 	 */
   2837 	switch (sc->sc_pcidevid) {
   2838 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2839 		sc->sc_flags &= ~WM_F_WOL;
   2840 		break;
   2841 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2842 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2843 		/* Wake events only supported on port A for dual fiber
   2844 		 * regardless of eeprom setting */
   2845 		if (sc->sc_funcid == 1)
   2846 			sc->sc_flags &= ~WM_F_WOL;
   2847 		break;
   2848 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2849 		/* If quad port adapter, disable WoL on all but port A */
   2850 		if (sc->sc_funcid != 0)
   2851 			sc->sc_flags &= ~WM_F_WOL;
   2852 		break;
   2853 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2854 		/* Wake events only supported on port A for dual fiber
   2855 		 * regardless of eeprom setting */
   2856 		if (sc->sc_funcid == 1)
   2857 			sc->sc_flags &= ~WM_F_WOL;
   2858 		break;
   2859 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2860 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2861 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2862 		/* If quad port adapter, disable WoL on all but port A */
   2863 		if (sc->sc_funcid != 0)
   2864 			sc->sc_flags &= ~WM_F_WOL;
   2865 		break;
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_82575) {
   2869 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2870 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2871 			    nvmword);
   2872 			if ((sc->sc_type == WM_T_82575) ||
   2873 			    (sc->sc_type == WM_T_82576)) {
   2874 				/* Check NVM for autonegotiation */
   2875 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2876 				    != 0)
   2877 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2878 			}
   2879 			if ((sc->sc_type == WM_T_82575) ||
   2880 			    (sc->sc_type == WM_T_I350)) {
   2881 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2882 					sc->sc_flags |= WM_F_MAS;
   2883 			}
   2884 		}
   2885 	}
   2886 
   2887 	/*
    2888 	 * XXX Some multiport cards need special handling to disable
    2889 	 * a particular port.
   2890 	 */
   2891 
   2892 	if (sc->sc_type >= WM_T_82544) {
   2893 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2894 		if (pn != NULL) {
   2895 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2896 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2897 		} else {
   2898 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2899 				aprint_error_dev(sc->sc_dev,
   2900 				    "unable to read SWDPIN\n");
   2901 				goto out;
   2902 			}
   2903 		}
   2904 	}
   2905 
   2906 	if (cfg1 & NVM_CFG1_ILOS)
   2907 		sc->sc_ctrl |= CTRL_ILOS;
   2908 
   2909 	/*
   2910 	 * XXX
    2911 	 * This code isn't correct because pins 2 and 3 are located
    2912 	 * at different positions on newer chips. Check all datasheets.
    2913 	 *
    2914 	 * Until this is resolved, only handle chips <= 82580.
   2915 	 */
   2916 	if (sc->sc_type <= WM_T_82580) {
   2917 		if (sc->sc_type >= WM_T_82544) {
   2918 			sc->sc_ctrl |=
   2919 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2920 			    CTRL_SWDPIO_SHIFT;
   2921 			sc->sc_ctrl |=
   2922 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2923 			    CTRL_SWDPINS_SHIFT;
   2924 		} else {
   2925 			sc->sc_ctrl |=
   2926 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2927 			    CTRL_SWDPIO_SHIFT;
   2928 		}
   2929 	}
   2930 
   2931 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2932 		wm_nvm_read(sc,
   2933 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2934 		    1, &nvmword);
   2935 		if (nvmword & NVM_CFG3_ILOS)
   2936 			sc->sc_ctrl |= CTRL_ILOS;
   2937 	}
   2938 
   2939 #if 0
   2940 	if (sc->sc_type >= WM_T_82544) {
   2941 		if (cfg1 & NVM_CFG1_IPS0)
   2942 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2943 		if (cfg1 & NVM_CFG1_IPS1)
   2944 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2945 		sc->sc_ctrl_ext |=
   2946 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2947 		    CTRL_EXT_SWDPIO_SHIFT;
   2948 		sc->sc_ctrl_ext |=
   2949 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2950 		    CTRL_EXT_SWDPINS_SHIFT;
   2951 	} else {
   2952 		sc->sc_ctrl_ext |=
   2953 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2954 		    CTRL_EXT_SWDPIO_SHIFT;
   2955 	}
   2956 #endif
   2957 
   2958 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2959 #if 0
   2960 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2961 #endif
   2962 
   2963 	if (sc->sc_type == WM_T_PCH) {
   2964 		uint16_t val;
   2965 
   2966 		/* Save the NVM K1 bit setting */
   2967 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2968 
   2969 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2970 			sc->sc_nvm_k1_enabled = 1;
   2971 		else
   2972 			sc->sc_nvm_k1_enabled = 0;
   2973 	}
   2974 
   2975 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2976 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2977 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2978 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2979 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2980 	    || sc->sc_type == WM_T_82573
   2981 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2982 		/* Copper only */
   2983 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2984 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2985 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2986 	    || (sc->sc_type == WM_T_I211)) {
   2987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2988 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2989 		switch (link_mode) {
   2990 		case CTRL_EXT_LINK_MODE_1000KX:
   2991 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2992 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2993 			break;
   2994 		case CTRL_EXT_LINK_MODE_SGMII:
   2995 			if (wm_sgmii_uses_mdio(sc)) {
   2996 				aprint_normal_dev(sc->sc_dev,
   2997 				    "SGMII(MDIO)\n");
   2998 				sc->sc_flags |= WM_F_SGMII;
   2999 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3000 				break;
   3001 			}
   3002 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3003 			/*FALLTHROUGH*/
   3004 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3005 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3006 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3007 				if (link_mode
   3008 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3009 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3010 					sc->sc_flags |= WM_F_SGMII;
   3011 					aprint_verbose_dev(sc->sc_dev,
   3012 					    "SGMII\n");
   3013 				} else {
   3014 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3015 					aprint_verbose_dev(sc->sc_dev,
   3016 					    "SERDES\n");
   3017 				}
   3018 				break;
   3019 			}
   3020 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3021 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3022 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3023 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3024 				sc->sc_flags |= WM_F_SGMII;
   3025 			}
   3026 			/* Do not change link mode for 100BaseFX */
   3027 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3028 				break;
   3029 
   3030 			/* Change current link mode setting */
   3031 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3032 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3033 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3034 			else
   3035 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3037 			break;
   3038 		case CTRL_EXT_LINK_MODE_GMII:
   3039 		default:
   3040 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3041 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3042 			break;
   3043 		}
   3044 
    3045 		reg &= ~CTRL_EXT_I2C_ENA;
    3046 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3047 			reg |= CTRL_EXT_I2C_ENA;
   3050 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3051 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3052 			if (!wm_sgmii_uses_mdio(sc))
   3053 				wm_gmii_setup_phytype(sc, 0, 0);
   3054 			wm_reset_mdicnfg_82580(sc);
   3055 		}
   3056 	} else if (sc->sc_type < WM_T_82543 ||
   3057 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3058 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3059 			aprint_error_dev(sc->sc_dev,
   3060 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3061 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3062 		}
   3063 	} else {
   3064 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3065 			aprint_error_dev(sc->sc_dev,
   3066 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3067 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3068 		}
   3069 	}
   3070 
   3071 	if (sc->sc_type >= WM_T_PCH2)
   3072 		sc->sc_flags |= WM_F_EEE;
   3073 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3074 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3075 		/* XXX: Need special handling for I354. (not yet) */
   3076 		if (sc->sc_type != WM_T_I354)
   3077 			sc->sc_flags |= WM_F_EEE;
   3078 	}
   3079 
   3080 	/*
   3081 	 * The I350 has a bug where it always strips the CRC whether
    3082 	 * asked to or not. So ask for a stripped CRC here and cope in rxeof.
   3083 	 */
   3084 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3085 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3086 		sc->sc_flags |= WM_F_CRC_STRIP;
   3087 
   3088 	/*
   3089 	 * Workaround for some chips to delay sending LINK_STATE_UP.
    3090 	 * Some systems can't send packets soon after link-up. See also
   3091 	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
   3092 	 */
   3093 	switch (sc->sc_type) {
   3094 	case WM_T_I350:
   3095 	case WM_T_I354:
   3096 	case WM_T_I210:
   3097 	case WM_T_I211:
   3098 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3099 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3100 		break;
   3101 	default:
   3102 		break;
   3103 	}
   3104 
   3105 	/* Set device properties (macflags) */
   3106 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3107 
   3108 	if (sc->sc_flags != 0) {
   3109 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3110 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3111 	}
   3112 
   3113 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3114 
   3115 	/* Initialize the media structures accordingly. */
   3116 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3117 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3118 	else
   3119 		wm_tbi_mediainit(sc); /* All others */
   3120 
   3121 	ifp = &sc->sc_ethercom.ec_if;
   3122 	xname = device_xname(sc->sc_dev);
   3123 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3124 	ifp->if_softc = sc;
   3125 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3126 	ifp->if_extflags = IFEF_MPSAFE;
   3127 	ifp->if_ioctl = wm_ioctl;
   3128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3129 		ifp->if_start = wm_nq_start;
   3130 		/*
   3131 		 * When the number of CPUs is one and the controller can use
    3132 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3133 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3134 		 * the other for link status changes.
   3135 		 * In this situation, wm_nq_transmit() is disadvantageous
   3136 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3137 		 */
   3138 		if (wm_is_using_multiqueue(sc))
   3139 			ifp->if_transmit = wm_nq_transmit;
   3140 	} else {
   3141 		ifp->if_start = wm_start;
   3142 		/*
   3143 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3144 		 * described above.
   3145 		 */
   3146 		if (wm_is_using_multiqueue(sc))
   3147 			ifp->if_transmit = wm_transmit;
   3148 	}
    3149 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3150 	ifp->if_init = wm_init;
   3151 	ifp->if_stop = wm_stop;
   3152 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3153 	IFQ_SET_READY(&ifp->if_snd);
   3154 
   3155 	/* Check for jumbo frame */
   3156 	switch (sc->sc_type) {
   3157 	case WM_T_82573:
   3158 		/* XXX limited to 9234 if ASPM is disabled */
   3159 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3160 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3161 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3162 		break;
   3163 	case WM_T_82571:
   3164 	case WM_T_82572:
   3165 	case WM_T_82574:
   3166 	case WM_T_82583:
   3167 	case WM_T_82575:
   3168 	case WM_T_82576:
   3169 	case WM_T_82580:
   3170 	case WM_T_I350:
   3171 	case WM_T_I354:
   3172 	case WM_T_I210:
   3173 	case WM_T_I211:
   3174 	case WM_T_80003:
   3175 	case WM_T_ICH9:
   3176 	case WM_T_ICH10:
   3177 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3178 	case WM_T_PCH_LPT:
   3179 	case WM_T_PCH_SPT:
   3180 	case WM_T_PCH_CNP:
   3181 		/* XXX limited to 9234 */
   3182 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3183 		break;
   3184 	case WM_T_PCH:
   3185 		/* XXX limited to 4096 */
   3186 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3187 		break;
   3188 	case WM_T_82542_2_0:
   3189 	case WM_T_82542_2_1:
   3190 	case WM_T_ICH8:
   3191 		/* No support for jumbo frame */
   3192 		break;
   3193 	default:
   3194 		/* ETHER_MAX_LEN_JUMBO */
   3195 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3196 		break;
   3197 	}
   3198 
    3199 	/* If we're an i82543 or greater, we can support VLANs. */
   3200 	if (sc->sc_type >= WM_T_82543) {
   3201 		sc->sc_ethercom.ec_capabilities |=
   3202 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3203 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3204 	}
   3205 
   3206 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3207 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3208 
   3209 	/*
    3210 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
   3211 	 * on i82543 and later.
   3212 	 */
   3213 	if (sc->sc_type >= WM_T_82543) {
   3214 		ifp->if_capabilities |=
   3215 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3216 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3217 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3218 		    IFCAP_CSUM_TCPv6_Tx |
   3219 		    IFCAP_CSUM_UDPv6_Tx;
   3220 	}
   3221 
   3222 	/*
   3223 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3224 	 *
   3225 	 *	82541GI (8086:1076) ... no
   3226 	 *	82572EI (8086:10b9) ... yes
   3227 	 */
   3228 	if (sc->sc_type >= WM_T_82571) {
   3229 		ifp->if_capabilities |=
   3230 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3231 	}
   3232 
   3233 	/*
    3234 	 * If we're an i82544 or greater (except i82547), we can do
   3235 	 * TCP segmentation offload.
   3236 	 */
   3237 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3238 		ifp->if_capabilities |= IFCAP_TSOv4;
   3239 
   3240 	if (sc->sc_type >= WM_T_82571)
   3241 		ifp->if_capabilities |= IFCAP_TSOv6;
   3242 
   3243 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3244 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3245 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3246 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3247 
   3248 	/* Attach the interface. */
   3249 	if_initialize(ifp);
   3250 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3251 	ether_ifattach(ifp, enaddr);
   3252 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3253 	if_register(ifp);
   3254 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3255 	    RND_FLAG_DEFAULT);
   3256 
   3257 #ifdef WM_EVENT_COUNTERS
   3258 	/* Attach event counters. */
   3259 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3260 	    NULL, xname, "linkintr");
   3261 
   3262 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "CRC Error");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Symbol Error");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Missed Packets");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Collision");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Sequence Error");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Receive Length Error");
   3274 
   3275 	if (sc->sc_type >= WM_T_82543) {
   3276 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3277 		    NULL, xname, "Alignment Error");
   3278 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3279 		    NULL, xname, "Receive Error");
   3280 		/* XXX Does 82575 have HTDPMC? */
   3281 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3282 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3283 			    EVCNT_TYPE_MISC, NULL, xname,
   3284 			    "Carrier Extension Error");
   3285 		else
   3286 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3287 			    EVCNT_TYPE_MISC, NULL, xname,
   3288 			    "Host Transmit Discarded Packets by MAC");
   3289 
   3290 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3291 		    NULL, xname, "Tx with No CRS");
   3292 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3293 		    NULL, xname, "TCP Segmentation Context Tx");
   3294 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3295 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3296 			    EVCNT_TYPE_MISC, NULL, xname,
   3297 			    "TCP Segmentation Context Tx Fail");
   3298 		else {
   3299 			/* XXX Is the circuit breaker only for 82576? */
   3300 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3301 			    EVCNT_TYPE_MISC, NULL, xname,
   3302 			    "Circuit Breaker Rx Dropped Packet");
   3303 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3304 			    EVCNT_TYPE_MISC, NULL, xname,
   3305 			    "Circuit Breaker Rx Manageability Packet");
   3306 		}
   3307 	}
   3308 
   3309 	if (sc->sc_type >= WM_T_82542_2_1) {
   3310 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3311 		    NULL, xname, "tx_xoff");
   3312 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3313 		    NULL, xname, "tx_xon");
   3314 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3315 		    NULL, xname, "rx_xoff");
   3316 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3317 		    NULL, xname, "rx_xon");
   3318 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3319 		    NULL, xname, "rx_macctl");
   3320 	}
   3321 
   3322 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Single Collision");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Excessive Collisions");
   3326 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3327 	    NULL, xname, "Multiple Collision");
   3328 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3329 	    NULL, xname, "Late Collisions");
   3330 
   3331 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3332 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3334 
   3335 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Defer");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Packets Rx (64 bytes)");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3347 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3348 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3349 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3350 	    NULL, xname, "Good Packets Rx");
   3351 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3352 	    NULL, xname, "Broadcast Packets Rx");
   3353 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3354 	    NULL, xname, "Multicast Packets Rx");
   3355 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3356 	    NULL, xname, "Good Packets Tx");
   3357 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3358 	    NULL, xname, "Good Octets Rx");
   3359 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3360 	    NULL, xname, "Good Octets Tx");
   3361 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Rx No Buffers");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Rx Undersize");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Rx Fragment");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Rx Oversize");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Rx Jabber");
   3371 	if (sc->sc_type >= WM_T_82540) {
   3372 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3373 		    NULL, xname, "Management Packets RX");
   3374 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3375 		    NULL, xname, "Management Packets Dropped");
   3376 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3377 		    NULL, xname, "Management Packets TX");
   3378 	}
   3379 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Total Octets Rx");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Total Octets Tx");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Total Packets Rx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Total Packets Tx");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Packets Tx (64 bytes)");
   3389 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3390 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3391 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3392 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3393 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3394 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3395 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3396 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3397 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Multicast Packets Tx");
   3401 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3402 	    NULL, xname, "Broadcast Packets Tx");
   3403 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3404 	    NULL, xname, "Interrupt Assertion");
   3405 	if (sc->sc_type < WM_T_82575) {
   3406 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3407 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3408 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3409 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3410 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3411 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3412 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3413 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3414 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3415 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3416 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3417 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3418 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3419 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3420 
   3421 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3422 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3423 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3424 	} else if (!WM_IS_ICHPCH(sc)) {
   3425 		/*
   3426 		 * For 82575 and newer.
   3427 		 *
   3428 		 * On 80003, ICHs and PCHs, it seems all of the following
   3429 		 * registers are zero.
   3430 		 */
   3431 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3432 		    NULL, xname, "Rx Packets To Host");
   3433 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3434 		    NULL, xname, "Debug Counter 1");
   3435 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3436 		    NULL, xname, "Debug Counter 2");
   3437 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3438 		    NULL, xname, "Debug Counter 3");
   3439 
   3440 		/*
    3441 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3442 		 * Empty). I think that's wrong: the real count I observed
    3443 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets
    3444 		 * Tx), so it's HGPTC (Host Good Packets Tx), which is
    3445 		 * described in the 82576 datasheet.
   3446 		 */
   3447 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3448 		    NULL, xname, "Host Good Packets TX");
   3449 
   3450 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3451 		    NULL, xname, "Debug Counter 4");
   3452 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3453 		    NULL, xname, "Rx Desc Min Thresh");
   3454 		/* XXX Is the circuit breaker only for 82576? */
   3455 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3456 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3457 
   3458 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3459 		    NULL, xname, "Host Good Octets Rx");
   3460 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3461 		    NULL, xname, "Host Good Octets Tx");
   3462 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3463 		    NULL, xname, "Length Errors");
   3464 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3465 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3466 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3467 		    NULL, xname, "Header Redirection Missed Packet");
   3468 	}
   3469 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3470 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3471 		    NULL, xname, "EEE Tx LPI");
   3472 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3473 		    NULL, xname, "EEE Rx LPI");
   3474 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3475 		    NULL, xname, "BMC2OS Packets received by host");
   3476 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3477 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3478 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3479 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3480 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3481 		    NULL, xname, "OS2BMC Packets received by BMC");
   3482 	}
   3483 #endif /* WM_EVENT_COUNTERS */
   3484 
   3485 	sc->sc_txrx_use_workqueue = false;
   3486 
   3487 	if (wm_phy_need_linkdown_discard(sc)) {
   3488 		DPRINTF(sc, WM_DEBUG_LINK,
   3489 		    ("%s: %s: Set linkdown discard flag\n",
   3490 			device_xname(sc->sc_dev), __func__));
   3491 		wm_set_linkdown_discard(sc);
   3492 	}
   3493 
   3494 	wm_init_sysctls(sc);
   3495 
   3496 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3497 		pmf_class_network_register(self, ifp);
   3498 	else
   3499 		aprint_error_dev(self, "couldn't establish power handler\n");
   3500 
   3501 	sc->sc_flags |= WM_F_ATTACHED;
   3502 out:
   3503 	return;
   3504 }
   3505 
   3506 /* The detach function (ca_detach) */
   3507 static int
   3508 wm_detach(device_t self, int flags __unused)
   3509 {
   3510 	struct wm_softc *sc = device_private(self);
   3511 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3512 	int i;
   3513 
   3514 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3515 		return 0;
   3516 
   3517 	/* Stop the interface. Callouts are stopped in it. */
   3518 	IFNET_LOCK(ifp);
   3519 	sc->sc_dying = true;
   3520 	wm_stop(ifp, 1);
   3521 	IFNET_UNLOCK(ifp);
   3522 
   3523 	pmf_device_deregister(self);
   3524 
   3525 	sysctl_teardown(&sc->sc_sysctllog);
   3526 
   3527 #ifdef WM_EVENT_COUNTERS
   3528 	evcnt_detach(&sc->sc_ev_linkintr);
   3529 
   3530 	evcnt_detach(&sc->sc_ev_crcerrs);
   3531 	evcnt_detach(&sc->sc_ev_symerrc);
   3532 	evcnt_detach(&sc->sc_ev_mpc);
   3533 	evcnt_detach(&sc->sc_ev_colc);
   3534 	evcnt_detach(&sc->sc_ev_sec);
   3535 	evcnt_detach(&sc->sc_ev_rlec);
   3536 
   3537 	if (sc->sc_type >= WM_T_82543) {
   3538 		evcnt_detach(&sc->sc_ev_algnerrc);
   3539 		evcnt_detach(&sc->sc_ev_rxerrc);
   3540 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3541 			evcnt_detach(&sc->sc_ev_cexterr);
   3542 		else
   3543 			evcnt_detach(&sc->sc_ev_htdpmc);
   3544 
   3545 		evcnt_detach(&sc->sc_ev_tncrs);
   3546 		evcnt_detach(&sc->sc_ev_tsctc);
   3547 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3548 			evcnt_detach(&sc->sc_ev_tsctfc);
   3549 		else {
   3550 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3551 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3552 		}
   3553 	}
   3554 
   3555 	if (sc->sc_type >= WM_T_82542_2_1) {
   3556 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3557 		evcnt_detach(&sc->sc_ev_tx_xon);
   3558 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3559 		evcnt_detach(&sc->sc_ev_rx_xon);
   3560 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3561 	}
   3562 
   3563 	evcnt_detach(&sc->sc_ev_scc);
   3564 	evcnt_detach(&sc->sc_ev_ecol);
   3565 	evcnt_detach(&sc->sc_ev_mcc);
   3566 	evcnt_detach(&sc->sc_ev_latecol);
   3567 
   3568 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3569 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3570 
   3571 	evcnt_detach(&sc->sc_ev_dc);
   3572 	evcnt_detach(&sc->sc_ev_prc64);
   3573 	evcnt_detach(&sc->sc_ev_prc127);
   3574 	evcnt_detach(&sc->sc_ev_prc255);
   3575 	evcnt_detach(&sc->sc_ev_prc511);
   3576 	evcnt_detach(&sc->sc_ev_prc1023);
   3577 	evcnt_detach(&sc->sc_ev_prc1522);
   3578 	evcnt_detach(&sc->sc_ev_gprc);
   3579 	evcnt_detach(&sc->sc_ev_bprc);
   3580 	evcnt_detach(&sc->sc_ev_mprc);
   3581 	evcnt_detach(&sc->sc_ev_gptc);
   3582 	evcnt_detach(&sc->sc_ev_gorc);
   3583 	evcnt_detach(&sc->sc_ev_gotc);
   3584 	evcnt_detach(&sc->sc_ev_rnbc);
   3585 	evcnt_detach(&sc->sc_ev_ruc);
   3586 	evcnt_detach(&sc->sc_ev_rfc);
   3587 	evcnt_detach(&sc->sc_ev_roc);
   3588 	evcnt_detach(&sc->sc_ev_rjc);
   3589 	if (sc->sc_type >= WM_T_82540) {
   3590 		evcnt_detach(&sc->sc_ev_mgtprc);
   3591 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3592 		evcnt_detach(&sc->sc_ev_mgtptc);
   3593 	}
   3594 	evcnt_detach(&sc->sc_ev_tor);
   3595 	evcnt_detach(&sc->sc_ev_tot);
   3596 	evcnt_detach(&sc->sc_ev_tpr);
   3597 	evcnt_detach(&sc->sc_ev_tpt);
   3598 	evcnt_detach(&sc->sc_ev_ptc64);
   3599 	evcnt_detach(&sc->sc_ev_ptc127);
   3600 	evcnt_detach(&sc->sc_ev_ptc255);
   3601 	evcnt_detach(&sc->sc_ev_ptc511);
   3602 	evcnt_detach(&sc->sc_ev_ptc1023);
   3603 	evcnt_detach(&sc->sc_ev_ptc1522);
   3604 	evcnt_detach(&sc->sc_ev_mptc);
   3605 	evcnt_detach(&sc->sc_ev_bptc);
   3606 	evcnt_detach(&sc->sc_ev_iac);
   3607 	if (sc->sc_type < WM_T_82575) {
   3608 		evcnt_detach(&sc->sc_ev_icrxptc);
   3609 		evcnt_detach(&sc->sc_ev_icrxatc);
   3610 		evcnt_detach(&sc->sc_ev_ictxptc);
   3611 		evcnt_detach(&sc->sc_ev_ictxatc);
   3612 		evcnt_detach(&sc->sc_ev_ictxqec);
   3613 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3614 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3615 		evcnt_detach(&sc->sc_ev_icrxoc);
   3616 	} else if (!WM_IS_ICHPCH(sc)) {
   3617 		evcnt_detach(&sc->sc_ev_rpthc);
   3618 		evcnt_detach(&sc->sc_ev_debug1);
   3619 		evcnt_detach(&sc->sc_ev_debug2);
   3620 		evcnt_detach(&sc->sc_ev_debug3);
   3621 		evcnt_detach(&sc->sc_ev_hgptc);
   3622 		evcnt_detach(&sc->sc_ev_debug4);
   3623 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3624 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3625 
   3626 		evcnt_detach(&sc->sc_ev_hgorc);
   3627 		evcnt_detach(&sc->sc_ev_hgotc);
   3628 		evcnt_detach(&sc->sc_ev_lenerrs);
   3629 		evcnt_detach(&sc->sc_ev_scvpc);
   3630 		evcnt_detach(&sc->sc_ev_hrmpc);
   3631 	}
   3632 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3633 		evcnt_detach(&sc->sc_ev_tlpic);
   3634 		evcnt_detach(&sc->sc_ev_rlpic);
   3635 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3636 		evcnt_detach(&sc->sc_ev_o2bspc);
   3637 		evcnt_detach(&sc->sc_ev_b2ospc);
   3638 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3639 	}
   3640 #endif /* WM_EVENT_COUNTERS */
   3641 
   3642 	rnd_detach_source(&sc->rnd_source);
   3643 
   3644 	/* Tell the firmware about the release */
   3645 	mutex_enter(sc->sc_core_lock);
   3646 	wm_release_manageability(sc);
   3647 	wm_release_hw_control(sc);
   3648 	wm_enable_wakeup(sc);
   3649 	mutex_exit(sc->sc_core_lock);
   3650 
   3651 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3652 
   3653 	ether_ifdetach(ifp);
   3654 	if_detach(ifp);
   3655 	if_percpuq_destroy(sc->sc_ipq);
   3656 
   3657 	/* Delete all remaining media. */
   3658 	ifmedia_fini(&sc->sc_mii.mii_media);
   3659 
   3660 	/* Unload RX dmamaps and free mbufs */
   3661 	for (i = 0; i < sc->sc_nqueues; i++) {
   3662 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3663 		mutex_enter(rxq->rxq_lock);
   3664 		wm_rxdrain(rxq);
   3665 		mutex_exit(rxq->rxq_lock);
   3666 	}
   3667 	/* Must unlock here */
   3668 
   3669 	/* Disestablish the interrupt handler */
   3670 	for (i = 0; i < sc->sc_nintrs; i++) {
   3671 		if (sc->sc_ihs[i] != NULL) {
   3672 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3673 			sc->sc_ihs[i] = NULL;
   3674 		}
   3675 	}
   3676 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3677 
   3678 	/* wm_stop() ensured that the workqueues are stopped. */
   3679 	workqueue_destroy(sc->sc_queue_wq);
   3680 	workqueue_destroy(sc->sc_reset_wq);
   3681 
   3682 	for (i = 0; i < sc->sc_nqueues; i++)
   3683 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3684 
   3685 	wm_free_txrx_queues(sc);
   3686 
   3687 	/* Unmap the registers */
   3688 	if (sc->sc_ss) {
   3689 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3690 		sc->sc_ss = 0;
   3691 	}
   3692 	if (sc->sc_ios) {
   3693 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3694 		sc->sc_ios = 0;
   3695 	}
   3696 	if (sc->sc_flashs) {
   3697 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3698 		sc->sc_flashs = 0;
   3699 	}
   3700 
   3701 	if (sc->sc_core_lock)
   3702 		mutex_obj_free(sc->sc_core_lock);
   3703 	if (sc->sc_ich_phymtx)
   3704 		mutex_obj_free(sc->sc_ich_phymtx);
   3705 	if (sc->sc_ich_nvmmtx)
   3706 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3707 
   3708 	return 0;
   3709 }
   3710 
   3711 static bool
   3712 wm_suspend(device_t self, const pmf_qual_t *qual)
   3713 {
   3714 	struct wm_softc *sc = device_private(self);
   3715 
   3716 	wm_release_manageability(sc);
   3717 	wm_release_hw_control(sc);
   3718 	wm_enable_wakeup(sc);
   3719 
   3720 	return true;
   3721 }
   3722 
   3723 static bool
   3724 wm_resume(device_t self, const pmf_qual_t *qual)
   3725 {
   3726 	struct wm_softc *sc = device_private(self);
   3727 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3728 	pcireg_t reg;
   3729 	char buf[256];
   3730 
   3731 	reg = CSR_READ(sc, WMREG_WUS);
   3732 	if (reg != 0) {
   3733 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3734 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3735 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3736 	}
   3737 
   3738 	if (sc->sc_type >= WM_T_PCH2)
   3739 		wm_resume_workarounds_pchlan(sc);
   3740 	IFNET_LOCK(ifp);
   3741 	if ((ifp->if_flags & IFF_UP) == 0) {
   3742 		/* >= PCH_SPT hardware workaround before reset. */
   3743 		if (sc->sc_type >= WM_T_PCH_SPT)
   3744 			wm_flush_desc_rings(sc);
   3745 
   3746 		wm_reset(sc);
   3747 		/* Non-AMT based hardware can now take control from firmware */
   3748 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3749 			wm_get_hw_control(sc);
   3750 		wm_init_manageability(sc);
   3751 	} else {
   3752 		/*
   3753 		 * We called pmf_class_network_register(), so if_init() is
   3754 		 * automatically called when IFF_UP. wm_reset(),
   3755 		 * wm_get_hw_control() and wm_init_manageability() are called
   3756 		 * via wm_init().
   3757 		 */
   3758 	}
   3759 	IFNET_UNLOCK(ifp);
   3760 
   3761 	return true;
   3762 }
   3763 
   3764 /*
   3765  * wm_watchdog:
   3766  *
   3767  *	Watchdog checker.
   3768  */
   3769 static bool
   3770 wm_watchdog(struct ifnet *ifp)
   3771 {
   3772 	int qid;
   3773 	struct wm_softc *sc = ifp->if_softc;
    3774 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576). */
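         	/*
         	 * hang_queue is a bitmask: wm_watchdog_txq() sets bit i when
         	 * Tx queue i has timed out, so e.g. a value of 0x5 means
         	 * queues 0 and 2 hung. 16 bits is therefore wide enough.
         	 */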
   3775 
   3776 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3777 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3778 
   3779 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3780 	}
   3781 
   3782 #ifdef WM_DEBUG
   3783 	if (sc->sc_trigger_reset) {
    3784 		/* Debug operation; no need for atomicity or reliability. */
   3785 		sc->sc_trigger_reset = 0;
   3786 		hang_queue++;
   3787 	}
   3788 #endif
   3789 
   3790 	if (hang_queue == 0)
   3791 		return true;
   3792 
   3793 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3794 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3795 
   3796 	return false;
   3797 }
   3798 
   3799 /*
   3800  * Perform an interface watchdog reset.
   3801  */
   3802 static void
   3803 wm_handle_reset_work(struct work *work, void *arg)
   3804 {
   3805 	struct wm_softc * const sc = arg;
   3806 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3807 
   3808 	/* Don't want ioctl operations to happen */
   3809 	IFNET_LOCK(ifp);
   3810 
    3811 	/* Reset the interface. */
   3812 	wm_init(ifp);
   3813 
   3814 	IFNET_UNLOCK(ifp);
   3815 
   3816 	/*
    3817 	 * There is still some upper-layer processing that calls
    3818 	 * ifp->if_start() directly, e.g. ALTQ or a single-CPU system.
   3819 	 */
   3820 	/* Try to get more packets going. */
   3821 	ifp->if_start(ifp);
   3822 
   3823 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3824 }
   3825 
   3826 
   3827 static void
   3828 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3829 {
   3830 
   3831 	mutex_enter(txq->txq_lock);
   3832 	if (txq->txq_sending &&
   3833 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3834 		wm_watchdog_txq_locked(ifp, txq, hang);
   3835 
   3836 	mutex_exit(txq->txq_lock);
   3837 }
   3838 
   3839 static void
   3840 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3841     uint16_t *hang)
   3842 {
   3843 	struct wm_softc *sc = ifp->if_softc;
   3844 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3845 
   3846 	KASSERT(mutex_owned(txq->txq_lock));
   3847 
   3848 	/*
   3849 	 * Since we're using delayed interrupts, sweep up
   3850 	 * before we report an error.
   3851 	 */
   3852 	wm_txeof(txq, UINT_MAX);
   3853 
   3854 	if (txq->txq_sending)
   3855 		*hang |= __BIT(wmq->wmq_id);
   3856 
   3857 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3858 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3859 		    device_xname(sc->sc_dev));
   3860 	} else {
   3861 #ifdef WM_DEBUG
   3862 		int i, j;
   3863 		struct wm_txsoft *txs;
   3864 #endif
   3865 		log(LOG_ERR,
   3866 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3867 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3868 		    txq->txq_next);
   3869 		if_statinc(ifp, if_oerrors);
   3870 #ifdef WM_DEBUG
   3871 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3872 		     i = WM_NEXTTXS(txq, i)) {
   3873 			txs = &txq->txq_soft[i];
   3874 			printf("txs %d tx %d -> %d\n",
   3875 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3876 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3877 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3878 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3879 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3880 					printf("\t %#08x%08x\n",
   3881 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3882 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3883 				} else {
   3884 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3885 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3886 					    txq->txq_descs[j].wtx_addr.wa_low);
   3887 					printf("\t %#04x%02x%02x%08x\n",
   3888 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3889 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3890 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3891 					    txq->txq_descs[j].wtx_cmdlen);
   3892 				}
   3893 				if (j == txs->txs_lastdesc)
   3894 					break;
   3895 			}
   3896 		}
   3897 #endif
   3898 	}
   3899 }
   3900 
   3901 /*
   3902  * wm_tick:
   3903  *
   3904  *	One second timer, used to check link status, sweep up
   3905  *	completed transmit jobs, etc.
   3906  */
   3907 static void
   3908 wm_tick(void *arg)
   3909 {
   3910 	struct wm_softc *sc = arg;
   3911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3912 
   3913 	mutex_enter(sc->sc_core_lock);
   3914 
   3915 	if (sc->sc_core_stopping) {
   3916 		mutex_exit(sc->sc_core_lock);
   3917 		return;
   3918 	}
   3919 
   3920 	wm_update_stats(sc);
   3921 
   3922 	if (sc->sc_flags & WM_F_HAS_MII) {
   3923 		bool dotick = true;
   3924 
   3925 		/*
   3926 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3927 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3928 		 */
   3929 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3930 			struct timeval now;
   3931 
   3932 			getmicrotime(&now);
   3933 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3934 				dotick = false;
   3935 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3936 				/* Simplify by checking tv_sec only. */
   3937 
   3938 				sc->sc_linkup_delay_time.tv_sec = 0;
   3939 				sc->sc_linkup_delay_time.tv_usec = 0;
   3940 			}
   3941 		}
   3942 		if (dotick)
   3943 			mii_tick(&sc->sc_mii);
   3944 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3945 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3946 		wm_serdes_tick(sc);
   3947 	else
   3948 		wm_tbi_tick(sc);
   3949 
   3950 	mutex_exit(sc->sc_core_lock);
   3951 
   3952 	if (wm_watchdog(ifp))
   3953 		callout_schedule(&sc->sc_tick_ch, hz);
   3954 }
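
         /*
          * A note on the tick/watchdog interplay as sketched by the code
          * above: wm_tick() reschedules itself only while wm_watchdog()
          * returns true. When a hung queue is detected, the callout is not
          * rescheduled here; instead the reset workqueue reinitializes the
          * interface via wm_init(), which is assumed to restart the tick
          * callout.
          */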
   3955 
   3956 static int
   3957 wm_ifflags_cb(struct ethercom *ec)
   3958 {
   3959 	struct ifnet *ifp = &ec->ec_if;
   3960 	struct wm_softc *sc = ifp->if_softc;
   3961 	u_short iffchange;
   3962 	int ecchange;
   3963 	bool needreset = false;
   3964 	int rc = 0;
   3965 
   3966 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3967 		device_xname(sc->sc_dev), __func__));
   3968 
   3969 	KASSERT(IFNET_LOCKED(ifp));
   3970 
   3971 	mutex_enter(sc->sc_core_lock);
   3972 
   3973 	/*
   3974 	 * Check for if_flags.
   3975 	 * Main usage is to prevent linkdown when opening bpf.
    3976 	 * The main use is to prevent link-down when opening bpf.
   3977 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3978 	sc->sc_if_flags = ifp->if_flags;
   3979 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3980 		needreset = true;
   3981 		goto ec;
   3982 	}
   3983 
   3984 	/* iff related updates */
   3985 	if ((iffchange & IFF_PROMISC) != 0)
   3986 		wm_set_filter(sc);
   3987 
   3988 	wm_set_vlan(sc);
   3989 
   3990 ec:
   3991 	/* Check for ec_capenable. */
   3992 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3993 	sc->sc_ec_capenable = ec->ec_capenable;
   3994 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3995 		needreset = true;
   3996 		goto out;
   3997 	}
   3998 
   3999 	/* ec related updates */
   4000 	wm_set_eee(sc);
   4001 
   4002 out:
   4003 	if (needreset)
   4004 		rc = ENETRESET;
   4005 	mutex_exit(sc->sc_core_lock);
   4006 
   4007 	return rc;
   4008 }
   4009 
   4010 static bool
   4011 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4012 {
   4013 
   4014 	switch (sc->sc_phytype) {
   4015 	case WMPHY_82577: /* ihphy */
   4016 	case WMPHY_82578: /* atphy */
   4017 	case WMPHY_82579: /* ihphy */
   4018 	case WMPHY_I217: /* ihphy */
   4019 	case WMPHY_82580: /* ihphy */
   4020 	case WMPHY_I350: /* ihphy */
   4021 		return true;
   4022 	default:
   4023 		return false;
   4024 	}
   4025 }
   4026 
   4027 static void
   4028 wm_set_linkdown_discard(struct wm_softc *sc)
   4029 {
   4030 
   4031 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4032 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4033 
   4034 		mutex_enter(txq->txq_lock);
   4035 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4036 		mutex_exit(txq->txq_lock);
   4037 	}
   4038 }
   4039 
   4040 static void
   4041 wm_clear_linkdown_discard(struct wm_softc *sc)
   4042 {
   4043 
   4044 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4045 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4046 
   4047 		mutex_enter(txq->txq_lock);
   4048 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4049 		mutex_exit(txq->txq_lock);
   4050 	}
   4051 }
   4052 
   4053 /*
   4054  * wm_ioctl:		[ifnet interface function]
   4055  *
   4056  *	Handle control requests from the operator.
   4057  */
   4058 static int
   4059 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4060 {
   4061 	struct wm_softc *sc = ifp->if_softc;
   4062 	struct ifreq *ifr = (struct ifreq *)data;
   4063 	struct ifaddr *ifa = (struct ifaddr *)data;
   4064 	struct sockaddr_dl *sdl;
   4065 	int error;
   4066 
   4067 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4068 		device_xname(sc->sc_dev), __func__));
   4069 
   4070 	switch (cmd) {
   4071 	case SIOCADDMULTI:
   4072 	case SIOCDELMULTI:
   4073 		break;
   4074 	default:
   4075 		KASSERT(IFNET_LOCKED(ifp));
   4076 	}
   4077 
   4078 	if (cmd == SIOCZIFDATA) {
   4079 		/*
   4080 		 * Special handling for SIOCZIFDATA.
   4081 		 * Copying and clearing the if_data structure is done with
   4082 		 * ether_ioctl() below.
   4083 		 */
   4084 		mutex_enter(sc->sc_core_lock);
   4085 		wm_update_stats(sc);
   4086 		wm_clear_evcnt(sc);
   4087 		mutex_exit(sc->sc_core_lock);
   4088 	}
   4089 
   4090 	switch (cmd) {
   4091 	case SIOCSIFMEDIA:
   4092 		mutex_enter(sc->sc_core_lock);
   4093 		/* Flow control requires full-duplex mode. */
   4094 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4095 		    (ifr->ifr_media & IFM_FDX) == 0)
   4096 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4097 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4098 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4099 				/* We can do both TXPAUSE and RXPAUSE. */
   4100 				ifr->ifr_media |=
   4101 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4102 			}
   4103 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4104 		}
   4105 		mutex_exit(sc->sc_core_lock);
   4106 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4107 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4108 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4109 				DPRINTF(sc, WM_DEBUG_LINK,
   4110 				    ("%s: %s: Set linkdown discard flag\n",
   4111 					device_xname(sc->sc_dev), __func__));
   4112 				wm_set_linkdown_discard(sc);
   4113 			}
   4114 		}
   4115 		break;
   4116 	case SIOCINITIFADDR:
   4117 		mutex_enter(sc->sc_core_lock);
   4118 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4119 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4120 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4121 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4122 			/* Unicast address is the first multicast entry */
   4123 			wm_set_filter(sc);
   4124 			error = 0;
   4125 			mutex_exit(sc->sc_core_lock);
   4126 			break;
   4127 		}
   4128 		mutex_exit(sc->sc_core_lock);
   4129 		/*FALLTHROUGH*/
   4130 	default:
   4131 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4132 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4133 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4134 				DPRINTF(sc, WM_DEBUG_LINK,
   4135 				    ("%s: %s: Set linkdown discard flag\n",
   4136 					device_xname(sc->sc_dev), __func__));
   4137 				wm_set_linkdown_discard(sc);
   4138 			}
   4139 		}
   4140 		const int s = splnet();
   4141 		/* It may call wm_start, so unlock here */
   4142 		error = ether_ioctl(ifp, cmd, data);
   4143 		splx(s);
   4144 		if (error != ENETRESET)
   4145 			break;
   4146 
   4147 		error = 0;
   4148 
   4149 		if (cmd == SIOCSIFCAP)
   4150 			error = if_init(ifp);
   4151 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4152 			mutex_enter(sc->sc_core_lock);
   4153 			if (sc->sc_if_flags & IFF_RUNNING) {
   4154 				/*
   4155 				 * Multicast list has changed; set the
   4156 				 * hardware filter accordingly.
   4157 				 */
   4158 				wm_set_filter(sc);
   4159 			}
   4160 			mutex_exit(sc->sc_core_lock);
   4161 		}
   4162 		break;
   4163 	}
   4164 
   4165 	return error;
   4166 }
   4167 
   4168 /* MAC address related */
   4169 
   4170 /*
    4171  * Get the offset of the MAC address and return it.
    4172  * If an error occurs, return offset 0.
   4173  */
   4174 static uint16_t
   4175 wm_check_alt_mac_addr(struct wm_softc *sc)
   4176 {
   4177 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4178 	uint16_t offset = NVM_OFF_MACADDR;
   4179 
   4180 	/* Try to read alternative MAC address pointer */
   4181 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4182 		return 0;
   4183 
    4184 	/* Check whether the pointer is valid. */
   4185 	if ((offset == 0x0000) || (offset == 0xffff))
   4186 		return 0;
   4187 
   4188 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4189 	/*
    4190 	 * Check whether the alternative MAC address is valid.
    4191 	 * Some cards have a non-0xffff pointer but don't actually
    4192 	 * use an alternative MAC address.
    4193 	 *
    4194 	 * A valid unicast address must not have the multicast bit set.
   4195 	 */
   4196 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4197 		if (((myea[0] & 0xff) & 0x01) == 0)
   4198 			return offset; /* Found */
   4199 
   4200 	/* Not found */
   4201 	return 0;
   4202 }
   4203 
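/*
 * wm_read_mac_addr:
 *
 *	Read the station address from the NVM, taking the per-function
 *	offset and any alternative MAC address into account.  Return 0
 *	on success or -1 if the NVM read fails.
 */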
   4204 static int
   4205 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4206 {
   4207 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4208 	uint16_t offset = NVM_OFF_MACADDR;
   4209 	int do_invert = 0;
   4210 
   4211 	switch (sc->sc_type) {
   4212 	case WM_T_82580:
   4213 	case WM_T_I350:
   4214 	case WM_T_I354:
   4215 		/* EEPROM Top Level Partitioning */
   4216 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4217 		break;
   4218 	case WM_T_82571:
   4219 	case WM_T_82575:
   4220 	case WM_T_82576:
   4221 	case WM_T_80003:
   4222 	case WM_T_I210:
   4223 	case WM_T_I211:
   4224 		offset = wm_check_alt_mac_addr(sc);
   4225 		if (offset == 0)
   4226 			if ((sc->sc_funcid & 0x01) == 1)
   4227 				do_invert = 1;
   4228 		break;
   4229 	default:
   4230 		if ((sc->sc_funcid & 0x01) == 1)
   4231 			do_invert = 1;
   4232 		break;
   4233 	}
   4234 
   4235 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4236 		goto bad;
   4237 
   4238 	enaddr[0] = myea[0] & 0xff;
   4239 	enaddr[1] = myea[0] >> 8;
   4240 	enaddr[2] = myea[1] & 0xff;
   4241 	enaddr[3] = myea[1] >> 8;
   4242 	enaddr[4] = myea[2] & 0xff;
   4243 	enaddr[5] = myea[2] >> 8;
   4244 
   4245 	/*
   4246 	 * Toggle the LSB of the MAC address on the second port
   4247 	 * of some dual port cards.
   4248 	 */
   4249 	if (do_invert != 0)
   4250 		enaddr[5] ^= 1;
   4251 
   4252 	return 0;
   4253 
   4254 bad:
   4255 	return -1;
   4256 }
   4257 
   4258 /*
   4259  * wm_set_ral:
   4260  *
    4261  *	Set an entry in the receive address list.
   4262  */
   4263 static void
   4264 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4265 {
   4266 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4267 	uint32_t wlock_mac;
   4268 	int rv;
   4269 
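	/*
	 * Pack the station address into the RAL/RAH register layout;
	 * e.g. 00:11:22:33:44:55 becomes ral_lo 0x33221100 and
	 * ral_hi 0x5544 (with the Address Valid bit OR'd in).
	 */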
   4270 	if (enaddr != NULL) {
   4271 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4272 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4273 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4274 		ral_hi |= RAL_AV;
   4275 	} else {
   4276 		ral_lo = 0;
   4277 		ral_hi = 0;
   4278 	}
   4279 
   4280 	switch (sc->sc_type) {
   4281 	case WM_T_82542_2_0:
   4282 	case WM_T_82542_2_1:
   4283 	case WM_T_82543:
   4284 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4285 		CSR_WRITE_FLUSH(sc);
   4286 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		break;
   4289 	case WM_T_PCH2:
   4290 	case WM_T_PCH_LPT:
   4291 	case WM_T_PCH_SPT:
   4292 	case WM_T_PCH_CNP:
   4293 		if (idx == 0) {
   4294 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4295 			CSR_WRITE_FLUSH(sc);
   4296 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4297 			CSR_WRITE_FLUSH(sc);
   4298 			return;
   4299 		}
   4300 		if (sc->sc_type != WM_T_PCH2) {
   4301 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4302 			    FWSM_WLOCK_MAC);
   4303 			addrl = WMREG_SHRAL(idx - 1);
   4304 			addrh = WMREG_SHRAH(idx - 1);
   4305 		} else {
   4306 			wlock_mac = 0;
   4307 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4308 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4309 		}
   4310 
   4311 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4312 			rv = wm_get_swflag_ich8lan(sc);
   4313 			if (rv != 0)
   4314 				return;
   4315 			CSR_WRITE(sc, addrl, ral_lo);
   4316 			CSR_WRITE_FLUSH(sc);
   4317 			CSR_WRITE(sc, addrh, ral_hi);
   4318 			CSR_WRITE_FLUSH(sc);
   4319 			wm_put_swflag_ich8lan(sc);
   4320 		}
   4321 
   4322 		break;
   4323 	default:
   4324 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4325 		CSR_WRITE_FLUSH(sc);
   4326 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4327 		CSR_WRITE_FLUSH(sc);
   4328 		break;
   4329 	}
   4330 }
   4331 
   4332 /*
   4333  * wm_mchash:
   4334  *
    4335  *	Compute the hash of the multicast address for the 1024-bit
    4336  *	(ICH/PCH) or 4096-bit multicast filter.
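 *
 *	For example, on a non-ICH/PCH device with mchash_type 0, the
 *	address 01:00:5e:00:00:01 hashes to
 *	((0x00 >> 4) | (0x01 << 4)) & 0xfff = 0x010.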
   4337  */
   4338 static uint32_t
   4339 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4340 {
   4341 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4342 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4343 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4344 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4345 	uint32_t hash;
   4346 
   4347 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4348 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4349 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4350 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4351 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4352 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4353 		return (hash & 0x3ff);
   4354 	}
   4355 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4356 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4357 
   4358 	return (hash & 0xfff);
   4359 }
   4360 
    4361 /*
    4362  * wm_rar_count:
    4363  *	Return the number of entries in the receive address list.
    4364  */
   4365 static int
   4366 wm_rar_count(struct wm_softc *sc)
   4367 {
   4368 	int size;
   4369 
   4370 	switch (sc->sc_type) {
   4371 	case WM_T_ICH8:
    4372 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4373 		break;
   4374 	case WM_T_ICH9:
   4375 	case WM_T_ICH10:
   4376 	case WM_T_PCH:
   4377 		size = WM_RAL_TABSIZE_ICH8;
   4378 		break;
   4379 	case WM_T_PCH2:
   4380 		size = WM_RAL_TABSIZE_PCH2;
   4381 		break;
   4382 	case WM_T_PCH_LPT:
   4383 	case WM_T_PCH_SPT:
   4384 	case WM_T_PCH_CNP:
   4385 		size = WM_RAL_TABSIZE_PCH_LPT;
   4386 		break;
   4387 	case WM_T_82575:
   4388 	case WM_T_I210:
   4389 	case WM_T_I211:
   4390 		size = WM_RAL_TABSIZE_82575;
   4391 		break;
   4392 	case WM_T_82576:
   4393 	case WM_T_82580:
   4394 		size = WM_RAL_TABSIZE_82576;
   4395 		break;
   4396 	case WM_T_I350:
   4397 	case WM_T_I354:
   4398 		size = WM_RAL_TABSIZE_I350;
   4399 		break;
   4400 	default:
   4401 		size = WM_RAL_TABSIZE;
   4402 	}
   4403 
   4404 	return size;
   4405 }
   4406 
   4407 /*
   4408  * wm_set_filter:
   4409  *
   4410  *	Set up the receive filter.
   4411  */
   4412 static void
   4413 wm_set_filter(struct wm_softc *sc)
   4414 {
   4415 	struct ethercom *ec = &sc->sc_ethercom;
   4416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4417 	struct ether_multi *enm;
   4418 	struct ether_multistep step;
   4419 	bus_addr_t mta_reg;
   4420 	uint32_t hash, reg, bit;
   4421 	int i, size, ralmax, rv;
   4422 
   4423 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4424 		device_xname(sc->sc_dev), __func__));
   4425 	KASSERT(mutex_owned(sc->sc_core_lock));
   4426 
   4427 	if (sc->sc_type >= WM_T_82544)
   4428 		mta_reg = WMREG_CORDOVA_MTA;
   4429 	else
   4430 		mta_reg = WMREG_MTA;
   4431 
   4432 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4433 
   4434 	if (sc->sc_if_flags & IFF_BROADCAST)
   4435 		sc->sc_rctl |= RCTL_BAM;
   4436 	if (sc->sc_if_flags & IFF_PROMISC) {
   4437 		sc->sc_rctl |= RCTL_UPE;
   4438 		ETHER_LOCK(ec);
   4439 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4440 		ETHER_UNLOCK(ec);
   4441 		goto allmulti;
   4442 	}
   4443 
   4444 	/*
   4445 	 * Set the station address in the first RAL slot, and
   4446 	 * clear the remaining slots.
   4447 	 */
   4448 	size = wm_rar_count(sc);
   4449 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4450 
   4451 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4452 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4453 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4454 		switch (i) {
   4455 		case 0:
   4456 			/* We can use all entries */
   4457 			ralmax = size;
   4458 			break;
   4459 		case 1:
   4460 			/* Only RAR[0] */
   4461 			ralmax = 1;
   4462 			break;
   4463 		default:
   4464 			/* Available SHRA + RAR[0] */
   4465 			ralmax = i + 1;
   4466 		}
   4467 	} else
   4468 		ralmax = size;
   4469 	for (i = 1; i < size; i++) {
   4470 		if (i < ralmax)
   4471 			wm_set_ral(sc, NULL, i);
   4472 	}
   4473 
   4474 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4475 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4476 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4477 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4478 		size = WM_ICH8_MC_TABSIZE;
   4479 	else
   4480 		size = WM_MC_TABSIZE;
   4481 	/* Clear out the multicast table. */
   4482 	for (i = 0; i < size; i++) {
   4483 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4484 		CSR_WRITE_FLUSH(sc);
   4485 	}
   4486 
   4487 	ETHER_LOCK(ec);
   4488 	ETHER_FIRST_MULTI(step, ec, enm);
   4489 	while (enm != NULL) {
   4490 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4491 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4492 			ETHER_UNLOCK(ec);
   4493 			/*
   4494 			 * We must listen to a range of multicast addresses.
   4495 			 * For now, just accept all multicasts, rather than
   4496 			 * trying to set only those filter bits needed to match
   4497 			 * the range.  (At this time, the only use of address
   4498 			 * ranges is for IP multicast routing, for which the
   4499 			 * range is big enough to require all bits set.)
   4500 			 */
   4501 			goto allmulti;
   4502 		}
   4503 
   4504 		hash = wm_mchash(sc, enm->enm_addrlo);
   4505 
   4506 		reg = (hash >> 5);
   4507 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4508 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4509 		    || (sc->sc_type == WM_T_PCH2)
   4510 		    || (sc->sc_type == WM_T_PCH_LPT)
   4511 		    || (sc->sc_type == WM_T_PCH_SPT)
   4512 		    || (sc->sc_type == WM_T_PCH_CNP))
   4513 			reg &= 0x1f;
   4514 		else
   4515 			reg &= 0x7f;
   4516 		bit = hash & 0x1f;
   4517 
   4518 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4519 		hash |= 1U << bit;
   4520 
   4521 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4522 			/*
    4523 			 * 82544 Errata 9: Certain registers cannot be written
   4524 			 * with particular alignments in PCI-X bus operation
   4525 			 * (FCAH, MTA and VFTA).
   4526 			 */
   4527 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4528 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4529 			CSR_WRITE_FLUSH(sc);
   4530 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4531 			CSR_WRITE_FLUSH(sc);
   4532 		} else {
   4533 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4534 			CSR_WRITE_FLUSH(sc);
   4535 		}
   4536 
   4537 		ETHER_NEXT_MULTI(step, enm);
   4538 	}
   4539 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4540 	ETHER_UNLOCK(ec);
   4541 
   4542 	goto setit;
   4543 
   4544 allmulti:
   4545 	sc->sc_rctl |= RCTL_MPE;
   4546 
   4547 setit:
   4548 	if (sc->sc_type >= WM_T_PCH2) {
   4549 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4550 		    && (ifp->if_mtu > ETHERMTU))
   4551 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4552 		else
   4553 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4554 		if (rv != 0)
   4555 			device_printf(sc->sc_dev,
    4556 			    "Failed to apply the jumbo frame workaround.\n");
   4557 	}
   4558 
   4559 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4560 }
   4561 
   4562 /* Reset and init related */
   4563 
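/*
 * wm_set_vlan:
 *
 *	Set or clear the VLAN Mode Enable bit (CTRL_VME) depending on
 *	whether any VLANs are attached, and write the control register.
 */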
   4564 static void
   4565 wm_set_vlan(struct wm_softc *sc)
   4566 {
   4567 
   4568 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4569 		device_xname(sc->sc_dev), __func__));
   4570 
   4571 	/* Deal with VLAN enables. */
   4572 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4573 		sc->sc_ctrl |= CTRL_VME;
   4574 	else
   4575 		sc->sc_ctrl &= ~CTRL_VME;
   4576 
   4577 	/* Write the control registers. */
   4578 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4579 }
   4580 
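/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of zero,
 *	program a 10ms (pre-version-2 GCR) or 16ms (DCSR2) value, and
 *	always disable completion timeout resend.
 */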
   4581 static void
   4582 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4583 {
   4584 	uint32_t gcr;
   4585 	pcireg_t ctrl2;
   4586 
   4587 	gcr = CSR_READ(sc, WMREG_GCR);
   4588 
   4589 	/* Only take action if timeout value is defaulted to 0 */
   4590 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4591 		goto out;
   4592 
   4593 	if ((gcr & GCR_CAP_VER2) == 0) {
   4594 		gcr |= GCR_CMPL_TMOUT_10MS;
   4595 		goto out;
   4596 	}
   4597 
   4598 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4599 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4600 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4601 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4602 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4603 
   4604 out:
   4605 	/* Disable completion timeout resend */
   4606 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4607 
   4608 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4609 }
   4610 
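/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto read (EECD_EE_AUTORD) to complete on
 *	devices that support it; log an error on timeout.
 */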
   4611 void
   4612 wm_get_auto_rd_done(struct wm_softc *sc)
   4613 {
   4614 	int i;
   4615 
    4616 	/* Wait for eeprom to reload */
   4617 	switch (sc->sc_type) {
   4618 	case WM_T_82571:
   4619 	case WM_T_82572:
   4620 	case WM_T_82573:
   4621 	case WM_T_82574:
   4622 	case WM_T_82583:
   4623 	case WM_T_82575:
   4624 	case WM_T_82576:
   4625 	case WM_T_82580:
   4626 	case WM_T_I350:
   4627 	case WM_T_I354:
   4628 	case WM_T_I210:
   4629 	case WM_T_I211:
   4630 	case WM_T_80003:
   4631 	case WM_T_ICH8:
   4632 	case WM_T_ICH9:
   4633 		for (i = 0; i < 10; i++) {
   4634 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4635 				break;
   4636 			delay(1000);
   4637 		}
   4638 		if (i == 10) {
   4639 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4640 			    "complete\n", device_xname(sc->sc_dev));
   4641 		}
   4642 		break;
   4643 	default:
   4644 		break;
   4645 	}
   4646 }
   4647 
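/*
 * wm_lan_init_done:
 *
 *	Poll the STATUS register until STATUS_LAN_INIT_DONE is set,
 *	then clear the bit.  Used on ICH10 and newer PCH devices.
 */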
   4648 void
   4649 wm_lan_init_done(struct wm_softc *sc)
   4650 {
   4651 	uint32_t reg = 0;
   4652 	int i;
   4653 
   4654 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4655 		device_xname(sc->sc_dev), __func__));
   4656 
   4657 	/* Wait for eeprom to reload */
   4658 	switch (sc->sc_type) {
   4659 	case WM_T_ICH10:
   4660 	case WM_T_PCH:
   4661 	case WM_T_PCH2:
   4662 	case WM_T_PCH_LPT:
   4663 	case WM_T_PCH_SPT:
   4664 	case WM_T_PCH_CNP:
   4665 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4666 			reg = CSR_READ(sc, WMREG_STATUS);
   4667 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4668 				break;
   4669 			delay(100);
   4670 		}
   4671 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4672 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4673 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4674 		}
   4675 		break;
   4676 	default:
   4677 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4678 		    __func__);
   4679 		break;
   4680 	}
   4681 
   4682 	reg &= ~STATUS_LAN_INIT_DONE;
   4683 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4684 }
   4685 
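/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	from the NVM after a reset.  The wait method depends on the
 *	device type.
 */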
   4686 void
   4687 wm_get_cfg_done(struct wm_softc *sc)
   4688 {
   4689 	int mask;
   4690 	uint32_t reg;
   4691 	int i;
   4692 
   4693 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4694 		device_xname(sc->sc_dev), __func__));
   4695 
   4696 	/* Wait for eeprom to reload */
   4697 	switch (sc->sc_type) {
   4698 	case WM_T_82542_2_0:
   4699 	case WM_T_82542_2_1:
   4700 		/* null */
   4701 		break;
   4702 	case WM_T_82543:
   4703 	case WM_T_82544:
   4704 	case WM_T_82540:
   4705 	case WM_T_82545:
   4706 	case WM_T_82545_3:
   4707 	case WM_T_82546:
   4708 	case WM_T_82546_3:
   4709 	case WM_T_82541:
   4710 	case WM_T_82541_2:
   4711 	case WM_T_82547:
   4712 	case WM_T_82547_2:
   4713 	case WM_T_82573:
   4714 	case WM_T_82574:
   4715 	case WM_T_82583:
   4716 		/* generic */
   4717 		delay(10*1000);
   4718 		break;
   4719 	case WM_T_80003:
   4720 	case WM_T_82571:
   4721 	case WM_T_82572:
   4722 	case WM_T_82575:
   4723 	case WM_T_82576:
   4724 	case WM_T_82580:
   4725 	case WM_T_I350:
   4726 	case WM_T_I354:
   4727 	case WM_T_I210:
   4728 	case WM_T_I211:
   4729 		if (sc->sc_type == WM_T_82571) {
   4730 			/* Only 82571 shares port 0 */
   4731 			mask = EEMNGCTL_CFGDONE_0;
   4732 		} else
   4733 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4734 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4735 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4736 				break;
   4737 			delay(1000);
   4738 		}
   4739 		if (i >= WM_PHY_CFG_TIMEOUT)
   4740 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4741 				device_xname(sc->sc_dev), __func__));
   4742 		break;
   4743 	case WM_T_ICH8:
   4744 	case WM_T_ICH9:
   4745 	case WM_T_ICH10:
   4746 	case WM_T_PCH:
   4747 	case WM_T_PCH2:
   4748 	case WM_T_PCH_LPT:
   4749 	case WM_T_PCH_SPT:
   4750 	case WM_T_PCH_CNP:
   4751 		delay(10*1000);
   4752 		if (sc->sc_type >= WM_T_ICH10)
   4753 			wm_lan_init_done(sc);
   4754 		else
   4755 			wm_get_auto_rd_done(sc);
   4756 
   4757 		/* Clear PHY Reset Asserted bit */
   4758 		reg = CSR_READ(sc, WMREG_STATUS);
   4759 		if ((reg & STATUS_PHYRA) != 0)
   4760 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4761 		break;
   4762 	default:
   4763 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4764 		    __func__);
   4765 		break;
   4766 	}
   4767 }
   4768 
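/*
 * wm_phy_post_reset:
 *
 *	Perform the PHY workarounds and LCD/OEM configuration that must
 *	follow a PHY reset.
 */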
   4769 int
   4770 wm_phy_post_reset(struct wm_softc *sc)
   4771 {
   4772 	device_t dev = sc->sc_dev;
   4773 	uint16_t reg;
   4774 	int rv = 0;
   4775 
   4776 	/* This function is only for ICH8 and newer. */
   4777 	if (sc->sc_type < WM_T_ICH8)
   4778 		return 0;
   4779 
   4780 	if (wm_phy_resetisblocked(sc)) {
   4781 		/* XXX */
   4782 		device_printf(dev, "PHY is blocked\n");
   4783 		return -1;
   4784 	}
   4785 
   4786 	/* Allow time for h/w to get to quiescent state after reset */
   4787 	delay(10*1000);
   4788 
   4789 	/* Perform any necessary post-reset workarounds */
   4790 	if (sc->sc_type == WM_T_PCH)
   4791 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4792 	else if (sc->sc_type == WM_T_PCH2)
   4793 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4794 	if (rv != 0)
   4795 		return rv;
   4796 
   4797 	/* Clear the host wakeup bit after lcd reset */
   4798 	if (sc->sc_type >= WM_T_PCH) {
   4799 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4800 		reg &= ~BM_WUC_HOST_WU_BIT;
   4801 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4802 	}
   4803 
   4804 	/* Configure the LCD with the extended configuration region in NVM */
   4805 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4806 		return rv;
   4807 
   4808 	/* Configure the LCD with the OEM bits in NVM */
   4809 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4810 
   4811 	if (sc->sc_type == WM_T_PCH2) {
   4812 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4813 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4814 			delay(10 * 1000);
   4815 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4816 		}
   4817 		/* Set EEE LPI Update Timer to 200usec */
   4818 		rv = sc->phy.acquire(sc);
   4819 		if (rv)
   4820 			return rv;
   4821 		rv = wm_write_emi_reg_locked(dev,
   4822 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4823 		sc->phy.release(sc);
   4824 	}
   4825 
   4826 	return rv;
   4827 }
   4828 
   4829 /* Only for PCH and newer */
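/*
 * wm_write_smbus_addr:
 *
 *	Copy the SMBus address (and, on I217, the SMBus frequency) from
 *	the STRAP register into the PHY's HV_SMB_ADDR register.
 */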
   4830 static int
   4831 wm_write_smbus_addr(struct wm_softc *sc)
   4832 {
   4833 	uint32_t strap, freq;
   4834 	uint16_t phy_data;
   4835 	int rv;
   4836 
   4837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4838 		device_xname(sc->sc_dev), __func__));
   4839 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4840 
   4841 	strap = CSR_READ(sc, WMREG_STRAP);
   4842 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4843 
   4844 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4845 	if (rv != 0)
   4846 		return rv;
   4847 
   4848 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4849 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4850 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4851 
   4852 	if (sc->sc_phytype == WMPHY_I217) {
   4853 		/* Restore SMBus frequency */
    4854 		if (freq--) {
   4855 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4856 			    | HV_SMB_ADDR_FREQ_HIGH);
   4857 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4858 			    HV_SMB_ADDR_FREQ_LOW);
   4859 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4860 			    HV_SMB_ADDR_FREQ_HIGH);
   4861 		} else
   4862 			DPRINTF(sc, WM_DEBUG_INIT,
    4863 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   4864 				device_xname(sc->sc_dev), __func__));
   4865 	}
   4866 
   4867 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4868 	    phy_data);
   4869 }
   4870 
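/*
 * wm_init_lcd_from_nvm:
 *
 *	Configure the LCD (internal PHY) from the extended configuration
 *	region in the NVM when software configuration is enabled.
 */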
   4871 static int
   4872 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4873 {
   4874 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4875 	uint16_t phy_page = 0;
   4876 	int rv = 0;
   4877 
   4878 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4879 		device_xname(sc->sc_dev), __func__));
   4880 
   4881 	switch (sc->sc_type) {
   4882 	case WM_T_ICH8:
   4883 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4884 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4885 			return 0;
   4886 
   4887 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4888 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4889 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4890 			break;
   4891 		}
   4892 		/* FALLTHROUGH */
   4893 	case WM_T_PCH:
   4894 	case WM_T_PCH2:
   4895 	case WM_T_PCH_LPT:
   4896 	case WM_T_PCH_SPT:
   4897 	case WM_T_PCH_CNP:
   4898 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4899 		break;
   4900 	default:
   4901 		return 0;
   4902 	}
   4903 
   4904 	if ((rv = sc->phy.acquire(sc)) != 0)
   4905 		return rv;
   4906 
   4907 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4908 	if ((reg & sw_cfg_mask) == 0)
   4909 		goto release;
   4910 
   4911 	/*
   4912 	 * Make sure HW does not configure LCD from PHY extended configuration
   4913 	 * before SW configuration
   4914 	 */
   4915 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4916 	if ((sc->sc_type < WM_T_PCH2)
   4917 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4918 		goto release;
   4919 
   4920 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4921 		device_xname(sc->sc_dev), __func__));
   4922 	/* word_addr is in DWORD */
   4923 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4924 
   4925 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4926 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4927 	if (cnf_size == 0)
   4928 		goto release;
   4929 
   4930 	if (((sc->sc_type == WM_T_PCH)
   4931 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4932 	    || (sc->sc_type > WM_T_PCH)) {
   4933 		/*
   4934 		 * HW configures the SMBus address and LEDs when the OEM and
   4935 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4936 		 * are cleared, SW will configure them instead.
   4937 		 */
   4938 		DPRINTF(sc, WM_DEBUG_INIT,
   4939 		    ("%s: %s: Configure SMBus and LED\n",
   4940 			device_xname(sc->sc_dev), __func__));
   4941 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4942 			goto release;
   4943 
   4944 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4945 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4946 		    (uint16_t)reg);
   4947 		if (rv != 0)
   4948 			goto release;
   4949 	}
   4950 
   4951 	/* Configure LCD from extended configuration region. */
   4952 	for (i = 0; i < cnf_size; i++) {
   4953 		uint16_t reg_data, reg_addr;
   4954 
   4955 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4956 			goto release;
   4957 
   4958 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4959 			goto release;
   4960 
   4961 		if (reg_addr == IGPHY_PAGE_SELECT)
   4962 			phy_page = reg_data;
   4963 
   4964 		reg_addr &= IGPHY_MAXREGADDR;
   4965 		reg_addr |= phy_page;
   4966 
   4967 		KASSERT(sc->phy.writereg_locked != NULL);
   4968 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4969 		    reg_data);
   4970 	}
   4971 
   4972 release:
   4973 	sc->phy.release(sc);
   4974 	return rv;
   4975 }
   4976 
   4977 /*
   4978  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4979  *  @sc:       pointer to the HW structure
    4980  *  @d0_state: true when entering D0, false when entering D3
   4981  *
   4982  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4983  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4984  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4985  */
   4986 int
   4987 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4988 {
   4989 	uint32_t mac_reg;
   4990 	uint16_t oem_reg;
   4991 	int rv;
   4992 
   4993 	if (sc->sc_type < WM_T_PCH)
   4994 		return 0;
   4995 
   4996 	rv = sc->phy.acquire(sc);
   4997 	if (rv != 0)
   4998 		return rv;
   4999 
   5000 	if (sc->sc_type == WM_T_PCH) {
   5001 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5002 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5003 			goto release;
   5004 	}
   5005 
   5006 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5007 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5008 		goto release;
   5009 
   5010 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5011 
   5012 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5013 	if (rv != 0)
   5014 		goto release;
   5015 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5016 
   5017 	if (d0_state) {
   5018 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5019 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5020 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5021 			oem_reg |= HV_OEM_BITS_LPLU;
   5022 	} else {
   5023 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5024 		    != 0)
   5025 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5026 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5027 		    != 0)
   5028 			oem_reg |= HV_OEM_BITS_LPLU;
   5029 	}
   5030 
   5031 	/* Set Restart auto-neg to activate the bits */
   5032 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5033 	    && (wm_phy_resetisblocked(sc) == false))
   5034 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5035 
   5036 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5037 
   5038 release:
   5039 	sc->phy.release(sc);
   5040 
   5041 	return rv;
   5042 }
   5043 
   5044 /* Init hardware bits */
   5045 void
   5046 wm_initialize_hardware_bits(struct wm_softc *sc)
   5047 {
   5048 	uint32_t tarc0, tarc1, reg;
   5049 
   5050 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5051 		device_xname(sc->sc_dev), __func__));
   5052 
   5053 	/* For 82571 variant, 80003 and ICHs */
   5054 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5055 	    || WM_IS_ICHPCH(sc)) {
   5056 
   5057 		/* Transmit Descriptor Control 0 */
   5058 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5059 		reg |= TXDCTL_COUNT_DESC;
   5060 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5061 
   5062 		/* Transmit Descriptor Control 1 */
   5063 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5064 		reg |= TXDCTL_COUNT_DESC;
   5065 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5066 
   5067 		/* TARC0 */
   5068 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5069 		switch (sc->sc_type) {
   5070 		case WM_T_82571:
   5071 		case WM_T_82572:
   5072 		case WM_T_82573:
   5073 		case WM_T_82574:
   5074 		case WM_T_82583:
   5075 		case WM_T_80003:
   5076 			/* Clear bits 30..27 */
   5077 			tarc0 &= ~__BITS(30, 27);
   5078 			break;
   5079 		default:
   5080 			break;
   5081 		}
   5082 
   5083 		switch (sc->sc_type) {
   5084 		case WM_T_82571:
   5085 		case WM_T_82572:
   5086 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5087 
   5088 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5089 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5090 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5091 			/* 8257[12] Errata No.7 */
    5092 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5093 
   5094 			/* TARC1 bit 28 */
   5095 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5096 				tarc1 &= ~__BIT(28);
   5097 			else
   5098 				tarc1 |= __BIT(28);
   5099 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5100 
   5101 			/*
   5102 			 * 8257[12] Errata No.13
    5103 			 * Disable Dynamic Clock Gating.
   5104 			 */
   5105 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5106 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5107 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5108 			break;
   5109 		case WM_T_82573:
   5110 		case WM_T_82574:
   5111 		case WM_T_82583:
   5112 			if ((sc->sc_type == WM_T_82574)
   5113 			    || (sc->sc_type == WM_T_82583))
   5114 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5115 
   5116 			/* Extended Device Control */
   5117 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5118 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5119 			reg |= __BIT(22);	/* Set bit 22 */
   5120 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5121 
   5122 			/* Device Control */
   5123 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5124 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5125 
   5126 			/* PCIe Control Register */
   5127 			/*
   5128 			 * 82573 Errata (unknown).
   5129 			 *
   5130 			 * 82574 Errata 25 and 82583 Errata 12
   5131 			 * "Dropped Rx Packets":
    5132 			 *   NVM image version 2.1.4 and newer do not have this bug.
   5133 			 */
   5134 			reg = CSR_READ(sc, WMREG_GCR);
   5135 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5136 			CSR_WRITE(sc, WMREG_GCR, reg);
   5137 
   5138 			if ((sc->sc_type == WM_T_82574)
   5139 			    || (sc->sc_type == WM_T_82583)) {
   5140 				/*
   5141 				 * Document says this bit must be set for
   5142 				 * proper operation.
   5143 				 */
   5144 				reg = CSR_READ(sc, WMREG_GCR);
   5145 				reg |= __BIT(22);
   5146 				CSR_WRITE(sc, WMREG_GCR, reg);
   5147 
   5148 				/*
    5149 				 * Apply a workaround for the hardware
    5150 				 * errata documented in the errata docs.
    5151 				 * This fixes an issue where error-prone
    5152 				 * or unreliable PCIe completions occur,
    5153 				 * particularly with ASPM enabled. Without
    5154 				 * the fix, the issue can cause Tx timeouts.
   5155 				 */
   5156 				reg = CSR_READ(sc, WMREG_GCR2);
   5157 				reg |= __BIT(0);
   5158 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5159 			}
   5160 			break;
   5161 		case WM_T_80003:
   5162 			/* TARC0 */
   5163 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5164 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5165 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5166 
   5167 			/* TARC1 bit 28 */
   5168 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5169 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5170 				tarc1 &= ~__BIT(28);
   5171 			else
   5172 				tarc1 |= __BIT(28);
   5173 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5174 			break;
   5175 		case WM_T_ICH8:
   5176 		case WM_T_ICH9:
   5177 		case WM_T_ICH10:
   5178 		case WM_T_PCH:
   5179 		case WM_T_PCH2:
   5180 		case WM_T_PCH_LPT:
   5181 		case WM_T_PCH_SPT:
   5182 		case WM_T_PCH_CNP:
   5183 			/* TARC0 */
   5184 			if (sc->sc_type == WM_T_ICH8) {
   5185 				/* Set TARC0 bits 29 and 28 */
   5186 				tarc0 |= __BITS(29, 28);
   5187 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5188 				tarc0 |= __BIT(29);
   5189 				/*
   5190 				 *  Drop bit 28. From Linux.
   5191 				 * See I218/I219 spec update
   5192 				 * "5. Buffer Overrun While the I219 is
   5193 				 * Processing DMA Transactions"
   5194 				 */
   5195 				tarc0 &= ~__BIT(28);
   5196 			}
   5197 			/* Set TARC0 bits 23,24,26,27 */
   5198 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5199 
   5200 			/* CTRL_EXT */
   5201 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5202 			reg |= __BIT(22);	/* Set bit 22 */
   5203 			/*
   5204 			 * Enable PHY low-power state when MAC is at D3
   5205 			 * w/o WoL
   5206 			 */
   5207 			if (sc->sc_type >= WM_T_PCH)
   5208 				reg |= CTRL_EXT_PHYPDEN;
   5209 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5210 
   5211 			/* TARC1 */
   5212 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5213 			/* bit 28 */
   5214 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5215 				tarc1 &= ~__BIT(28);
   5216 			else
   5217 				tarc1 |= __BIT(28);
   5218 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5219 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5220 
   5221 			/* Device Status */
   5222 			if (sc->sc_type == WM_T_ICH8) {
   5223 				reg = CSR_READ(sc, WMREG_STATUS);
   5224 				reg &= ~__BIT(31);
   5225 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5226 
   5227 			}
   5228 
   5229 			/* IOSFPC */
   5230 			if (sc->sc_type == WM_T_PCH_SPT) {
   5231 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5232 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5233 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5234 			}
   5235 			/*
   5236 			 * Work-around descriptor data corruption issue during
   5237 			 * NFS v2 UDP traffic, just disable the NFS filtering
   5238 			 * capability.
   5239 			 */
   5240 			reg = CSR_READ(sc, WMREG_RFCTL);
   5241 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5242 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5243 			break;
   5244 		default:
   5245 			break;
   5246 		}
   5247 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5248 
   5249 		switch (sc->sc_type) {
   5250 		case WM_T_82571:
   5251 		case WM_T_82572:
   5252 		case WM_T_82573:
   5253 		case WM_T_80003:
   5254 		case WM_T_ICH8:
   5255 			/*
   5256 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5257 			 * others to avoid RSS Hash Value bug.
   5258 			 */
   5259 			reg = CSR_READ(sc, WMREG_RFCTL);
    5260 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5261 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5262 			break;
   5263 		case WM_T_82574:
    5264 			/* Use extended Rx descriptors. */
   5265 			reg = CSR_READ(sc, WMREG_RFCTL);
   5266 			reg |= WMREG_RFCTL_EXSTEN;
   5267 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5268 			break;
   5269 		default:
   5270 			break;
   5271 		}
   5272 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5273 		/*
   5274 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5275 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5276 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5277 		 * Correctly by the Device"
   5278 		 *
   5279 		 * I354(C2000) Errata AVR53:
   5280 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5281 		 * Hang"
   5282 		 */
   5283 		reg = CSR_READ(sc, WMREG_RFCTL);
   5284 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5285 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5286 	}
   5287 }
   5288 
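/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate a raw RXPBS register value into a packet buffer size
 *	via the 82580 lookup table; return 0 for out-of-range values.
 */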
   5289 static uint32_t
   5290 wm_rxpbs_adjust_82580(uint32_t val)
   5291 {
   5292 	uint32_t rv = 0;
   5293 
   5294 	if (val < __arraycount(wm_82580_rxpbs_table))
   5295 		rv = wm_82580_rxpbs_table[val];
   5296 
   5297 	return rv;
   5298 }
   5299 
   5300 /*
   5301  * wm_reset_phy:
   5302  *
   5303  *	generic PHY reset function.
   5304  *	Same as e1000_phy_hw_reset_generic()
   5305  */
   5306 static int
   5307 wm_reset_phy(struct wm_softc *sc)
   5308 {
   5309 	uint32_t reg;
   5310 	int rv;
   5311 
   5312 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5313 		device_xname(sc->sc_dev), __func__));
   5314 	if (wm_phy_resetisblocked(sc))
   5315 		return -1;
   5316 
   5317 	rv = sc->phy.acquire(sc);
   5318 	if (rv) {
   5319 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5320 		    __func__, rv);
   5321 		return rv;
   5322 	}
   5323 
   5324 	reg = CSR_READ(sc, WMREG_CTRL);
   5325 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5326 	CSR_WRITE_FLUSH(sc);
   5327 
   5328 	delay(sc->phy.reset_delay_us);
   5329 
   5330 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5331 	CSR_WRITE_FLUSH(sc);
   5332 
   5333 	delay(150);
   5334 
   5335 	sc->phy.release(sc);
   5336 
   5337 	wm_get_cfg_done(sc);
   5338 	wm_phy_post_reset(sc);
   5339 
   5340 	return 0;
   5341 }
   5342 
   5343 /*
   5344  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5345  *
   5346  * In i219, the descriptor rings must be emptied before resetting the HW
   5347  * or before changing the device state to D3 during runtime (runtime PM).
   5348  *
   5349  * Failure to do this will cause the HW to enter a unit hang state which can
   5350  * only be released by PCI reset on the device.
   5351  *
   5352  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5353  */
   5354 static void
   5355 wm_flush_desc_rings(struct wm_softc *sc)
   5356 {
   5357 	pcireg_t preg;
   5358 	uint32_t reg;
   5359 	struct wm_txqueue *txq;
   5360 	wiseman_txdesc_t *txd;
   5361 	int nexttx;
   5362 	uint32_t rctl;
   5363 
   5364 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5365 
   5366 	/* First, disable MULR fix in FEXTNVM11 */
   5367 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5368 	reg |= FEXTNVM11_DIS_MULRFIX;
   5369 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5370 
   5371 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5372 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5373 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5374 		return;
   5375 
   5376 	/*
   5377 	 * Remove all descriptors from the tx_ring.
   5378 	 *
   5379 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5380 	 * happens when the HW reads the regs. We assign the ring itself as
    5381 	 * the data of the next descriptor. We don't care about the data;
    5382 	 * we are about to reset the HW anyway.
   5383 	 */
   5384 #ifdef WM_DEBUG
   5385 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5386 #endif
   5387 	reg = CSR_READ(sc, WMREG_TCTL);
   5388 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5389 
   5390 	txq = &sc->sc_queue[0].wmq_txq;
   5391 	nexttx = txq->txq_next;
   5392 	txd = &txq->txq_descs[nexttx];
   5393 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5394 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5395 	txd->wtx_fields.wtxu_status = 0;
   5396 	txd->wtx_fields.wtxu_options = 0;
   5397 	txd->wtx_fields.wtxu_vlan = 0;
   5398 
   5399 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5400 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5401 
   5402 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5403 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5404 	CSR_WRITE_FLUSH(sc);
   5405 	delay(250);
   5406 
   5407 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5408 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5409 		return;
   5410 
   5411 	/*
   5412 	 * Mark all descriptors in the RX ring as consumed and disable the
   5413 	 * rx ring.
   5414 	 */
   5415 #ifdef WM_DEBUG
   5416 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5417 #endif
   5418 	rctl = CSR_READ(sc, WMREG_RCTL);
   5419 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5420 	CSR_WRITE_FLUSH(sc);
   5421 	delay(150);
   5422 
   5423 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5424 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5425 	reg &= 0xffffc000;
   5426 	/*
   5427 	 * Update thresholds: prefetch threshold to 31, host threshold
   5428 	 * to 1 and make sure the granularity is "descriptors" and not
   5429 	 * "cache lines"
   5430 	 */
   5431 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5432 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5433 
   5434 	/* Momentarily enable the RX ring for the changes to take effect */
   5435 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5436 	CSR_WRITE_FLUSH(sc);
   5437 	delay(150);
   5438 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5439 }
   5440 
   5441 /*
   5442  * wm_reset:
   5443  *
   5444  *	Reset the i82542 chip.
   5445  */
   5446 static void
   5447 wm_reset(struct wm_softc *sc)
   5448 {
   5449 	int phy_reset = 0;
   5450 	int i, error = 0;
   5451 	uint32_t reg;
   5452 	uint16_t kmreg;
   5453 	int rv;
   5454 
   5455 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5456 		device_xname(sc->sc_dev), __func__));
   5457 	KASSERT(sc->sc_type != 0);
   5458 
   5459 	/*
   5460 	 * Allocate on-chip memory according to the MTU size.
   5461 	 * The Packet Buffer Allocation register must be written
   5462 	 * before the chip is reset.
   5463 	 */
   5464 	switch (sc->sc_type) {
   5465 	case WM_T_82547:
   5466 	case WM_T_82547_2:
   5467 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5468 		    PBA_22K : PBA_30K;
   5469 		for (i = 0; i < sc->sc_nqueues; i++) {
   5470 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5471 			txq->txq_fifo_head = 0;
   5472 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5473 			txq->txq_fifo_size =
   5474 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5475 			txq->txq_fifo_stall = 0;
   5476 		}
   5477 		break;
   5478 	case WM_T_82571:
   5479 	case WM_T_82572:
    5480 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5481 	case WM_T_80003:
   5482 		sc->sc_pba = PBA_32K;
   5483 		break;
   5484 	case WM_T_82573:
   5485 		sc->sc_pba = PBA_12K;
   5486 		break;
   5487 	case WM_T_82574:
   5488 	case WM_T_82583:
   5489 		sc->sc_pba = PBA_20K;
   5490 		break;
   5491 	case WM_T_82576:
   5492 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5493 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5494 		break;
   5495 	case WM_T_82580:
   5496 	case WM_T_I350:
   5497 	case WM_T_I354:
   5498 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5499 		break;
   5500 	case WM_T_I210:
   5501 	case WM_T_I211:
   5502 		sc->sc_pba = PBA_34K;
   5503 		break;
   5504 	case WM_T_ICH8:
   5505 		/* Workaround for a bit corruption issue in FIFO memory */
   5506 		sc->sc_pba = PBA_8K;
   5507 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5508 		break;
   5509 	case WM_T_ICH9:
   5510 	case WM_T_ICH10:
   5511 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5512 		    PBA_14K : PBA_10K;
   5513 		break;
   5514 	case WM_T_PCH:
   5515 	case WM_T_PCH2:	/* XXX 14K? */
   5516 	case WM_T_PCH_LPT:
   5517 	case WM_T_PCH_SPT:
   5518 	case WM_T_PCH_CNP:
   5519 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5520 		    PBA_12K : PBA_26K;
   5521 		break;
   5522 	default:
   5523 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5524 		    PBA_40K : PBA_48K;
   5525 		break;
   5526 	}
   5527 	/*
   5528 	 * Only old or non-multiqueue devices have the PBA register
   5529 	 * XXX Need special handling for 82575.
   5530 	 */
   5531 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5532 	    || (sc->sc_type == WM_T_82575))
   5533 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5534 
   5535 	/* Prevent the PCI-E bus from sticking */
   5536 	if (sc->sc_flags & WM_F_PCIE) {
   5537 		int timeout = 800;
   5538 
   5539 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5540 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5541 
   5542 		while (timeout--) {
   5543 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5544 			    == 0)
   5545 				break;
   5546 			delay(100);
   5547 		}
   5548 		if (timeout == 0)
   5549 			device_printf(sc->sc_dev,
   5550 			    "failed to disable bus mastering\n");
   5551 	}
   5552 
   5553 	/* Set the completion timeout for interface */
   5554 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5555 	    || (sc->sc_type == WM_T_82580)
   5556 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5557 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5558 		wm_set_pcie_completion_timeout(sc);
   5559 
   5560 	/* Clear interrupt */
   5561 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5562 	if (wm_is_using_msix(sc)) {
   5563 		if (sc->sc_type != WM_T_82574) {
   5564 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5565 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5566 		} else
   5567 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5568 	}
   5569 
   5570 	/* Stop the transmit and receive processes. */
   5571 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5572 	sc->sc_rctl &= ~RCTL_EN;
   5573 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5574 	CSR_WRITE_FLUSH(sc);
   5575 
   5576 	/* XXX set_tbi_sbp_82543() */
   5577 
   5578 	delay(10*1000);
   5579 
   5580 	/* Must acquire the MDIO ownership before MAC reset */
   5581 	switch (sc->sc_type) {
   5582 	case WM_T_82573:
   5583 	case WM_T_82574:
   5584 	case WM_T_82583:
   5585 		error = wm_get_hw_semaphore_82573(sc);
   5586 		break;
   5587 	default:
   5588 		break;
   5589 	}
   5590 
   5591 	/*
   5592 	 * 82541 Errata 29? & 82547 Errata 28?
    5593 	 * See also the description of the PHY_RST bit in the CTRL register
   5594 	 * in 8254x_GBe_SDM.pdf.
   5595 	 */
   5596 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5597 		CSR_WRITE(sc, WMREG_CTRL,
   5598 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5599 		CSR_WRITE_FLUSH(sc);
   5600 		delay(5000);
   5601 	}
   5602 
   5603 	switch (sc->sc_type) {
   5604 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5605 	case WM_T_82541:
   5606 	case WM_T_82541_2:
   5607 	case WM_T_82547:
   5608 	case WM_T_82547_2:
   5609 		/*
   5610 		 * On some chipsets, a reset through a memory-mapped write
   5611 		 * cycle can cause the chip to reset before completing the
    5612 		 * write cycle. This causes a major headache that can be avoided
   5613 		 * by issuing the reset via indirect register writes through
   5614 		 * I/O space.
   5615 		 *
   5616 		 * So, if we successfully mapped the I/O BAR at attach time,
   5617 		 * use that. Otherwise, try our luck with a memory-mapped
   5618 		 * reset.
   5619 		 */
   5620 		if (sc->sc_flags & WM_F_IOH_VALID)
   5621 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5622 		else
   5623 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5624 		break;
   5625 	case WM_T_82545_3:
   5626 	case WM_T_82546_3:
   5627 		/* Use the shadow control register on these chips. */
   5628 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5629 		break;
   5630 	case WM_T_80003:
   5631 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5632 		if (sc->phy.acquire(sc) != 0)
   5633 			break;
   5634 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5635 		sc->phy.release(sc);
   5636 		break;
   5637 	case WM_T_ICH8:
   5638 	case WM_T_ICH9:
   5639 	case WM_T_ICH10:
   5640 	case WM_T_PCH:
   5641 	case WM_T_PCH2:
   5642 	case WM_T_PCH_LPT:
   5643 	case WM_T_PCH_SPT:
   5644 	case WM_T_PCH_CNP:
   5645 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5646 		if (wm_phy_resetisblocked(sc) == false) {
   5647 			/*
   5648 			 * Gate automatic PHY configuration by hardware on
   5649 			 * non-managed 82579
   5650 			 */
   5651 			if ((sc->sc_type == WM_T_PCH2)
   5652 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5653 				== 0))
   5654 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5655 
   5656 			reg |= CTRL_PHY_RESET;
   5657 			phy_reset = 1;
   5658 		} else
   5659 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5660 		if (sc->phy.acquire(sc) != 0)
   5661 			break;
   5662 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5663 		/* Don't insert a completion barrier during the reset */
   5664 		delay(20*1000);
   5665 		/*
   5666 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5667 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5668 		 * only. See also wm_get_swflag_ich8lan().
   5669 		 */
   5670 		mutex_exit(sc->sc_ich_phymtx);
   5671 		break;
   5672 	case WM_T_82580:
   5673 	case WM_T_I350:
   5674 	case WM_T_I354:
   5675 	case WM_T_I210:
   5676 	case WM_T_I211:
   5677 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5678 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5679 			CSR_WRITE_FLUSH(sc);
   5680 		delay(5000);
   5681 		break;
   5682 	case WM_T_82542_2_0:
   5683 	case WM_T_82542_2_1:
   5684 	case WM_T_82543:
   5685 	case WM_T_82540:
   5686 	case WM_T_82545:
   5687 	case WM_T_82546:
   5688 	case WM_T_82571:
   5689 	case WM_T_82572:
   5690 	case WM_T_82573:
   5691 	case WM_T_82574:
   5692 	case WM_T_82575:
   5693 	case WM_T_82576:
   5694 	case WM_T_82583:
   5695 	default:
   5696 		/* Everything else can safely use the documented method. */
   5697 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5698 		break;
   5699 	}
   5700 
   5701 	/* Must release the MDIO ownership after MAC reset */
   5702 	switch (sc->sc_type) {
   5703 	case WM_T_82573:
   5704 	case WM_T_82574:
   5705 	case WM_T_82583:
   5706 		if (error == 0)
   5707 			wm_put_hw_semaphore_82573(sc);
   5708 		break;
   5709 	default:
   5710 		break;
   5711 	}
   5712 
   5713 	/* Set Phy Config Counter to 50msec */
   5714 	if (sc->sc_type == WM_T_PCH2) {
   5715 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5716 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5717 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5718 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5719 	}
   5720 
   5721 	if (phy_reset != 0)
   5722 		wm_get_cfg_done(sc);
   5723 
   5724 	/* Reload EEPROM */
   5725 	switch (sc->sc_type) {
   5726 	case WM_T_82542_2_0:
   5727 	case WM_T_82542_2_1:
   5728 	case WM_T_82543:
   5729 	case WM_T_82544:
   5730 		delay(10);
   5731 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5732 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5733 		CSR_WRITE_FLUSH(sc);
   5734 		delay(2000);
   5735 		break;
   5736 	case WM_T_82540:
   5737 	case WM_T_82545:
   5738 	case WM_T_82545_3:
   5739 	case WM_T_82546:
   5740 	case WM_T_82546_3:
   5741 		delay(5*1000);
   5742 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5743 		break;
   5744 	case WM_T_82541:
   5745 	case WM_T_82541_2:
   5746 	case WM_T_82547:
   5747 	case WM_T_82547_2:
   5748 		delay(20000);
   5749 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5750 		break;
   5751 	case WM_T_82571:
   5752 	case WM_T_82572:
   5753 	case WM_T_82573:
   5754 	case WM_T_82574:
   5755 	case WM_T_82583:
   5756 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5757 			delay(10);
   5758 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5759 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5760 			CSR_WRITE_FLUSH(sc);
   5761 		}
   5762 		/* check EECD_EE_AUTORD */
   5763 		wm_get_auto_rd_done(sc);
   5764 		/*
    5765 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5766 		 * is set.
   5767 		 */
   5768 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5769 		    || (sc->sc_type == WM_T_82583))
   5770 			delay(25*1000);
   5771 		break;
   5772 	case WM_T_82575:
   5773 	case WM_T_82576:
   5774 	case WM_T_82580:
   5775 	case WM_T_I350:
   5776 	case WM_T_I354:
   5777 	case WM_T_I210:
   5778 	case WM_T_I211:
   5779 	case WM_T_80003:
   5780 		/* check EECD_EE_AUTORD */
   5781 		wm_get_auto_rd_done(sc);
   5782 		break;
   5783 	case WM_T_ICH8:
   5784 	case WM_T_ICH9:
   5785 	case WM_T_ICH10:
   5786 	case WM_T_PCH:
   5787 	case WM_T_PCH2:
   5788 	case WM_T_PCH_LPT:
   5789 	case WM_T_PCH_SPT:
   5790 	case WM_T_PCH_CNP:
   5791 		break;
   5792 	default:
   5793 		panic("%s: unknown type\n", __func__);
   5794 	}
   5795 
   5796 	/* Check whether EEPROM is present or not */
   5797 	switch (sc->sc_type) {
   5798 	case WM_T_82575:
   5799 	case WM_T_82576:
   5800 	case WM_T_82580:
   5801 	case WM_T_I350:
   5802 	case WM_T_I354:
   5803 	case WM_T_ICH8:
   5804 	case WM_T_ICH9:
   5805 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5806 			/* Not found */
   5807 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5808 			if (sc->sc_type == WM_T_82575)
   5809 				wm_reset_init_script_82575(sc);
   5810 		}
   5811 		break;
   5812 	default:
   5813 		break;
   5814 	}
   5815 
   5816 	if (phy_reset != 0)
   5817 		wm_phy_post_reset(sc);
   5818 
   5819 	if ((sc->sc_type == WM_T_82580)
   5820 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5821 		/* Clear global device reset status bit */
   5822 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5823 	}
   5824 
   5825 	/* Clear any pending interrupt events. */
   5826 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5827 	reg = CSR_READ(sc, WMREG_ICR);
   5828 	if (wm_is_using_msix(sc)) {
   5829 		if (sc->sc_type != WM_T_82574) {
   5830 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5831 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5832 		} else
   5833 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5834 	}
   5835 
   5836 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5837 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5838 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5839 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5840 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5841 		reg |= KABGTXD_BGSQLBIAS;
   5842 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5843 	}
   5844 
   5845 	/* Reload sc_ctrl */
   5846 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5847 
   5848 	wm_set_eee(sc);
   5849 
   5850 	/*
   5851 	 * For PCH, this write will make sure that any noise will be detected
   5852 	 * as a CRC error and be dropped rather than show up as a bad packet
   5853 	 * to the DMA engine
   5854 	 */
   5855 	if (sc->sc_type == WM_T_PCH)
   5856 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5857 
   5858 	if (sc->sc_type >= WM_T_82544)
   5859 		CSR_WRITE(sc, WMREG_WUC, 0);
   5860 
   5861 	if (sc->sc_type < WM_T_82575)
   5862 		wm_disable_aspm(sc); /* Workaround for some chips */
   5863 
   5864 	wm_reset_mdicnfg_82580(sc);
   5865 
   5866 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5867 		wm_pll_workaround_i210(sc);
   5868 
   5869 	if (sc->sc_type == WM_T_80003) {
   5870 		/* Default to TRUE to enable the MDIC W/A */
   5871 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5872 
   5873 		rv = wm_kmrn_readreg(sc,
   5874 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5875 		if (rv == 0) {
   5876 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5877 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5878 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5879 			else
   5880 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5881 		}
   5882 	}
   5883 }
   5884 
   5885 /*
   5886  * wm_add_rxbuf:
   5887  *
    5888  *	Add a receive buffer to the indicated descriptor.
   5889  */
   5890 static int
   5891 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5892 {
   5893 	struct wm_softc *sc = rxq->rxq_sc;
   5894 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5895 	struct mbuf *m;
   5896 	int error;
   5897 
   5898 	KASSERT(mutex_owned(rxq->rxq_lock));
   5899 
   5900 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5901 	if (m == NULL)
   5902 		return ENOBUFS;
   5903 
   5904 	MCLGET(m, M_DONTWAIT);
   5905 	if ((m->m_flags & M_EXT) == 0) {
   5906 		m_freem(m);
   5907 		return ENOBUFS;
   5908 	}
   5909 
   5910 	if (rxs->rxs_mbuf != NULL)
   5911 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5912 
   5913 	rxs->rxs_mbuf = m;
   5914 
   5915 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5916 	/*
   5917 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5918 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5919 	 */
   5920 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5921 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5922 	if (error) {
   5923 		/* XXX XXX XXX */
   5924 		aprint_error_dev(sc->sc_dev,
   5925 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5926 		panic("wm_add_rxbuf");
   5927 	}
   5928 
   5929 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5930 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5931 
   5932 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5933 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5934 			wm_init_rxdesc(rxq, idx);
   5935 	} else
   5936 		wm_init_rxdesc(rxq, idx);
   5937 
   5938 	return 0;
   5939 }
   5940 
   5941 /*
   5942  * wm_rxdrain:
   5943  *
   5944  *	Drain the receive queue.
   5945  */
   5946 static void
   5947 wm_rxdrain(struct wm_rxqueue *rxq)
   5948 {
   5949 	struct wm_softc *sc = rxq->rxq_sc;
   5950 	struct wm_rxsoft *rxs;
   5951 	int i;
   5952 
   5953 	KASSERT(mutex_owned(rxq->rxq_lock));
   5954 
   5955 	for (i = 0; i < WM_NRXDESC; i++) {
   5956 		rxs = &rxq->rxq_soft[i];
   5957 		if (rxs->rxs_mbuf != NULL) {
   5958 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5959 			m_freem(rxs->rxs_mbuf);
   5960 			rxs->rxs_mbuf = NULL;
   5961 		}
   5962 	}
   5963 }
   5964 
   5965 /*
    5966  * Set up registers for RSS.
   5967  *
   5968  * XXX not yet VMDq support
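          *
          * The redirection table is filled round-robin: RETA entry i maps to
          * queue (i % sc_nqueues), so with e.g. 4 queues, entries 0,1,2,3,4,...
          * map to queues 0,1,2,3,0,...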
   5969  */
   5970 static void
   5971 wm_init_rss(struct wm_softc *sc)
   5972 {
   5973 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5974 	int i;
   5975 
   5976 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5977 
   5978 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5979 		unsigned int qid, reta_ent;
   5980 
   5981 		qid  = i % sc->sc_nqueues;
   5982 		switch (sc->sc_type) {
   5983 		case WM_T_82574:
   5984 			reta_ent = __SHIFTIN(qid,
   5985 			    RETA_ENT_QINDEX_MASK_82574);
   5986 			break;
   5987 		case WM_T_82575:
   5988 			reta_ent = __SHIFTIN(qid,
   5989 			    RETA_ENT_QINDEX1_MASK_82575);
   5990 			break;
   5991 		default:
   5992 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5993 			break;
   5994 		}
   5995 
   5996 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5997 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5998 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5999 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6000 	}
   6001 
   6002 	rss_getkey((uint8_t *)rss_key);
   6003 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6004 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6005 
   6006 	if (sc->sc_type == WM_T_82574)
   6007 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6008 	else
   6009 		mrqc = MRQC_ENABLE_RSS_MQ;
   6010 
   6011 	/*
    6012 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   6013 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6014 	 */
   6015 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6016 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6017 #if 0
   6018 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6019 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6020 #endif
   6021 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6022 
   6023 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6024 }
   6025 
   6026 /*
    6027  * Adjust the TX and RX queue numbers which the system actually uses.
    6028  *
    6029  * The numbers are affected by the following parameters:
    6030  *     - The number of hardware queues
   6031  *     - The number of MSI-X vectors (= "nvectors" argument)
   6032  *     - ncpu
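          *
          * For example (hypothetical numbers): on an 82576 (16 hardware
          * queues) with 5 MSI-X vectors and 8 CPUs, only nvectors - 1 = 4
          * queues are used, because one vector is reserved for the link
          * interrupt.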
   6033  */
   6034 static void
   6035 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6036 {
   6037 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6038 
   6039 	if (nvectors < 2) {
   6040 		sc->sc_nqueues = 1;
   6041 		return;
   6042 	}
   6043 
   6044 	switch (sc->sc_type) {
   6045 	case WM_T_82572:
   6046 		hw_ntxqueues = 2;
   6047 		hw_nrxqueues = 2;
   6048 		break;
   6049 	case WM_T_82574:
   6050 		hw_ntxqueues = 2;
   6051 		hw_nrxqueues = 2;
   6052 		break;
   6053 	case WM_T_82575:
   6054 		hw_ntxqueues = 4;
   6055 		hw_nrxqueues = 4;
   6056 		break;
   6057 	case WM_T_82576:
   6058 		hw_ntxqueues = 16;
   6059 		hw_nrxqueues = 16;
   6060 		break;
   6061 	case WM_T_82580:
   6062 	case WM_T_I350:
   6063 	case WM_T_I354:
   6064 		hw_ntxqueues = 8;
   6065 		hw_nrxqueues = 8;
   6066 		break;
   6067 	case WM_T_I210:
   6068 		hw_ntxqueues = 4;
   6069 		hw_nrxqueues = 4;
   6070 		break;
   6071 	case WM_T_I211:
   6072 		hw_ntxqueues = 2;
   6073 		hw_nrxqueues = 2;
   6074 		break;
   6075 		/*
    6076 		 * The following Ethernet controllers do not support MSI-X;
   6077 		 * this driver doesn't let them use multiqueue.
   6078 		 *     - WM_T_80003
   6079 		 *     - WM_T_ICH8
   6080 		 *     - WM_T_ICH9
   6081 		 *     - WM_T_ICH10
   6082 		 *     - WM_T_PCH
   6083 		 *     - WM_T_PCH2
   6084 		 *     - WM_T_PCH_LPT
   6085 		 */
   6086 	default:
   6087 		hw_ntxqueues = 1;
   6088 		hw_nrxqueues = 1;
   6089 		break;
   6090 	}
   6091 
   6092 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6093 
   6094 	/*
    6095 	 * Since using more queues than MSI-X vectors cannot improve
    6096 	 * scaling, limit the number of queues actually used.
   6097 	 */
   6098 	if (nvectors < hw_nqueues + 1)
   6099 		sc->sc_nqueues = nvectors - 1;
   6100 	else
   6101 		sc->sc_nqueues = hw_nqueues;
   6102 
   6103 	/*
    6104 	 * Since using more queues than CPUs cannot improve scaling,
    6105 	 * limit the number of queues actually used.
   6106 	 */
   6107 	if (ncpu < sc->sc_nqueues)
   6108 		sc->sc_nqueues = ncpu;
   6109 }
   6110 
   6111 static inline bool
   6112 wm_is_using_msix(struct wm_softc *sc)
   6113 {
   6114 
   6115 	return (sc->sc_nintrs > 1);
   6116 }
   6117 
   6118 static inline bool
   6119 wm_is_using_multiqueue(struct wm_softc *sc)
   6120 {
   6121 
   6122 	return (sc->sc_nqueues > 1);
   6123 }
   6124 
   6125 static int
   6126 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6127 {
   6128 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6129 
   6130 	wmq->wmq_id = qidx;
   6131 	wmq->wmq_intr_idx = intr_idx;
   6132 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6133 	    wm_handle_queue, wmq);
   6134 	if (wmq->wmq_si != NULL)
   6135 		return 0;
   6136 
   6137 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6138 	    wmq->wmq_id);
   6139 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6140 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6141 	return ENOMEM;
   6142 }
   6143 
   6144 /*
    6145  * Both single-interrupt MSI and INTx can use this function.
   6146  */
   6147 static int
   6148 wm_setup_legacy(struct wm_softc *sc)
   6149 {
   6150 	pci_chipset_tag_t pc = sc->sc_pc;
   6151 	const char *intrstr = NULL;
   6152 	char intrbuf[PCI_INTRSTR_LEN];
   6153 	int error;
   6154 
   6155 	error = wm_alloc_txrx_queues(sc);
   6156 	if (error) {
   6157 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6158 		    error);
   6159 		return ENOMEM;
   6160 	}
   6161 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6162 	    sizeof(intrbuf));
   6163 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6164 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6165 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6166 	if (sc->sc_ihs[0] == NULL) {
   6167 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6168 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6169 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6170 		return ENOMEM;
   6171 	}
   6172 
   6173 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6174 	sc->sc_nintrs = 1;
   6175 
   6176 	return wm_softint_establish_queue(sc, 0, 0);
   6177 }
   6178 
   6179 static int
   6180 wm_setup_msix(struct wm_softc *sc)
   6181 {
   6182 	void *vih;
   6183 	kcpuset_t *affinity;
   6184 	int qidx, error, intr_idx, txrx_established;
   6185 	pci_chipset_tag_t pc = sc->sc_pc;
   6186 	const char *intrstr = NULL;
   6187 	char intrbuf[PCI_INTRSTR_LEN];
   6188 	char intr_xname[INTRDEVNAMEBUF];
   6189 
   6190 	if (sc->sc_nqueues < ncpu) {
   6191 		/*
    6192 		 * To avoid other devices' interrupts, the affinity of the
    6193 		 * Tx/Rx interrupts starts from CPU#1.
   6194 		 */
   6195 		sc->sc_affinity_offset = 1;
   6196 	} else {
   6197 		/*
    6198 		 * In this case, this device uses all CPUs, so for readability we
    6199 		 * make the affinitized cpu_index equal to the MSI-X vector number.
   6200 		 */
   6201 		sc->sc_affinity_offset = 0;
   6202 	}
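         	/*
         	 * Either way the mapping is round-robin: queue q is bound to
         	 * CPU (sc_affinity_offset + q) % ncpu, so with e.g. 4 queues
         	 * and 8 CPUs the Tx/Rx vectors land on CPUs 1-4 (the offset of
         	 * 1 keeps CPU#0 free).
         	 */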
   6203 
   6204 	error = wm_alloc_txrx_queues(sc);
   6205 	if (error) {
   6206 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6207 		    error);
   6208 		return ENOMEM;
   6209 	}
   6210 
   6211 	kcpuset_create(&affinity, false);
   6212 	intr_idx = 0;
   6213 
   6214 	/*
   6215 	 * TX and RX
   6216 	 */
   6217 	txrx_established = 0;
   6218 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6219 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6220 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6221 
   6222 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6223 		    sizeof(intrbuf));
   6224 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6225 		    PCI_INTR_MPSAFE, true);
   6226 		memset(intr_xname, 0, sizeof(intr_xname));
   6227 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6228 		    device_xname(sc->sc_dev), qidx);
   6229 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6230 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6231 		if (vih == NULL) {
   6232 			aprint_error_dev(sc->sc_dev,
   6233 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6234 			    intrstr ? " at " : "",
   6235 			    intrstr ? intrstr : "");
   6236 
   6237 			goto fail;
   6238 		}
   6239 		kcpuset_zero(affinity);
   6240 		/* Round-robin affinity */
   6241 		kcpuset_set(affinity, affinity_to);
   6242 		error = interrupt_distribute(vih, affinity, NULL);
   6243 		if (error == 0) {
   6244 			aprint_normal_dev(sc->sc_dev,
   6245 			    "for TX and RX interrupting at %s affinity to %u\n",
   6246 			    intrstr, affinity_to);
   6247 		} else {
   6248 			aprint_normal_dev(sc->sc_dev,
   6249 			    "for TX and RX interrupting at %s\n", intrstr);
   6250 		}
   6251 		sc->sc_ihs[intr_idx] = vih;
   6252 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6253 			goto fail;
   6254 		txrx_established++;
   6255 		intr_idx++;
   6256 	}
   6257 
   6258 	/* LINK */
   6259 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6260 	    sizeof(intrbuf));
   6261 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6262 	memset(intr_xname, 0, sizeof(intr_xname));
   6263 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6264 	    device_xname(sc->sc_dev));
   6265 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6266 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6267 	if (vih == NULL) {
   6268 		aprint_error_dev(sc->sc_dev,
   6269 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6270 		    intrstr ? " at " : "",
   6271 		    intrstr ? intrstr : "");
   6272 
   6273 		goto fail;
   6274 	}
   6275 	/* Keep default affinity to LINK interrupt */
   6276 	aprint_normal_dev(sc->sc_dev,
   6277 	    "for LINK interrupting at %s\n", intrstr);
   6278 	sc->sc_ihs[intr_idx] = vih;
   6279 	sc->sc_link_intr_idx = intr_idx;
   6280 
   6281 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6282 	kcpuset_destroy(affinity);
   6283 	return 0;
   6284 
   6285 fail:
   6286 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6287 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6288 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6289 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6290 	}
   6291 
   6292 	kcpuset_destroy(affinity);
   6293 	return ENOMEM;
   6294 }
   6295 
   6296 static void
   6297 wm_unset_stopping_flags(struct wm_softc *sc)
   6298 {
   6299 	int i;
   6300 
   6301 	KASSERT(mutex_owned(sc->sc_core_lock));
   6302 
   6303 	/* Must unset stopping flags in ascending order. */
   6304 	for (i = 0; i < sc->sc_nqueues; i++) {
   6305 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6306 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6307 
   6308 		mutex_enter(txq->txq_lock);
   6309 		txq->txq_stopping = false;
   6310 		mutex_exit(txq->txq_lock);
   6311 
   6312 		mutex_enter(rxq->rxq_lock);
   6313 		rxq->rxq_stopping = false;
   6314 		mutex_exit(rxq->rxq_lock);
   6315 	}
   6316 
   6317 	sc->sc_core_stopping = false;
   6318 }
   6319 
   6320 static void
   6321 wm_set_stopping_flags(struct wm_softc *sc)
   6322 {
   6323 	int i;
   6324 
   6325 	KASSERT(mutex_owned(sc->sc_core_lock));
   6326 
   6327 	sc->sc_core_stopping = true;
   6328 
   6329 	/* Must set stopping flags in ascending order. */
   6330 	for (i = 0; i < sc->sc_nqueues; i++) {
   6331 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6332 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6333 
   6334 		mutex_enter(rxq->rxq_lock);
   6335 		rxq->rxq_stopping = true;
   6336 		mutex_exit(rxq->rxq_lock);
   6337 
   6338 		mutex_enter(txq->txq_lock);
   6339 		txq->txq_stopping = true;
   6340 		mutex_exit(txq->txq_lock);
   6341 	}
   6342 }
   6343 
   6344 /*
   6345  * Write interrupt interval value to ITR or EITR
   6346  */
   6347 static void
   6348 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6349 {
   6350 
   6351 	if (!wmq->wmq_set_itr)
   6352 		return;
   6353 
   6354 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6355 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6356 
   6357 		/*
    6358 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    6359 		 * the counter field in software.
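         		 * (With CNT_INGR set, a write to EITR is understood to
         		 * leave the hardware's current interval countdown untouched.)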
   6360 		 */
   6361 		if (sc->sc_type == WM_T_82575)
   6362 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6363 			    EITR_COUNTER_MASK_82575);
   6364 		else
   6365 			eitr |= EITR_CNT_INGR;
   6366 
   6367 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6368 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6369 		/*
    6370 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6371 		 * the multiqueue function with MSI-X.
   6372 		 */
   6373 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6374 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6375 	} else {
   6376 		KASSERT(wmq->wmq_id == 0);
   6377 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6378 	}
   6379 
   6380 	wmq->wmq_set_itr = false;
   6381 }
   6382 
   6383 /*
   6384  * TODO
    6385  * The dynamic ITR calculation below is almost the same as Linux igb's,
    6386  * but it does not fit wm(4), so AIM stays disabled until we find an
    6387  * appropriate way to calculate the ITR.
   6388  */
   6389 /*
    6390  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6391  * write to the register; this function does not write ITR/EITR itself.
   6392  */
   6393 static void
   6394 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6395 {
   6396 #ifdef NOTYET
   6397 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6398 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6399 	uint32_t avg_size = 0;
   6400 	uint32_t new_itr;
   6401 
   6402 	if (rxq->rxq_packets)
   6403 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6404 	if (txq->txq_packets)
   6405 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6406 
   6407 	if (avg_size == 0) {
   6408 		new_itr = 450; /* restore default value */
   6409 		goto out;
   6410 	}
   6411 
   6412 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6413 	avg_size += 24;
   6414 
   6415 	/* Don't starve jumbo frames */
   6416 	avg_size = uimin(avg_size, 3000);
   6417 
   6418 	/* Give a little boost to mid-size frames */
   6419 	if ((avg_size > 300) && (avg_size < 1200))
   6420 		new_itr = avg_size / 3;
   6421 	else
   6422 		new_itr = avg_size / 2;
   6423 
   6424 out:
   6425 	/*
    6426 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   6427 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6428 	 */
   6429 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6430 		new_itr *= 4;
   6431 
   6432 	if (new_itr != wmq->wmq_itr) {
   6433 		wmq->wmq_itr = new_itr;
   6434 		wmq->wmq_set_itr = true;
   6435 	} else
   6436 		wmq->wmq_set_itr = false;
   6437 
   6438 	rxq->rxq_packets = 0;
   6439 	rxq->rxq_bytes = 0;
   6440 	txq->txq_packets = 0;
   6441 	txq->txq_bytes = 0;
   6442 #endif
   6443 }
   6444 
   6445 static void
   6446 wm_init_sysctls(struct wm_softc *sc)
   6447 {
   6448 	struct sysctllog **log;
   6449 	const struct sysctlnode *rnode, *qnode, *cnode;
   6450 	int i, rv;
   6451 	const char *dvname;
   6452 
   6453 	log = &sc->sc_sysctllog;
   6454 	dvname = device_xname(sc->sc_dev);
   6455 
   6456 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6457 	    0, CTLTYPE_NODE, dvname,
   6458 	    SYSCTL_DESCR("wm information and settings"),
   6459 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6460 	if (rv != 0)
   6461 		goto err;
   6462 
   6463 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6464 	    CTLTYPE_BOOL, "txrx_workqueue",
   6465 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6466 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6467 	if (rv != 0)
   6468 		goto teardown;
   6469 
   6470 	for (i = 0; i < sc->sc_nqueues; i++) {
   6471 		struct wm_queue *wmq = &sc->sc_queue[i];
   6472 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6473 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6474 
   6475 		snprintf(sc->sc_queue[i].sysctlname,
   6476 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6477 
   6478 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6479 		    0, CTLTYPE_NODE,
   6480 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6481 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6482 			break;
   6483 
   6484 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6485 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6486 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6487 		    NULL, 0, &txq->txq_free,
   6488 		    0, CTL_CREATE, CTL_EOL) != 0)
   6489 			break;
   6490 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6491 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6492 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6493 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6494 		    0, CTL_CREATE, CTL_EOL) != 0)
   6495 			break;
   6496 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6497 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6498 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6499 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6500 		    0, CTL_CREATE, CTL_EOL) != 0)
   6501 			break;
   6502 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6503 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6504 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6505 		    NULL, 0, &txq->txq_next,
   6506 		    0, CTL_CREATE, CTL_EOL) != 0)
   6507 			break;
   6508 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6509 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6510 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6511 		    NULL, 0, &txq->txq_sfree,
   6512 		    0, CTL_CREATE, CTL_EOL) != 0)
   6513 			break;
   6514 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6515 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6516 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6517 		    NULL, 0, &txq->txq_snext,
   6518 		    0, CTL_CREATE, CTL_EOL) != 0)
   6519 			break;
   6520 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6521 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6522 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6523 		    NULL, 0, &txq->txq_sdirty,
   6524 		    0, CTL_CREATE, CTL_EOL) != 0)
   6525 			break;
   6526 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6527 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6528 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6529 		    NULL, 0, &txq->txq_flags,
   6530 		    0, CTL_CREATE, CTL_EOL) != 0)
   6531 			break;
   6532 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6533 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6534 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6535 		    NULL, 0, &txq->txq_stopping,
   6536 		    0, CTL_CREATE, CTL_EOL) != 0)
   6537 			break;
   6538 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6539 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6540 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6541 		    NULL, 0, &txq->txq_sending,
   6542 		    0, CTL_CREATE, CTL_EOL) != 0)
   6543 			break;
   6544 
   6545 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6546 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6547 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6548 		    NULL, 0, &rxq->rxq_ptr,
   6549 		    0, CTL_CREATE, CTL_EOL) != 0)
   6550 			break;
   6551 	}
   6552 
   6553 #ifdef WM_DEBUG
   6554 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6555 	    CTLTYPE_INT, "debug_flags",
   6556 	    SYSCTL_DESCR(
   6557 		    "Debug flags:\n"	\
   6558 		    "\t0x01 LINK\n"	\
   6559 		    "\t0x02 TX\n"	\
   6560 		    "\t0x04 RX\n"	\
   6561 		    "\t0x08 GMII\n"	\
   6562 		    "\t0x10 MANAGE\n"	\
   6563 		    "\t0x20 NVM\n"	\
   6564 		    "\t0x40 INIT\n"	\
   6565 		    "\t0x80 LOCK"),
   6566 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6567 	if (rv != 0)
   6568 		goto teardown;
   6569 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6570 	    CTLTYPE_BOOL, "trigger_reset",
   6571 	    SYSCTL_DESCR("Trigger an interface reset"),
   6572 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6573 	if (rv != 0)
   6574 		goto teardown;
   6575 #endif
   6576 
   6577 	return;
   6578 
   6579 teardown:
   6580 	sysctl_teardown(log);
   6581 err:
   6582 	sc->sc_sysctllog = NULL;
   6583 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6584 	    __func__, rv);
   6585 }
   6586 
   6587 static void
   6588 wm_update_stats(struct wm_softc *sc)
   6589 {
   6590 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6591 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   6592 	    cexterr;
   6593 
   6594 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6595 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6596 	mpc = CSR_READ(sc, WMREG_MPC);
   6597 	colc = CSR_READ(sc, WMREG_COLC);
   6598 	sec = CSR_READ(sc, WMREG_SEC);
   6599 	rlec = CSR_READ(sc, WMREG_RLEC);
   6600 
   6601 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6602 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6603 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6604 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6605 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6606 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6607 
   6608 	if (sc->sc_type >= WM_T_82543) {
   6609 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6610 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6611 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6612 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6613 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6614 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6615 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6616 		} else {
   6617 			cexterr = 0;
   6618 			/* Excessive collision + Link down */
   6619 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6620 			    CSR_READ(sc, WMREG_HTDPMC));
   6621 		}
   6622 
   6623 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6624 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6625 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6626 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6627 			    CSR_READ(sc, WMREG_TSCTFC));
   6628 		else {
   6629 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6630 			    CSR_READ(sc, WMREG_CBRDPC));
   6631 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6632 			    CSR_READ(sc, WMREG_CBRMPC));
   6633 		}
   6634 	} else
   6635 		algnerrc = rxerrc = cexterr = 0;
   6636 
   6637 	if (sc->sc_type >= WM_T_82542_2_1) {
   6638 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6639 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6640 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6641 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6642 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6643 	}
   6644 
   6645 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6646 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6647 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6648 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6649 
   6650 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6651 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6652 	}
   6653 
   6654 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6655 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6656 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6657 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6658 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6659 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6660 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6661 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6662 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6663 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6664 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6665 
   6666 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6667 	    CSR_READ(sc, WMREG_GORCL) +
   6668 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6669 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6670 	    CSR_READ(sc, WMREG_GOTCL) +
   6671 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6672 
   6673 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6674 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6675 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6676 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6677 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6678 
   6679 	if (sc->sc_type >= WM_T_82540) {
   6680 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6681 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6682 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6683 	}
   6684 
   6685 	/*
   6686 	 * The TOR(L) register includes:
   6687 	 *  - Error
   6688 	 *  - Flow control
    6689 	 *  - Broadcast rejected (this is noted in the 82574 and newer
    6690 	 *    datasheets; what does "broadcast rejected" mean?)
   6691 	 */
   6692 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6693 	    CSR_READ(sc, WMREG_TORL) +
   6694 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6695 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6696 	    CSR_READ(sc, WMREG_TOTL) +
   6697 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6698 
   6699 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6700 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6701 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6702 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6703 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6704 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6705 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6706 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6710 	if (sc->sc_type < WM_T_82575) {
   6711 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6712 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6713 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6714 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6715 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6716 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6717 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6718 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6719 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6720 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6721 	} else if (!WM_IS_ICHPCH(sc)) {
   6722 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6723 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6724 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6725 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6726 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6727 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6728 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6729 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6730 
   6731 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6732 		    CSR_READ(sc, WMREG_HGORCL) +
   6733 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6734 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6735 		    CSR_READ(sc, WMREG_HGOTCL) +
   6736 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6737 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6738 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6739 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6740 	}
   6741 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6742 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6743 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6744 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6745 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6746 			    CSR_READ(sc, WMREG_B2OGPRC));
   6747 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6748 			    CSR_READ(sc, WMREG_O2BSPC));
   6749 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6750 			    CSR_READ(sc, WMREG_B2OSPC));
   6751 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6752 			    CSR_READ(sc, WMREG_O2BGPTC));
   6753 		}
   6754 	}
   6755 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6756 	if_statadd_ref(nsr, if_collisions, colc);
   6757 	if_statadd_ref(nsr, if_ierrors,
   6758 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6759 	/*
   6760 	 * WMREG_RNBC is incremented when there are no available buffers in
    6761 	 * host memory. It is not the number of dropped packets, because the
    6762 	 * Ethernet controller can still receive packets in that case as long
    6763 	 * as there is space in the PHY's FIFO.
    6764 	 *
    6765 	 * If you want to track the WMREG_RNBC count, use its own EVCNT
    6766 	 * instead of if_iqdrops.
   6767 	 */
   6768 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6769 	IF_STAT_PUTREF(ifp);
   6770 }
   6771 
   6772 void
   6773 wm_clear_evcnt(struct wm_softc *sc)
   6774 {
   6775 #ifdef WM_EVENT_COUNTERS
   6776 	int i;
   6777 
   6778 	/* RX queues */
   6779 	for (i = 0; i < sc->sc_nqueues; i++) {
   6780 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6781 
   6782 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6783 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6784 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6785 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6786 	}
   6787 
   6788 	/* TX queues */
   6789 	for (i = 0; i < sc->sc_nqueues; i++) {
   6790 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6791 		int j;
   6792 
   6793 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6794 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6795 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6796 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6797 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6798 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6799 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6800 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6801 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6802 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6803 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6804 
   6805 		for (j = 0; j < WM_NTXSEGS; j++)
   6806 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6807 
   6808 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6809 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6810 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6811 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6812 		if (sc->sc_type <= WM_T_82544)
   6813 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6814 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6815 	}
   6816 
   6817 	/* Miscs */
   6818 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6819 
   6820 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6821 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6822 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6823 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6824 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6825 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6826 
   6827 	if (sc->sc_type >= WM_T_82543) {
   6828 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6829 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6830 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6831 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6832 		else
   6833 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6834 
   6835 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6836 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6837 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6838 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6839 		else {
   6840 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6841 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6842 		}
   6843 	}
   6844 
   6845 	if (sc->sc_type >= WM_T_82542_2_1) {
   6846 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6847 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6848 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6849 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6850 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6851 	}
   6852 
   6853 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6854 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6855 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6856 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6857 
   6858 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6859 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6860 
   6861 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6862 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6863 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6864 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6865 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6866 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6867 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6868 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6869 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6870 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6871 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6872 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6873 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6874 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6875 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6876 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6877 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6878 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6879 	if (sc->sc_type >= WM_T_82540) {
   6880 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6881 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6882 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6883 	}
   6884 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6885 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6886 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6887 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6888 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6889 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6890 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6891 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6892 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6893 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6894 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6895 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6896 	WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6897 	if (sc->sc_type < WM_T_82575) {
   6898 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6899 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6900 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6901 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6902 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6903 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6904 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6905 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6906 	} else if (!WM_IS_ICHPCH(sc)) {
   6907 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6908 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6909 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6910 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6911 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6912 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6913 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6914 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6915 
   6916 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6917 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6918 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6919 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6920 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6921 	}
   6922 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6923 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6924 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6925 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6926 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6927 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6928 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6929 	}
   6930 #endif
   6931 }
   6932 
   6933 /*
   6934  * wm_init:		[ifnet interface function]
   6935  *
   6936  *	Initialize the interface.
   6937  */
   6938 static int
   6939 wm_init(struct ifnet *ifp)
   6940 {
   6941 	struct wm_softc *sc = ifp->if_softc;
   6942 	int ret;
   6943 
   6944 	KASSERT(IFNET_LOCKED(ifp));
   6945 
   6946 	if (sc->sc_dying)
   6947 		return ENXIO;
   6948 
   6949 	mutex_enter(sc->sc_core_lock);
   6950 	ret = wm_init_locked(ifp);
   6951 	mutex_exit(sc->sc_core_lock);
   6952 
   6953 	return ret;
   6954 }
   6955 
   6956 static int
   6957 wm_init_locked(struct ifnet *ifp)
   6958 {
   6959 	struct wm_softc *sc = ifp->if_softc;
   6960 	struct ethercom *ec = &sc->sc_ethercom;
   6961 	int i, j, trynum, error = 0;
   6962 	uint32_t reg, sfp_mask = 0;
   6963 
   6964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6965 		device_xname(sc->sc_dev), __func__));
   6966 	KASSERT(IFNET_LOCKED(ifp));
   6967 	KASSERT(mutex_owned(sc->sc_core_lock));
   6968 
   6969 	/*
    6970 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6971 	 * There is a small but measurable benefit to avoiding the adjustment
   6972 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6973 	 * on such platforms.  One possibility is that the DMA itself is
   6974 	 * slightly more efficient if the front of the entire packet (instead
   6975 	 * of the front of the headers) is aligned.
   6976 	 *
   6977 	 * Note we must always set align_tweak to 0 if we are using
   6978 	 * jumbo frames.
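         	 *
         	 * (The 2-byte tweak offsets the 14-byte Ethernet header so that
         	 * the IP header following it lands on a 4-byte boundary.)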
   6979 	 */
   6980 #ifdef __NO_STRICT_ALIGNMENT
   6981 	sc->sc_align_tweak = 0;
   6982 #else
   6983 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6984 		sc->sc_align_tweak = 0;
   6985 	else
   6986 		sc->sc_align_tweak = 2;
   6987 #endif /* __NO_STRICT_ALIGNMENT */
   6988 
   6989 	/* Cancel any pending I/O. */
   6990 	wm_stop_locked(ifp, false, false);
   6991 
   6992 	/* Update statistics before reset */
   6993 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6994 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6995 
   6996 	/* >= PCH_SPT hardware workaround before reset. */
   6997 	if (sc->sc_type >= WM_T_PCH_SPT)
   6998 		wm_flush_desc_rings(sc);
   6999 
   7000 	/* Reset the chip to a known state. */
   7001 	wm_reset(sc);
   7002 
   7003 	/*
   7004 	 * AMT based hardware can now take control from firmware
   7005 	 * Do this after reset.
   7006 	 */
   7007 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7008 		wm_get_hw_control(sc);
   7009 
   7010 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7011 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7012 		wm_legacy_irq_quirk_spt(sc);
   7013 
   7014 	/* Init hardware bits */
   7015 	wm_initialize_hardware_bits(sc);
   7016 
   7017 	/* Reset the PHY. */
   7018 	if (sc->sc_flags & WM_F_HAS_MII)
   7019 		wm_gmii_reset(sc);
   7020 
   7021 	if (sc->sc_type >= WM_T_ICH8) {
   7022 		reg = CSR_READ(sc, WMREG_GCR);
   7023 		/*
   7024 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   7025 		 * default after reset.
   7026 		 */
   7027 		if (sc->sc_type == WM_T_ICH8)
   7028 			reg |= GCR_NO_SNOOP_ALL;
   7029 		else
   7030 			reg &= ~GCR_NO_SNOOP_ALL;
   7031 		CSR_WRITE(sc, WMREG_GCR, reg);
   7032 	}
   7033 
   7034 	if ((sc->sc_type >= WM_T_ICH8)
   7035 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7036 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7037 
   7038 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7039 		reg |= CTRL_EXT_RO_DIS;
   7040 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7041 	}
   7042 
   7043 	/* Calculate (E)ITR value */
   7044 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7045 		/*
   7046 		 * For NEWQUEUE's EITR (except for 82575).
    7047 		 * The 82575's EITR should be set to the same throttling value
    7048 		 * as the other old controllers' ITR because the interrupts/sec
    7049 		 * calculation is the same, i.e. 1,000,000,000 / (N * 256).
    7050 		 *
    7051 		 * The 82574's EITR should be set to the same value as the ITR.
   7052 		 *
   7053 		 * For N interrupts/sec, set this value to:
   7054 		 * 1,000,000 / N in contrast to ITR throttling value.
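         		 * E.g. the default of 450 set below corresponds to roughly
         		 * 1,000,000 / 450 ~= 2222 interrupts/sec.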
   7055 		 */
   7056 		sc->sc_itr_init = 450;
   7057 	} else if (sc->sc_type >= WM_T_82543) {
   7058 		/*
   7059 		 * Set up the interrupt throttling register (units of 256ns)
   7060 		 * Note that a footnote in Intel's documentation says this
   7061 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7062 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7063 		 * that that is also true for the 1024ns units of the other
   7064 		 * interrupt-related timer registers -- so, really, we ought
   7065 		 * to divide this value by 4 when the link speed is low.
   7066 		 *
   7067 		 * XXX implement this division at link speed change!
   7068 		 */
   7069 
   7070 		/*
   7071 		 * For N interrupts/sec, set this value to:
   7072 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7073 		 * absolute and packet timer values to this value
   7074 		 * divided by 4 to get "simple timer" behavior.
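         		 * E.g. the default of 1500 set below gives
         		 * 1,000,000,000 / (1500 * 256) ~= 2604 ints/sec, matching
         		 * the comment next to sc_itr_init.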
   7075 		 */
   7076 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7077 	}
   7078 
   7079 	error = wm_init_txrx_queues(sc);
   7080 	if (error)
   7081 		goto out;
   7082 
   7083 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7084 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7085 	    (sc->sc_type >= WM_T_82575))
   7086 		wm_serdes_power_up_link_82575(sc);
   7087 
   7088 	/* Clear out the VLAN table -- we don't use it (yet). */
   7089 	CSR_WRITE(sc, WMREG_VET, 0);
   7090 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7091 		trynum = 10; /* Due to hw errata */
   7092 	else
   7093 		trynum = 1;
   7094 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7095 		for (j = 0; j < trynum; j++)
   7096 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7097 
   7098 	/*
   7099 	 * Set up flow-control parameters.
   7100 	 *
   7101 	 * XXX Values could probably stand some tuning.
   7102 	 */
   7103 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7104 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7105 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7106 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   7107 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7108 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7109 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7110 	}
   7111 
   7112 	sc->sc_fcrtl = FCRTL_DFLT;
   7113 	if (sc->sc_type < WM_T_82543) {
   7114 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7115 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7116 	} else {
   7117 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7118 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7119 	}
   7120 
   7121 	if (sc->sc_type == WM_T_80003)
   7122 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7123 	else
   7124 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7125 
   7126 	/* Writes the control register. */
   7127 	wm_set_vlan(sc);
   7128 
   7129 	if (sc->sc_flags & WM_F_HAS_MII) {
   7130 		uint16_t kmreg;
   7131 
   7132 		switch (sc->sc_type) {
   7133 		case WM_T_80003:
   7134 		case WM_T_ICH8:
   7135 		case WM_T_ICH9:
   7136 		case WM_T_ICH10:
   7137 		case WM_T_PCH:
   7138 		case WM_T_PCH2:
   7139 		case WM_T_PCH_LPT:
   7140 		case WM_T_PCH_SPT:
   7141 		case WM_T_PCH_CNP:
   7142 			/*
   7143 			 * Set the mac to wait the maximum time between each
   7144 			 * iteration and increase the max iterations when
   7145 			 * polling the phy; this fixes erroneous timeouts at
   7146 			 * 10Mbps.
   7147 			 */
   7148 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7149 			    0xFFFF);
   7150 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7151 			    &kmreg);
   7152 			kmreg |= 0x3F;
   7153 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7154 			    kmreg);
   7155 			break;
   7156 		default:
   7157 			break;
   7158 		}
   7159 
   7160 		if (sc->sc_type == WM_T_80003) {
   7161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7162 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7163 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7164 
   7165 			/* Bypass RX and TX FIFOs */
   7166 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7167 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7168 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7169 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7170 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7171 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7172 		}
   7173 	}
   7174 #if 0
   7175 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7176 #endif
   7177 
   7178 	/* Set up checksum offload parameters. */
   7179 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7180 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7181 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7182 		reg |= RXCSUM_IPOFL;
   7183 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7184 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7185 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7186 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7187 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7188 
   7189 	/* Set registers about MSI-X */
   7190 	if (wm_is_using_msix(sc)) {
   7191 		uint32_t ivar, qintr_idx;
   7192 		struct wm_queue *wmq;
   7193 		unsigned int qid;
   7194 
   7195 		if (sc->sc_type == WM_T_82575) {
   7196 			/* Interrupt control */
   7197 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7198 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7199 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7200 
   7201 			/* TX and RX */
   7202 			for (i = 0; i < sc->sc_nqueues; i++) {
   7203 				wmq = &sc->sc_queue[i];
   7204 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7205 				    EITR_TX_QUEUE(wmq->wmq_id)
   7206 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7207 			}
   7208 			/* Link status */
   7209 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7210 			    EITR_OTHER);
   7211 		} else if (sc->sc_type == WM_T_82574) {
   7212 			/* Interrupt control */
   7213 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7214 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7215 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7216 
   7217 			/*
   7218 			 * Work around issue with spurious interrupts
   7219 			 * in MSI-X mode.
    7220 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    7221 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   7222 			 */
   7223 			reg = CSR_READ(sc, WMREG_RFCTL);
   7224 			reg |= WMREG_RFCTL_ACKDIS;
   7225 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7226 
   7227 			ivar = 0;
   7228 			/* TX and RX */
   7229 			for (i = 0; i < sc->sc_nqueues; i++) {
   7230 				wmq = &sc->sc_queue[i];
   7231 				qid = wmq->wmq_id;
   7232 				qintr_idx = wmq->wmq_intr_idx;
   7233 
   7234 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7235 				    IVAR_TX_MASK_Q_82574(qid));
   7236 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7237 				    IVAR_RX_MASK_Q_82574(qid));
   7238 			}
   7239 			/* Link status */
   7240 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7241 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7242 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7243 		} else {
   7244 			/* Interrupt control */
   7245 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7246 			    | GPIE_EIAME | GPIE_PBA);
   7247 
   7248 			switch (sc->sc_type) {
   7249 			case WM_T_82580:
   7250 			case WM_T_I350:
   7251 			case WM_T_I354:
   7252 			case WM_T_I210:
   7253 			case WM_T_I211:
   7254 				/* TX and RX */
   7255 				for (i = 0; i < sc->sc_nqueues; i++) {
   7256 					wmq = &sc->sc_queue[i];
   7257 					qid = wmq->wmq_id;
   7258 					qintr_idx = wmq->wmq_intr_idx;
   7259 
   7260 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7261 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7262 					ivar |= __SHIFTIN((qintr_idx
   7263 						| IVAR_VALID),
   7264 					    IVAR_TX_MASK_Q(qid));
   7265 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7266 					ivar |= __SHIFTIN((qintr_idx
   7267 						| IVAR_VALID),
   7268 					    IVAR_RX_MASK_Q(qid));
   7269 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7270 				}
   7271 				break;
   7272 			case WM_T_82576:
   7273 				/* TX and RX */
   7274 				for (i = 0; i < sc->sc_nqueues; i++) {
   7275 					wmq = &sc->sc_queue[i];
   7276 					qid = wmq->wmq_id;
   7277 					qintr_idx = wmq->wmq_intr_idx;
   7278 
   7279 					ivar = CSR_READ(sc,
   7280 					    WMREG_IVAR_Q_82576(qid));
   7281 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7282 					ivar |= __SHIFTIN((qintr_idx
   7283 						| IVAR_VALID),
   7284 					    IVAR_TX_MASK_Q_82576(qid));
   7285 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7286 					ivar |= __SHIFTIN((qintr_idx
   7287 						| IVAR_VALID),
   7288 					    IVAR_RX_MASK_Q_82576(qid));
   7289 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7290 					    ivar);
   7291 				}
   7292 				break;
   7293 			default:
   7294 				break;
   7295 			}
   7296 
   7297 			/* Link status */
   7298 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7299 			    IVAR_MISC_OTHER);
   7300 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7301 		}
   7302 
   7303 		if (wm_is_using_multiqueue(sc)) {
   7304 			wm_init_rss(sc);
   7305 
    7306 			/*
    7307 			 * NOTE: Receive Full-Packet Checksum Offload is mutually
    7308 			 * exclusive with Multiqueue. However this is not the same
    7309 			 * as TCP/IP checksums, which still work.
    7310 			 */
   7312 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7313 			reg |= RXCSUM_PCSD;
   7314 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7315 		}
   7316 	}
   7317 
   7318 	/* Set up the interrupt registers. */
   7319 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7320 
   7321 	/* Enable SFP module insertion interrupt if it's required */
   7322 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7323 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7324 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7325 		sfp_mask = ICR_GPI(0);
   7326 	}
   7327 
   7328 	if (wm_is_using_msix(sc)) {
   7329 		uint32_t mask;
   7330 		struct wm_queue *wmq;
   7331 
   7332 		switch (sc->sc_type) {
   7333 		case WM_T_82574:
   7334 			mask = 0;
   7335 			for (i = 0; i < sc->sc_nqueues; i++) {
   7336 				wmq = &sc->sc_queue[i];
   7337 				mask |= ICR_TXQ(wmq->wmq_id);
   7338 				mask |= ICR_RXQ(wmq->wmq_id);
   7339 			}
   7340 			mask |= ICR_OTHER;
   7341 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7342 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7343 			break;
   7344 		default:
   7345 			if (sc->sc_type == WM_T_82575) {
   7346 				mask = 0;
   7347 				for (i = 0; i < sc->sc_nqueues; i++) {
   7348 					wmq = &sc->sc_queue[i];
   7349 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7350 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7351 				}
   7352 				mask |= EITR_OTHER;
   7353 			} else {
   7354 				mask = 0;
   7355 				for (i = 0; i < sc->sc_nqueues; i++) {
   7356 					wmq = &sc->sc_queue[i];
   7357 					mask |= 1 << wmq->wmq_intr_idx;
   7358 				}
   7359 				mask |= 1 << sc->sc_link_intr_idx;
   7360 			}
   7361 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7362 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7363 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7364 
   7365 			/* For other interrupts */
   7366 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7367 			break;
   7368 		}
   7369 	} else {
   7370 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7371 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7372 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7373 	}
   7374 
   7375 	/* Set up the inter-packet gap. */
   7376 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7377 
   7378 	if (sc->sc_type >= WM_T_82543) {
   7379 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7380 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7381 			wm_itrs_writereg(sc, wmq);
   7382 		}
   7383 		/*
    7384 		 * Link interrupts occur much less often than TX and RX
    7385 		 * interrupts, so we don't tune the EITR(WM_MSIX_LINKINTR_IDX)
    7386 		 * value like FreeBSD's if_igb does.
   7388 		 */
   7389 	}
   7390 
   7391 	/* Set the VLAN EtherType. */
   7392 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7393 
   7394 	/*
   7395 	 * Set up the transmit control register; we start out with
   7396 	 * a collision distance suitable for FDX, but update it when
   7397 	 * we resolve the media type.
   7398 	 */
   7399 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7400 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7401 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7402 	if (sc->sc_type >= WM_T_82571)
   7403 		sc->sc_tctl |= TCTL_MULR;
   7404 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7405 
   7406 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7407 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   7408 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7409 	}
   7410 
   7411 	if (sc->sc_type == WM_T_80003) {
   7412 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7413 		reg &= ~TCTL_EXT_GCEX_MASK;
   7414 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7415 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7416 	}
   7417 
   7418 	/* Set the media. */
   7419 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7420 		goto out;
   7421 
   7422 	/* Configure for OS presence */
   7423 	wm_init_manageability(sc);
   7424 
   7425 	/*
   7426 	 * Set up the receive control register; we actually program the
   7427 	 * register when we set the receive filter. Use multicast address
   7428 	 * offset type 0.
   7429 	 *
   7430 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7431 	 * don't enable that feature.
   7432 	 */
   7433 	sc->sc_mchash_type = 0;
   7434 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7435 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7436 
    7437 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7438 	if (sc->sc_type == WM_T_82574)
   7439 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7440 
   7441 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7442 		sc->sc_rctl |= RCTL_SECRC;
   7443 
   7444 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7445 	    && (ifp->if_mtu > ETHERMTU)) {
   7446 		sc->sc_rctl |= RCTL_LPE;
   7447 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7448 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7449 	}
   7450 
   7451 	if (MCLBYTES == 2048)
   7452 		sc->sc_rctl |= RCTL_2k;
   7453 	else {
   7454 		if (sc->sc_type >= WM_T_82543) {
   7455 			switch (MCLBYTES) {
   7456 			case 4096:
   7457 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7458 				break;
   7459 			case 8192:
   7460 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7461 				break;
   7462 			case 16384:
   7463 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7464 				break;
   7465 			default:
   7466 				panic("wm_init: MCLBYTES %d unsupported",
   7467 				    MCLBYTES);
   7468 				break;
   7469 			}
   7470 		} else
   7471 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7472 	}
   7473 
   7474 	/* Enable ECC */
   7475 	switch (sc->sc_type) {
   7476 	case WM_T_82571:
   7477 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7478 		reg |= PBA_ECC_CORR_EN;
   7479 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7480 		break;
   7481 	case WM_T_PCH_LPT:
   7482 	case WM_T_PCH_SPT:
   7483 	case WM_T_PCH_CNP:
   7484 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7485 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7486 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7487 
   7488 		sc->sc_ctrl |= CTRL_MEHE;
   7489 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7490 		break;
   7491 	default:
   7492 		break;
   7493 	}
   7494 
   7495 	/*
   7496 	 * Set the receive filter.
   7497 	 *
   7498 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7499 	 * the setting of RCTL.EN in wm_set_filter()
   7500 	 */
   7501 	wm_set_filter(sc);
   7502 
   7503 	/* On the 82575 and later, set RDT only after RX is enabled. */
   7504 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7505 		int qidx;
   7506 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7507 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7508 			for (i = 0; i < WM_NRXDESC; i++) {
   7509 				mutex_enter(rxq->rxq_lock);
   7510 				wm_init_rxdesc(rxq, i);
   7511 				mutex_exit(rxq->rxq_lock);
   7512 
   7513 			}
   7514 		}
   7515 	}
   7516 
   7517 	wm_unset_stopping_flags(sc);
   7518 
   7519 	/* Start the one second link check clock. */
   7520 	callout_schedule(&sc->sc_tick_ch, hz);
   7521 
   7522 	/*
   7523 	 * ...all done! (IFNET_LOCKED asserted above.)
   7524 	 */
   7525 	ifp->if_flags |= IFF_RUNNING;
   7526 
   7527 out:
   7528 	/* Save last flags for the callback */
   7529 	sc->sc_if_flags = ifp->if_flags;
   7530 	sc->sc_ec_capenable = ec->ec_capenable;
   7531 	if (error)
   7532 		log(LOG_ERR, "%s: interface not running\n",
   7533 		    device_xname(sc->sc_dev));
   7534 	return error;
   7535 }
   7536 
   7537 /*
   7538  * wm_stop:		[ifnet interface function]
   7539  *
   7540  *	Stop transmission on the interface.
   7541  */
   7542 static void
   7543 wm_stop(struct ifnet *ifp, int disable)
   7544 {
   7545 	struct wm_softc *sc = ifp->if_softc;
   7546 
   7547 	ASSERT_SLEEPABLE();
   7548 	KASSERT(IFNET_LOCKED(ifp));
   7549 
   7550 	mutex_enter(sc->sc_core_lock);
   7551 	wm_stop_locked(ifp, disable ? true : false, true);
   7552 	mutex_exit(sc->sc_core_lock);
   7553 
   7554 	/*
   7555 	 * After wm_set_stopping_flags(), it is guaranteed that
   7556 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   7557 	 * However, workqueue_wait() cannot be called in
   7558 	 * wm_stop_locked() because it can sleep, so call
   7559 	 * workqueue_wait() here instead.
   7560 	 */
   7561 	for (int i = 0; i < sc->sc_nqueues; i++)
   7562 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7563 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7564 }
   7565 
   7566 static void
   7567 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7568 {
   7569 	struct wm_softc *sc = ifp->if_softc;
   7570 	struct wm_txsoft *txs;
   7571 	int i, qidx;
   7572 
   7573 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7574 		device_xname(sc->sc_dev), __func__));
   7575 	KASSERT(IFNET_LOCKED(ifp));
   7576 	KASSERT(mutex_owned(sc->sc_core_lock));
   7577 
   7578 	wm_set_stopping_flags(sc);
   7579 
   7580 	if (sc->sc_flags & WM_F_HAS_MII) {
   7581 		/* Down the MII. */
   7582 		mii_down(&sc->sc_mii);
   7583 	} else {
   7584 #if 0
   7585 		/* Should we clear the PHY's status properly? */
   7586 		wm_reset(sc);
   7587 #endif
   7588 	}
   7589 
   7590 	/* Stop the transmit and receive processes. */
   7591 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7592 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7593 	sc->sc_rctl &= ~RCTL_EN;
   7594 
   7595 	/*
   7596 	 * Clear the interrupt mask to ensure the device cannot assert its
   7597 	 * interrupt line.
   7598 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7599 	 * service any currently pending or shared interrupt.
   7600 	 */
   7601 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7602 	sc->sc_icr = 0;
   7603 	if (wm_is_using_msix(sc)) {
   7604 		if (sc->sc_type != WM_T_82574) {
   7605 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7606 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7607 		} else
   7608 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7609 	}
   7610 
   7611 	/*
   7612 	 * Stop callouts after interrupts are disabled; if we have
   7613 	 * to wait for them, we will be releasing the CORE_LOCK
   7614 	 * briefly, which will unblock interrupts on the current CPU.
   7615 	 */
   7616 
   7617 	/* Stop the one second clock. */
   7618 	if (wait)
   7619 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7620 	else
   7621 		callout_stop(&sc->sc_tick_ch);
   7622 
   7623 	/* Stop the 82547 Tx FIFO stall check timer. */
   7624 	if (sc->sc_type == WM_T_82547) {
   7625 		if (wait)
   7626 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7627 		else
   7628 			callout_stop(&sc->sc_txfifo_ch);
   7629 	}
   7630 
   7631 	/* Release any queued transmit buffers. */
   7632 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7633 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7634 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7635 		struct mbuf *m;
   7636 
   7637 		mutex_enter(txq->txq_lock);
   7638 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7639 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7640 			txs = &txq->txq_soft[i];
   7641 			if (txs->txs_mbuf != NULL) {
   7642 				bus_dmamap_unload(sc->sc_dmat,
        				    txs->txs_dmamap);
   7643 				m_freem(txs->txs_mbuf);
   7644 				txs->txs_mbuf = NULL;
   7645 			}
   7646 		}
   7647 		/* Drain txq_interq */
   7648 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7649 			m_freem(m);
   7650 		mutex_exit(txq->txq_lock);
   7651 	}
   7652 
   7653 	/* Mark the interface as down and cancel the watchdog timer. */
   7654 	ifp->if_flags &= ~IFF_RUNNING;
   7655 	sc->sc_if_flags = ifp->if_flags;
   7656 
   7657 	if (disable) {
   7658 		for (i = 0; i < sc->sc_nqueues; i++) {
   7659 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7660 			mutex_enter(rxq->rxq_lock);
   7661 			wm_rxdrain(rxq);
   7662 			mutex_exit(rxq->rxq_lock);
   7663 		}
   7664 	}
   7665 
   7666 #if 0 /* notyet */
   7667 	if (sc->sc_type >= WM_T_82544)
   7668 		CSR_WRITE(sc, WMREG_WUC, 0);
   7669 #endif
   7670 }
   7671 
   7672 static void
   7673 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7674 {
   7675 	struct mbuf *m;
   7676 	int i;
   7677 
   7678 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7679 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7680 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7681 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7682 		    m->m_data, m->m_len, m->m_flags);
   7683 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7684 	    i, i == 1 ? "" : "s");
   7685 }
   7686 
   7687 /*
   7688  * wm_82547_txfifo_stall:
   7689  *
   7690  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7691  *	reset the FIFO pointers, and restart packet transmission.
   7692  */
   7693 static void
   7694 wm_82547_txfifo_stall(void *arg)
   7695 {
   7696 	struct wm_softc *sc = arg;
   7697 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7698 
   7699 	mutex_enter(txq->txq_lock);
   7700 
   7701 	if (txq->txq_stopping)
   7702 		goto out;
   7703 
   7704 	if (txq->txq_fifo_stall) {
   7705 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7706 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7707 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7708 			/*
   7709 			 * Packets have drained.  Stop transmitter, reset
   7710 			 * FIFO pointers, restart transmitter, and kick
   7711 			 * the packet queue.
   7712 			 */
   7713 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7714 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7715 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7716 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7717 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7718 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7719 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7720 			CSR_WRITE_FLUSH(sc);
   7721 
   7722 			txq->txq_fifo_head = 0;
   7723 			txq->txq_fifo_stall = 0;
   7724 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7725 		} else {
   7726 			/*
   7727 			 * Still waiting for packets to drain; try again in
   7728 			 * another tick.
   7729 			 */
   7730 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7731 		}
   7732 	}
   7733 
   7734 out:
   7735 	mutex_exit(txq->txq_lock);
   7736 }
   7737 
   7738 /*
   7739  * wm_82547_txfifo_bugchk:
   7740  *
   7741  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7742  *	prevent enqueueing a packet that would wrap around the end
   7743  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7744  *
   7745  *	We do this by checking the amount of space before the end
   7746  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7747  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7748  *	the internal FIFO pointers to the beginning, and restart
   7749  *	transmission on the interface.
   7750  */
   7751 #define	WM_FIFO_HDR		0x10
   7752 #define	WM_82547_PAD_LEN	0x3e0
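        /*
         * Each packet occupies its length plus a 16-byte FIFO header,
         * rounded up to a 16-byte boundary, in the Tx FIFO; the check
         * below keeps WM_82547_PAD_LEN of guard space before the FIFO
         * end to avoid the wraparound erratum described above.
         */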
   7753 static int
   7754 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7755 {
   7756 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7757 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7758 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7759 
   7760 	/* Just return if already stalled. */
   7761 	if (txq->txq_fifo_stall)
   7762 		return 1;
   7763 
   7764 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7765 		/* Stall only occurs in half-duplex mode. */
   7766 		goto send_packet;
   7767 	}
   7768 
   7769 	if (len >= WM_82547_PAD_LEN + space) {
   7770 		txq->txq_fifo_stall = 1;
   7771 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7772 		return 1;
   7773 	}
   7774 
   7775 send_packet:
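        	/*
        	 * Charge this packet to the software FIFO head pointer,
        	 * wrapping at the end of the FIFO.
        	 */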
   7776 	txq->txq_fifo_head += len;
   7777 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7778 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7779 
   7780 	return 0;
   7781 }
   7782 
   7783 static int
   7784 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7785 {
   7786 	int error;
   7787 
   7788 	/*
   7789 	 * Allocate the control data structures, and create and load the
   7790 	 * DMA map for it.
   7791 	 *
   7792 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7793 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7794 	 * both sets within the same 4G segment.
   7795 	 */
   7796 	if (sc->sc_type < WM_T_82544)
   7797 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7798 	else
   7799 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7800 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7801 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7802 	else
   7803 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7804 
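        	/*
        	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
        	 * prevents the allocation from crossing a 4GB boundary,
        	 * which satisfies the 4G-segment requirement noted above.
        	 */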
   7805 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7806 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7807 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7808 		aprint_error_dev(sc->sc_dev,
   7809 		    "unable to allocate TX control data, error = %d\n",
   7810 		    error);
   7811 		goto fail_0;
   7812 	}
   7813 
   7814 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7815 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7816 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7817 		aprint_error_dev(sc->sc_dev,
   7818 		    "unable to map TX control data, error = %d\n", error);
   7819 		goto fail_1;
   7820 	}
   7821 
   7822 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7823 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7824 		aprint_error_dev(sc->sc_dev,
   7825 		    "unable to create TX control data DMA map, error = %d\n",
   7826 		    error);
   7827 		goto fail_2;
   7828 	}
   7829 
   7830 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7831 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7832 		aprint_error_dev(sc->sc_dev,
   7833 		    "unable to load TX control data DMA map, error = %d\n",
   7834 		    error);
   7835 		goto fail_3;
   7836 	}
   7837 
   7838 	return 0;
   7839 
   7840 fail_3:
   7841 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7842 fail_2:
   7843 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7844 	    WM_TXDESCS_SIZE(txq));
   7845 fail_1:
   7846 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7847 fail_0:
   7848 	return error;
   7849 }
   7850 
   7851 static void
   7852 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7853 {
   7854 
   7855 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7856 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7857 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7858 	    WM_TXDESCS_SIZE(txq));
   7859 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7860 }
   7861 
   7862 static int
   7863 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7864 {
   7865 	int error;
   7866 	size_t rxq_descs_size;
   7867 
   7868 	/*
   7869 	 * Allocate the control data structures, and create and load the
   7870 	 * DMA map for it.
   7871 	 *
   7872 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7873 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7874 	 * both sets within the same 4G segment.
   7875 	 */
   7876 	rxq->rxq_ndesc = WM_NRXDESC;
   7877 	if (sc->sc_type == WM_T_82574)
   7878 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7879 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7880 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7881 	else
   7882 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7883 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7884 
   7885 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7886 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7887 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7888 		aprint_error_dev(sc->sc_dev,
   7889 		    "unable to allocate RX control data, error = %d\n",
   7890 		    error);
   7891 		goto fail_0;
   7892 	}
   7893 
   7894 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7895 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7896 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7897 		aprint_error_dev(sc->sc_dev,
   7898 		    "unable to map RX control data, error = %d\n", error);
   7899 		goto fail_1;
   7900 	}
   7901 
   7902 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7903 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7904 		aprint_error_dev(sc->sc_dev,
   7905 		    "unable to create RX control data DMA map, error = %d\n",
   7906 		    error);
   7907 		goto fail_2;
   7908 	}
   7909 
   7910 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7911 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7912 		aprint_error_dev(sc->sc_dev,
   7913 		    "unable to load RX control data DMA map, error = %d\n",
   7914 		    error);
   7915 		goto fail_3;
   7916 	}
   7917 
   7918 	return 0;
   7919 
   7920  fail_3:
   7921 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7922  fail_2:
   7923 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7924 	    rxq_descs_size);
   7925  fail_1:
   7926 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7927  fail_0:
   7928 	return error;
   7929 }
   7930 
   7931 static void
   7932 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7933 {
   7934 
   7935 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7936 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7937 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7938 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7939 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7940 }
   7941 
   7942 
   7943 static int
   7944 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7945 {
   7946 	int i, error;
   7947 
   7948 	/* Create the transmit buffer DMA maps. */
   7949 	WM_TXQUEUELEN(txq) =
   7950 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7951 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7952 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7953 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7954 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7955 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7956 			aprint_error_dev(sc->sc_dev,
   7957 			    "unable to create Tx DMA map %d, error = %d\n",
   7958 			    i, error);
   7959 			goto fail;
   7960 		}
   7961 	}
   7962 
   7963 	return 0;
   7964 
   7965 fail:
   7966 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7967 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7968 			bus_dmamap_destroy(sc->sc_dmat,
   7969 			    txq->txq_soft[i].txs_dmamap);
   7970 	}
   7971 	return error;
   7972 }
   7973 
   7974 static void
   7975 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7976 {
   7977 	int i;
   7978 
   7979 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7980 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7981 			bus_dmamap_destroy(sc->sc_dmat,
   7982 			    txq->txq_soft[i].txs_dmamap);
   7983 	}
   7984 }
   7985 
   7986 static int
   7987 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7988 {
   7989 	int i, error;
   7990 
   7991 	/* Create the receive buffer DMA maps. */
   7992 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7993 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7994 			    MCLBYTES, 0, 0,
   7995 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7996 			aprint_error_dev(sc->sc_dev,
   7997 			    "unable to create Rx DMA map %d error = %d\n",
   7998 			    i, error);
   7999 			goto fail;
   8000 		}
   8001 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8002 	}
   8003 
   8004 	return 0;
   8005 
   8006  fail:
   8007 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8008 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8009 			bus_dmamap_destroy(sc->sc_dmat,
   8010 			    rxq->rxq_soft[i].rxs_dmamap);
   8011 	}
   8012 	return error;
   8013 }
   8014 
   8015 static void
   8016 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8017 {
   8018 	int i;
   8019 
   8020 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8021 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8022 			bus_dmamap_destroy(sc->sc_dmat,
   8023 			    rxq->rxq_soft[i].rxs_dmamap);
   8024 	}
   8025 }
   8026 
   8027 /*
   8028  * wm_alloc_txrx_queues:
   8029  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   8030  */
   8031 static int
   8032 wm_alloc_txrx_queues(struct wm_softc *sc)
   8033 {
   8034 	int i, error, tx_done, rx_done;
   8035 
   8036 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8037 	    KM_SLEEP);
   8038 	if (sc->sc_queue == NULL) {
   8039 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   8040 		error = ENOMEM;
   8041 		goto fail_0;
   8042 	}
   8043 
   8044 	/* For transmission */
   8045 	error = 0;
   8046 	tx_done = 0;
   8047 	for (i = 0; i < sc->sc_nqueues; i++) {
   8048 #ifdef WM_EVENT_COUNTERS
   8049 		int j;
   8050 		const char *xname;
   8051 #endif
   8052 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8053 		txq->txq_sc = sc;
   8054 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8055 
   8056 		error = wm_alloc_tx_descs(sc, txq);
   8057 		if (error)
   8058 			break;
   8059 		error = wm_alloc_tx_buffer(sc, txq);
   8060 		if (error) {
   8061 			wm_free_tx_descs(sc, txq);
   8062 			break;
   8063 		}
   8064 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8065 		if (txq->txq_interq == NULL) {
   8066 			wm_free_tx_descs(sc, txq);
   8067 			wm_free_tx_buffer(sc, txq);
   8068 			error = ENOMEM;
   8069 			break;
   8070 		}
   8071 
   8072 #ifdef WM_EVENT_COUNTERS
   8073 		xname = device_xname(sc->sc_dev);
   8074 
   8075 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8076 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8077 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8078 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8079 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8080 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8081 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8082 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8083 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8084 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8085 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8086 
   8087 		for (j = 0; j < WM_NTXSEGS; j++) {
   8088 			snprintf(txq->txq_txseg_evcnt_names[j],
   8089 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8090 			    "txq%02dtxseg%d", i, j);
   8091 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8092 			    EVCNT_TYPE_MISC,
   8093 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8094 		}
   8095 
   8096 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8097 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8098 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8099 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8100 		/* Only for 82544 (and earlier?) */
   8101 		if (sc->sc_type <= WM_T_82544)
   8102 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8103 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8104 #endif /* WM_EVENT_COUNTERS */
   8105 
   8106 		tx_done++;
   8107 	}
   8108 	if (error)
   8109 		goto fail_1;
   8110 
   8111 	/* For receive */
   8112 	error = 0;
   8113 	rx_done = 0;
   8114 	for (i = 0; i < sc->sc_nqueues; i++) {
   8115 #ifdef WM_EVENT_COUNTERS
   8116 		const char *xname;
   8117 #endif
   8118 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8119 		rxq->rxq_sc = sc;
   8120 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8121 
   8122 		error = wm_alloc_rx_descs(sc, rxq);
   8123 		if (error)
   8124 			break;
   8125 
   8126 		error = wm_alloc_rx_buffer(sc, rxq);
   8127 		if (error) {
   8128 			wm_free_rx_descs(sc, rxq);
   8129 			break;
   8130 		}
   8131 
   8132 #ifdef WM_EVENT_COUNTERS
   8133 		xname = device_xname(sc->sc_dev);
   8134 
   8135 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8136 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8137 
   8138 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8139 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8140 #endif /* WM_EVENT_COUNTERS */
   8141 
   8142 		rx_done++;
   8143 	}
   8144 	if (error)
   8145 		goto fail_2;
   8146 
   8147 	return 0;
   8148 
   8149 fail_2:
   8150 	for (i = 0; i < rx_done; i++) {
   8151 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8152 		wm_free_rx_buffer(sc, rxq);
   8153 		wm_free_rx_descs(sc, rxq);
   8154 		if (rxq->rxq_lock)
   8155 			mutex_obj_free(rxq->rxq_lock);
   8156 	}
   8157 fail_1:
   8158 	for (i = 0; i < tx_done; i++) {
   8159 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8160 		pcq_destroy(txq->txq_interq);
   8161 		wm_free_tx_buffer(sc, txq);
   8162 		wm_free_tx_descs(sc, txq);
   8163 		if (txq->txq_lock)
   8164 			mutex_obj_free(txq->txq_lock);
   8165 	}
   8166 
   8167 	kmem_free(sc->sc_queue,
   8168 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8169 fail_0:
   8170 	return error;
   8171 }
   8172 
   8173 /*
   8174  * wm_free_txrx_queues:
   8175  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   8176  */
   8177 static void
   8178 wm_free_txrx_queues(struct wm_softc *sc)
   8179 {
   8180 	int i;
   8181 
   8182 	for (i = 0; i < sc->sc_nqueues; i++) {
   8183 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8184 
   8185 #ifdef WM_EVENT_COUNTERS
   8186 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8187 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8188 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8189 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8190 #endif /* WM_EVENT_COUNTERS */
   8191 
   8192 		wm_free_rx_buffer(sc, rxq);
   8193 		wm_free_rx_descs(sc, rxq);
   8194 		if (rxq->rxq_lock)
   8195 			mutex_obj_free(rxq->rxq_lock);
   8196 	}
   8197 
   8198 	for (i = 0; i < sc->sc_nqueues; i++) {
   8199 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8200 		struct mbuf *m;
   8201 #ifdef WM_EVENT_COUNTERS
   8202 		int j;
   8203 
   8204 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8205 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8206 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8207 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8208 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8209 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8210 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8211 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8212 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8213 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8214 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8215 
   8216 		for (j = 0; j < WM_NTXSEGS; j++)
   8217 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8218 
   8219 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8220 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8221 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8222 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8223 		if (sc->sc_type <= WM_T_82544)
   8224 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8225 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8226 #endif /* WM_EVENT_COUNTERS */
   8227 
   8228 		/* Drain txq_interq */
   8229 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8230 			m_freem(m);
   8231 		pcq_destroy(txq->txq_interq);
   8232 
   8233 		wm_free_tx_buffer(sc, txq);
   8234 		wm_free_tx_descs(sc, txq);
   8235 		if (txq->txq_lock)
   8236 			mutex_obj_free(txq->txq_lock);
   8237 	}
   8238 
   8239 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8240 }
   8241 
   8242 static void
   8243 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8244 {
   8245 
   8246 	KASSERT(mutex_owned(txq->txq_lock));
   8247 
   8248 	/* Initialize the transmit descriptor ring. */
   8249 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8250 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8251 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8252 	txq->txq_free = WM_NTXDESC(txq);
   8253 	txq->txq_next = 0;
   8254 }
   8255 
   8256 static void
   8257 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8258     struct wm_txqueue *txq)
   8259 {
   8260 
   8261 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8262 		device_xname(sc->sc_dev), __func__));
   8263 	KASSERT(mutex_owned(txq->txq_lock));
   8264 
   8265 	if (sc->sc_type < WM_T_82543) {
   8266 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8267 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8268 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8269 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8270 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8271 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8272 	} else {
   8273 		int qid = wmq->wmq_id;
   8274 
   8275 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8276 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8277 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8278 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8279 
   8280 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8281 			/*
   8282 			 * Don't write TDT before TCTL.EN is set.
   8283 			 * See the documentation.
   8284 			 */
   8285 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8286 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8287 			    | TXDCTL_WTHRESH(0));
   8288 		else {
   8289 			/* XXX should update with AIM? */
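        			/*
        			 * XXX Assumption: wmq_itr is in 256 ns ITR units
        			 * while TIDV/TADV count 1.024 us ticks, hence the
        			 * division by four.
        			 */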
   8290 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8291 			if (sc->sc_type >= WM_T_82540) {
   8292 				/* Should be kept the same as TIDV. */
   8293 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8294 			}
   8295 
   8296 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8297 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8298 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8299 		}
   8300 	}
   8301 }
   8302 
   8303 static void
   8304 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8305 {
   8306 	int i;
   8307 
   8308 	KASSERT(mutex_owned(txq->txq_lock));
   8309 
   8310 	/* Initialize the transmit job descriptors. */
   8311 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8312 		txq->txq_soft[i].txs_mbuf = NULL;
   8313 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8314 	txq->txq_snext = 0;
   8315 	txq->txq_sdirty = 0;
   8316 }
   8317 
   8318 static void
   8319 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8320     struct wm_txqueue *txq)
   8321 {
   8322 
   8323 	KASSERT(mutex_owned(txq->txq_lock));
   8324 
   8325 	/*
   8326 	 * Set up some register offsets that are different between
   8327 	 * the i82542 and the i82543 and later chips.
   8328 	 */
   8329 	if (sc->sc_type < WM_T_82543)
   8330 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8331 	else
   8332 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8333 
   8334 	wm_init_tx_descs(sc, txq);
   8335 	wm_init_tx_regs(sc, wmq, txq);
   8336 	wm_init_tx_buffer(sc, txq);
   8337 
   8338 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   8339 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8340 
   8341 	txq->txq_sending = false;
   8342 }
   8343 
   8344 static void
   8345 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8346     struct wm_rxqueue *rxq)
   8347 {
   8348 
   8349 	KASSERT(mutex_owned(rxq->rxq_lock));
   8350 
   8351 	/*
   8352 	 * Initialize the receive descriptor and receive job
   8353 	 * descriptor rings.
   8354 	 */
   8355 	if (sc->sc_type < WM_T_82543) {
   8356 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8357 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8358 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8359 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8360 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8361 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8362 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8363 
   8364 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8365 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8366 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8367 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8368 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8369 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8370 	} else {
   8371 		int qid = wmq->wmq_id;
   8372 
   8373 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8374 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8375 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8376 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8377 
   8378 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8379 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8380 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8381 				    "or higher\n", __func__, MCLBYTES);
   8382 
   8383 			/*
   8384 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
   8385 			 * supported.
   8386 			 */
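        			/*
        			 * SRRCTL's BSIZEPKT field is in units of 1 KB,
        			 * hence the shift (and the MCLBYTES alignment
        			 * check above).
        			 */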
   8387 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8388 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8389 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8390 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8391 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8392 			    | RXDCTL_WTHRESH(1));
   8393 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8394 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8395 		} else {
   8396 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8397 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8398 			/* XXX should update with AIM? */
   8399 			CSR_WRITE(sc, WMREG_RDTR,
   8400 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   8401 			/* MUST be kept the same as RDTR. */
   8402 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8403 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8404 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8405 		}
   8406 	}
   8407 }
   8408 
   8409 static int
   8410 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8411 {
   8412 	struct wm_rxsoft *rxs;
   8413 	int error, i;
   8414 
   8415 	KASSERT(mutex_owned(rxq->rxq_lock));
   8416 
   8417 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8418 		rxs = &rxq->rxq_soft[i];
   8419 		if (rxs->rxs_mbuf == NULL) {
   8420 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8421 				log(LOG_ERR, "%s: unable to allocate or map "
   8422 				    "rx buffer %d, error = %d\n",
   8423 				    device_xname(sc->sc_dev), i, error);
   8424 				/*
   8425 				 * XXX Should attempt to run with fewer receive
   8426 				 * XXX buffers instead of just failing.
   8427 				 */
   8428 				wm_rxdrain(rxq);
   8429 				return ENOMEM;
   8430 			}
   8431 		} else {
   8432 			/*
   8433 			 * For 82575 and 82576, the RX descriptors must be
   8434 			 * initialized after the setting of RCTL.EN in
   8435 			 * wm_set_filter()
   8436 			 */
   8437 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8438 				wm_init_rxdesc(rxq, i);
   8439 		}
   8440 	}
   8441 	rxq->rxq_ptr = 0;
   8442 	rxq->rxq_discard = 0;
   8443 	WM_RXCHAIN_RESET(rxq);
   8444 
   8445 	return 0;
   8446 }
   8447 
   8448 static int
   8449 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8450     struct wm_rxqueue *rxq)
   8451 {
   8452 
   8453 	KASSERT(mutex_owned(rxq->rxq_lock));
   8454 
   8455 	/*
   8456 	 * Set up some register offsets that are different between
   8457 	 * the i82542 and the i82543 and later chips.
   8458 	 */
   8459 	if (sc->sc_type < WM_T_82543)
   8460 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8461 	else
   8462 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8463 
   8464 	wm_init_rx_regs(sc, wmq, rxq);
   8465 	return wm_init_rx_buffer(sc, rxq);
   8466 }
   8467 
   8468 /*
   8469  * wm_init_txrx_queues:
   8470  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   8471  */
   8472 static int
   8473 wm_init_txrx_queues(struct wm_softc *sc)
   8474 {
   8475 	int i, error = 0;
   8476 
   8477 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8478 		device_xname(sc->sc_dev), __func__));
   8479 
   8480 	for (i = 0; i < sc->sc_nqueues; i++) {
   8481 		struct wm_queue *wmq = &sc->sc_queue[i];
   8482 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8483 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8484 
   8485 		/*
   8486 		 * TODO
   8487 		 * Currently, use a constant value instead of AIM (adaptive
   8488 		 * interrupt moderation).  Furthermore, the interrupt interval
   8489 		 * for multiqueue, which uses polling mode, is lower than the
   8490 		 * default value.  More tuning, and AIM, are required.
   8491 		 */
   8492 		if (wm_is_using_multiqueue(sc))
   8493 			wmq->wmq_itr = 50;
   8494 		else
   8495 			wmq->wmq_itr = sc->sc_itr_init;
   8496 		wmq->wmq_set_itr = true;
   8497 
   8498 		mutex_enter(txq->txq_lock);
   8499 		wm_init_tx_queue(sc, wmq, txq);
   8500 		mutex_exit(txq->txq_lock);
   8501 
   8502 		mutex_enter(rxq->rxq_lock);
   8503 		error = wm_init_rx_queue(sc, wmq, rxq);
   8504 		mutex_exit(rxq->rxq_lock);
   8505 		if (error)
   8506 			break;
   8507 	}
   8508 
   8509 	return error;
   8510 }
   8511 
   8512 /*
   8513  * wm_tx_offload:
   8514  *
   8515  *	Set up TCP/IP checksumming parameters for the
   8516  *	specified packet.
   8517  */
   8518 static void
   8519 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8520     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8521 {
   8522 	struct mbuf *m0 = txs->txs_mbuf;
   8523 	struct livengood_tcpip_ctxdesc *t;
   8524 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8525 	uint32_t ipcse;
   8526 	struct ether_header *eh;
   8527 	int offset, iphl;
   8528 	uint8_t fields;
   8529 
   8530 	/*
   8531 	 * XXX It would be nice if the mbuf pkthdr had offset
   8532 	 * fields for the protocol headers.
   8533 	 */
   8534 
   8535 	eh = mtod(m0, struct ether_header *);
   8536 	switch (htons(eh->ether_type)) {
   8537 	case ETHERTYPE_IP:
   8538 	case ETHERTYPE_IPV6:
   8539 		offset = ETHER_HDR_LEN;
   8540 		break;
   8541 
   8542 	case ETHERTYPE_VLAN:
   8543 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8544 		break;
   8545 
   8546 	default:
   8547 		/* Don't support this protocol or encapsulation. */
   8548 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8549 		txq->txq_last_hw_ipcs = 0;
   8550 		txq->txq_last_hw_tucs = 0;
   8551 		*fieldsp = 0;
   8552 		*cmdp = 0;
   8553 		return;
   8554 	}
   8555 
   8556 	if ((m0->m_pkthdr.csum_flags &
   8557 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8558 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8559 	} else
   8560 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8561 
   8562 	ipcse = offset + iphl - 1;
   8563 
   8564 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8565 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8566 	seg = 0;
   8567 	fields = 0;
   8568 
   8569 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8570 		int hlen = offset + iphl;
   8571 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8572 
   8573 		if (__predict_false(m0->m_len <
   8574 				    (hlen + sizeof(struct tcphdr)))) {
   8575 			/*
   8576 			 * TCP/IP headers are not in the first mbuf; we need
   8577 			 * to do this the slow and painful way. Let's just
   8578 			 * hope this doesn't happen very often.
   8579 			 */
   8580 			struct tcphdr th;
   8581 
   8582 			WM_Q_EVCNT_INCR(txq, tsopain);
   8583 
   8584 			m_copydata(m0, hlen, sizeof(th), &th);
   8585 			if (v4) {
   8586 				struct ip ip;
   8587 
   8588 				m_copydata(m0, offset, sizeof(ip), &ip);
   8589 				ip.ip_len = 0;
   8590 				m_copyback(m0,
   8591 				    offset + offsetof(struct ip, ip_len),
   8592 				    sizeof(ip.ip_len), &ip.ip_len);
   8593 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8594 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8595 			} else {
   8596 				struct ip6_hdr ip6;
   8597 
   8598 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8599 				ip6.ip6_plen = 0;
   8600 				m_copyback(m0,
   8601 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8602 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8603 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8604 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8605 			}
   8606 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8607 			    sizeof(th.th_sum), &th.th_sum);
   8608 
   8609 			hlen += th.th_off << 2;
   8610 		} else {
   8611 			/*
   8612 			 * TCP/IP headers are in the first mbuf; we can do
   8613 			 * this the easy way.
   8614 			 */
   8615 			struct tcphdr *th;
   8616 
   8617 			if (v4) {
   8618 				struct ip *ip =
   8619 				    (void *)(mtod(m0, char *) + offset);
   8620 				th = (void *)(mtod(m0, char *) + hlen);
   8621 
   8622 				ip->ip_len = 0;
   8623 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8624 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8625 			} else {
   8626 				struct ip6_hdr *ip6 =
   8627 				    (void *)(mtod(m0, char *) + offset);
   8628 				th = (void *)(mtod(m0, char *) + hlen);
   8629 
   8630 				ip6->ip6_plen = 0;
   8631 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8632 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8633 			}
   8634 			hlen += th->th_off << 2;
   8635 		}
   8636 
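        		/*
        		 * For TSO the hardware recomputes the IP length and the
        		 * TCP checksum for each segment, which is why the length
        		 * fields were zeroed and th_sum was seeded with only the
        		 * pseudo-header sum above.
        		 */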
   8637 		if (v4) {
   8638 			WM_Q_EVCNT_INCR(txq, tso);
   8639 			cmdlen |= WTX_TCPIP_CMD_IP;
   8640 		} else {
   8641 			WM_Q_EVCNT_INCR(txq, tso6);
   8642 			ipcse = 0;
   8643 		}
   8644 		cmd |= WTX_TCPIP_CMD_TSE;
   8645 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8646 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8647 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8648 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8649 	}
   8650 
   8651 	/*
   8652 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8653 	 * offload feature, if we load the context descriptor, we
   8654 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8655 	 */
   8656 
   8657 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8658 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8659 	    WTX_TCPIP_IPCSE(ipcse);
   8660 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8661 		WM_Q_EVCNT_INCR(txq, ipsum);
   8662 		fields |= WTX_IXSM;
   8663 	}
   8664 
   8665 	offset += iphl;
   8666 
   8667 	if (m0->m_pkthdr.csum_flags &
   8668 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8669 		WM_Q_EVCNT_INCR(txq, tusum);
   8670 		fields |= WTX_TXSM;
   8671 		tucs = WTX_TCPIP_TUCSS(offset) |
   8672 		    WTX_TCPIP_TUCSO(offset +
   8673 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8674 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8675 	} else if ((m0->m_pkthdr.csum_flags &
   8676 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8677 		WM_Q_EVCNT_INCR(txq, tusum6);
   8678 		fields |= WTX_TXSM;
   8679 		tucs = WTX_TCPIP_TUCSS(offset) |
   8680 		    WTX_TCPIP_TUCSO(offset +
   8681 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8682 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8683 	} else {
   8684 		/* Just initialize it to a valid TCP context. */
   8685 		tucs = WTX_TCPIP_TUCSS(offset) |
   8686 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8687 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8688 	}
   8689 
   8690 	*cmdp = cmd;
   8691 	*fieldsp = fields;
   8692 
   8693 	/*
   8694 	 * We don't have to write a context descriptor for every packet,
   8695 	 * except on the 82574.  For the 82574, we must write a context
   8696 	 * descriptor for every packet when we use two descriptor queues.
   8697 	 *
   8698 	 * The 82574L can only remember the *last* context used,
   8699 	 * regardless of which queue it was used for.  We cannot reuse
   8700 	 * contexts on this hardware platform and must generate a new
   8701 	 * context every time.  See the 82574L hardware spec, section
   8702 	 * 7.2.6, second note.
   8703 	 */
   8704 	if (sc->sc_nqueues < 2) {
   8705 		/*
   8706 		 * Setting up a new checksum offload context for every
   8707 		 * frame takes a lot of processing time in hardware.
   8708 		 * This also hurts performance for small frames, so avoid
   8709 		 * it if the driver can reuse the previously configured
   8710 		 * checksum offload context.
   8711 		 * For TSO, in theory we could reuse the same TSO context
   8712 		 * if the frame is the same type (IP/TCP) and has the same
   8713 		 * MSS.  However, checking whether a frame has the same
   8714 		 * IP/TCP structure is hard, so just ignore that and
   8715 		 * always establish a new TSO context.
   8716 		 */
   8717 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8718 		    == 0) {
   8719 			if (txq->txq_last_hw_cmd == cmd &&
   8720 			    txq->txq_last_hw_fields == fields &&
   8721 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8722 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8723 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8724 				return;
   8725 			}
   8726 		}
   8727 
   8728 		txq->txq_last_hw_cmd = cmd;
   8729 		txq->txq_last_hw_fields = fields;
   8730 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8731 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8732 	}
   8733 
   8734 	/* Fill in the context descriptor. */
   8735 	t = (struct livengood_tcpip_ctxdesc *)
   8736 	    &txq->txq_descs[txq->txq_next];
   8737 	t->tcpip_ipcs = htole32(ipcs);
   8738 	t->tcpip_tucs = htole32(tucs);
   8739 	t->tcpip_cmdlen = htole32(cmdlen);
   8740 	t->tcpip_seg = htole32(seg);
   8741 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8742 
   8743 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8744 	txs->txs_ndesc++;
   8745 }
   8746 
   8747 static inline int
   8748 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8749 {
   8750 	struct wm_softc *sc = ifp->if_softc;
   8751 	u_int cpuid = cpu_index(curcpu());
   8752 
   8753 	/*
   8754 	 * Currently, a simple distribution strategy.
   8755 	 * TODO:
   8756 	 * Distribute by flowid (RSS hash value).
   8757 	 */
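        	/*
        	 * Rotate the CPU index by the affinity offset (mod ncpu),
        	 * then fold the result onto the available queues.
        	 */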
   8758 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8759 }
   8760 
   8761 static inline bool
   8762 wm_linkdown_discard(struct wm_txqueue *txq)
   8763 {
   8764 
   8765 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8766 		return true;
   8767 
   8768 	return false;
   8769 }
   8770 
   8771 /*
   8772  * wm_start:		[ifnet interface function]
   8773  *
   8774  *	Start packet transmission on the interface.
   8775  */
   8776 static void
   8777 wm_start(struct ifnet *ifp)
   8778 {
   8779 	struct wm_softc *sc = ifp->if_softc;
   8780 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8781 
   8782 	KASSERT(if_is_mpsafe(ifp));
   8783 	/*
   8784 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8785 	 */
   8786 
   8787 	mutex_enter(txq->txq_lock);
   8788 	if (!txq->txq_stopping)
   8789 		wm_start_locked(ifp);
   8790 	mutex_exit(txq->txq_lock);
   8791 }
   8792 
   8793 static void
   8794 wm_start_locked(struct ifnet *ifp)
   8795 {
   8796 	struct wm_softc *sc = ifp->if_softc;
   8797 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8798 
   8799 	wm_send_common_locked(ifp, txq, false);
   8800 }
   8801 
   8802 static int
   8803 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8804 {
   8805 	int qid;
   8806 	struct wm_softc *sc = ifp->if_softc;
   8807 	struct wm_txqueue *txq;
   8808 
   8809 	qid = wm_select_txqueue(ifp, m);
   8810 	txq = &sc->sc_queue[qid].wmq_txq;
   8811 
   8812 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8813 		m_freem(m);
   8814 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8815 		return ENOBUFS;
   8816 	}
   8817 
   8818 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8819 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8820 	if (m->m_flags & M_MCAST)
   8821 		if_statinc_ref(nsr, if_omcasts);
   8822 	IF_STAT_PUTREF(ifp);
   8823 
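        	/*
        	 * If the Tx lock is busy, leave the packet on txq_interq;
        	 * the current lock holder or a subsequent Tx (re)start will
        	 * pick it up.
        	 */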
   8824 	if (mutex_tryenter(txq->txq_lock)) {
   8825 		if (!txq->txq_stopping)
   8826 			wm_transmit_locked(ifp, txq);
   8827 		mutex_exit(txq->txq_lock);
   8828 	}
   8829 
   8830 	return 0;
   8831 }
   8832 
   8833 static void
   8834 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8835 {
   8836 
   8837 	wm_send_common_locked(ifp, txq, true);
   8838 }
   8839 
   8840 static void
   8841 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8842     bool is_transmit)
   8843 {
   8844 	struct wm_softc *sc = ifp->if_softc;
   8845 	struct mbuf *m0;
   8846 	struct wm_txsoft *txs;
   8847 	bus_dmamap_t dmamap;
   8848 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8849 	bus_addr_t curaddr;
   8850 	bus_size_t seglen, curlen;
   8851 	uint32_t cksumcmd;
   8852 	uint8_t cksumfields;
   8853 	bool remap = true;
   8854 
   8855 	KASSERT(mutex_owned(txq->txq_lock));
   8856 	KASSERT(!txq->txq_stopping);
   8857 
   8858 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8859 		return;
   8860 
   8861 	if (__predict_false(wm_linkdown_discard(txq))) {
   8862 		do {
   8863 			if (is_transmit)
   8864 				m0 = pcq_get(txq->txq_interq);
   8865 			else
   8866 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8867 			/*
   8868 			 * Count the packet as sent even though it is
   8869 			 * discarded because the PHY link is down.
   8870 			 */
   8871 			if (m0 != NULL) {
   8872 				if_statinc(ifp, if_opackets);
   8873 				m_freem(m0);
   8874 			}
   8875 		} while (m0 != NULL);
   8876 		return;
   8877 	}
   8878 
   8879 	/* Remember the previous number of free descriptors. */
   8880 	ofree = txq->txq_free;
   8881 
   8882 	/*
   8883 	 * Loop through the send queue, setting up transmit descriptors
   8884 	 * until we drain the queue, or use up all available transmit
   8885 	 * descriptors.
   8886 	 */
   8887 	for (;;) {
   8888 		m0 = NULL;
   8889 
   8890 		/* Get a work queue entry. */
   8891 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8892 			wm_txeof(txq, UINT_MAX);
   8893 			if (txq->txq_sfree == 0) {
   8894 				DPRINTF(sc, WM_DEBUG_TX,
   8895 				    ("%s: TX: no free job descriptors\n",
   8896 					device_xname(sc->sc_dev)));
   8897 				WM_Q_EVCNT_INCR(txq, txsstall);
   8898 				break;
   8899 			}
   8900 		}
   8901 
   8902 		/* Grab a packet off the queue. */
   8903 		if (is_transmit)
   8904 			m0 = pcq_get(txq->txq_interq);
   8905 		else
   8906 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8907 		if (m0 == NULL)
   8908 			break;
   8909 
   8910 		DPRINTF(sc, WM_DEBUG_TX,
   8911 		    ("%s: TX: have packet to transmit: %p\n",
   8912 			device_xname(sc->sc_dev), m0));
   8913 
   8914 		txs = &txq->txq_soft[txq->txq_snext];
   8915 		dmamap = txs->txs_dmamap;
   8916 
   8917 		use_tso = (m0->m_pkthdr.csum_flags &
   8918 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8919 
   8920 		/*
   8921 		 * So says the Linux driver:
   8922 		 * The controller does a simple calculation to make sure
   8923 		 * there is enough room in the FIFO before initiating the
   8924 		 * DMA for each buffer. The calc is:
   8925 		 *	4 = ceil(buffer len / MSS)
   8926 		 * To make sure we don't overrun the FIFO, adjust the max
   8927 		 * buffer len if the MSS drops.
   8928 		 */
   8929 		dmamap->dm_maxsegsz =
   8930 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8931 		    ? m0->m_pkthdr.segsz << 2
   8932 		    : WTX_MAX_LEN;
   8933 
   8934 		/*
   8935 		 * Load the DMA map.  If this fails, the packet either
   8936 		 * didn't fit in the allotted number of segments, or we
   8937 		 * were short on resources.  For the too-many-segments
   8938 		 * case, we simply report an error and drop the packet,
   8939 		 * since we can't sanely copy a jumbo packet to a single
   8940 		 * buffer.
   8941 		 */
   8942 retry:
   8943 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8944 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8945 		if (__predict_false(error)) {
   8946 			if (error == EFBIG) {
   8947 				if (remap == true) {
   8948 					struct mbuf *m;
   8949 
   8950 					remap = false;
   8951 					m = m_defrag(m0, M_NOWAIT);
   8952 					if (m != NULL) {
   8953 						WM_Q_EVCNT_INCR(txq, defrag);
   8954 						m0 = m;
   8955 						goto retry;
   8956 					}
   8957 				}
   8958 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8959 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8960 				    "DMA segments, dropping...\n",
   8961 				    device_xname(sc->sc_dev));
   8962 				wm_dump_mbuf_chain(sc, m0);
   8963 				m_freem(m0);
   8964 				continue;
   8965 			}
   8966 			/* Short on resources, just stop for now. */
   8967 			DPRINTF(sc, WM_DEBUG_TX,
   8968 			    ("%s: TX: dmamap load failed: %d\n",
   8969 				device_xname(sc->sc_dev), error));
   8970 			break;
   8971 		}
   8972 
   8973 		segs_needed = dmamap->dm_nsegs;
   8974 		if (use_tso) {
   8975 			/* For sentinel descriptor; see below. */
   8976 			segs_needed++;
   8977 		}
   8978 
   8979 		/*
   8980 		 * Ensure we have enough descriptors free to describe
   8981 		 * the packet. Note, we always reserve one descriptor
   8982 		 * at the end of the ring due to the semantics of the
   8983 		 * TDT register, plus one more in the event we need
   8984 		 * to load offload context.
   8985 		 */
   8986 		if (segs_needed > txq->txq_free - 2) {
   8987 			/*
   8988 			 * Not enough free descriptors to transmit this
   8989 			 * packet.  We haven't committed anything yet,
   8990 			 * so just unload the DMA map, drop the packet
   8991 			 * (it is freed below), and punt.  Notify the upper
   8992 			 * layer that there are no more slots left.
   8993 			 */
   8994 			DPRINTF(sc, WM_DEBUG_TX,
   8995 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8996 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8997 				segs_needed, txq->txq_free - 1));
   8998 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8999 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9000 			WM_Q_EVCNT_INCR(txq, txdstall);
   9001 			break;
   9002 		}
   9003 
   9004 		/*
   9005 		 * Check for 82547 Tx FIFO bug. We need to do this
   9006 		 * once we know we can transmit the packet, since we
   9007 		 * do some internal FIFO space accounting here.
   9008 		 */
   9009 		if (sc->sc_type == WM_T_82547 &&
   9010 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9011 			DPRINTF(sc, WM_DEBUG_TX,
   9012 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9013 				device_xname(sc->sc_dev)));
   9014 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9015 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9016 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9017 			break;
   9018 		}
   9019 
   9020 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9021 
   9022 		DPRINTF(sc, WM_DEBUG_TX,
   9023 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9024 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9025 
   9026 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9027 
   9028 		/*
   9029 		 * Store a pointer to the packet so that we can free it
   9030 		 * later.
   9031 		 *
   9032 		 * Initially, we consider the number of descriptors the
   9033 		 * packet uses to be the number of DMA segments.  This may
   9034 		 * be incremented by 1 if we do checksum offload (a
   9035 		 * descriptor is used to set the checksum context).
   9036 		 */
   9037 		txs->txs_mbuf = m0;
   9038 		txs->txs_firstdesc = txq->txq_next;
   9039 		txs->txs_ndesc = segs_needed;
   9040 
   9041 		/* Set up offload parameters for this packet. */
   9042 		if (m0->m_pkthdr.csum_flags &
   9043 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9044 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9045 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9046 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9047 		} else {
   9048 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9049 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9050 			cksumcmd = 0;
   9051 			cksumfields = 0;
   9052 		}
   9053 
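        		/*
        		 * WTX_CMD_IFCS has the chip append the Ethernet FCS;
        		 * WTX_CMD_IDE enables the transmit interrupt delay.
        		 */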
   9054 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9055 
   9056 		/* Sync the DMA map. */
   9057 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9058 		    BUS_DMASYNC_PREWRITE);
   9059 
   9060 		/* Initialize the transmit descriptor. */
   9061 		for (nexttx = txq->txq_next, seg = 0;
   9062 		     seg < dmamap->dm_nsegs; seg++) {
   9063 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9064 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9065 			     seglen != 0;
   9066 			     curaddr += curlen, seglen -= curlen,
   9067 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9068 				curlen = seglen;
   9069 
   9070 				/*
   9071 				 * So says the Linux driver:
   9072 				 * Work around for premature descriptor
   9073 				 * write-backs in TSO mode.  Append a
   9074 				 * 4-byte sentinel descriptor.
   9075 				 */
   9076 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9077 				    curlen > 8)
   9078 					curlen -= 4;
   9079 
   9080 				wm_set_dma_addr(
   9081 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9082 				txq->txq_descs[nexttx].wtx_cmdlen
   9083 				    = htole32(cksumcmd | curlen);
   9084 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9085 				    = 0;
   9086 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9087 				    = cksumfields;
   9088 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9089 				lasttx = nexttx;
   9090 
   9091 				DPRINTF(sc, WM_DEBUG_TX,
   9092 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9093 					"len %#04zx\n",
   9094 					device_xname(sc->sc_dev), nexttx,
   9095 					(uint64_t)curaddr, curlen));
   9096 			}
   9097 		}
   9098 
   9099 		KASSERT(lasttx != -1);
   9100 
   9101 		/*
   9102 		 * Set up the command byte on the last descriptor of
   9103 		 * the packet. If we're in the interrupt delay window,
   9104 		 * delay the interrupt.
   9105 		 */
   9106 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9107 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9108 
   9109 		/*
   9110 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9111 		 * up the descriptor to encapsulate the packet for us.
   9112 		 *
   9113 		 * This is only valid on the last descriptor of the packet.
   9114 		 */
   9115 		if (vlan_has_tag(m0)) {
   9116 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9117 			    htole32(WTX_CMD_VLE);
   9118 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9119 			    = htole16(vlan_get_tag(m0));
   9120 		}
   9121 
   9122 		txs->txs_lastdesc = lasttx;
   9123 
   9124 		DPRINTF(sc, WM_DEBUG_TX,
   9125 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9126 			device_xname(sc->sc_dev),
   9127 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9128 
   9129 		/* Sync the descriptors we're using. */
   9130 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9131 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9132 
   9133 		/* Give the packet to the chip. */
   9134 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9135 
   9136 		DPRINTF(sc, WM_DEBUG_TX,
   9137 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9138 
   9139 		DPRINTF(sc, WM_DEBUG_TX,
   9140 		    ("%s: TX: finished transmitting packet, job %d\n",
   9141 			device_xname(sc->sc_dev), txq->txq_snext));
   9142 
   9143 		/* Advance the tx pointer. */
   9144 		txq->txq_free -= txs->txs_ndesc;
   9145 		txq->txq_next = nexttx;
   9146 
   9147 		txq->txq_sfree--;
   9148 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9149 
   9150 		/* Pass the packet to any BPF listeners. */
   9151 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9152 	}
   9153 
   9154 	if (m0 != NULL) {
   9155 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9156 		WM_Q_EVCNT_INCR(txq, descdrop);
   9157 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9158 			__func__));
   9159 		m_freem(m0);
   9160 	}
   9161 
   9162 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9163 		/* No more slots; notify upper layer. */
   9164 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9165 	}
   9166 
   9167 	if (txq->txq_free != ofree) {
   9168 		/* Set a watchdog timer in case the chip flakes out. */
   9169 		txq->txq_lastsent = time_uptime;
   9170 		txq->txq_sending = true;
   9171 	}
   9172 }
   9173 
   9174 /*
   9175  * wm_nq_tx_offload:
   9176  *
   9177  *	Set up TCP/IP checksumming parameters for the
   9178  *	specified packet, for NEWQUEUE devices
   9179  */
   9180 static void
   9181 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9182     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9183 {
   9184 	struct mbuf *m0 = txs->txs_mbuf;
   9185 	uint32_t vl_len, mssidx, cmdc;
   9186 	struct ether_header *eh;
   9187 	int offset, iphl;
   9188 
   9189 	/*
   9190 	 * XXX It would be nice if the mbuf pkthdr had offset
   9191 	 * fields for the protocol headers.
   9192 	 */
   9193 	*cmdlenp = 0;
   9194 	*fieldsp = 0;
   9195 
   9196 	eh = mtod(m0, struct ether_header *);
   9197 	switch (htons(eh->ether_type)) {
   9198 	case ETHERTYPE_IP:
   9199 	case ETHERTYPE_IPV6:
   9200 		offset = ETHER_HDR_LEN;
   9201 		break;
   9202 
   9203 	case ETHERTYPE_VLAN:
   9204 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9205 		break;
   9206 
   9207 	default:
   9208 		/* Don't support this protocol or encapsulation. */
   9209 		*do_csum = false;
   9210 		return;
   9211 	}
   9212 	*do_csum = true;
   9213 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9214 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9215 
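         	/*
         	 * The context descriptor's VLAN/length word packs the MAC
         	 * header length, the IP header length and the VLAN tag.
         	 */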
   9216 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9217 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9218 
   9219 	if ((m0->m_pkthdr.csum_flags &
   9220 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9221 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9222 	} else {
   9223 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9224 	}
   9225 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9226 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9227 
   9228 	if (vlan_has_tag(m0)) {
   9229 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9230 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9231 		*cmdlenp |= NQTX_CMD_VLE;
   9232 	}
   9233 
   9234 	mssidx = 0;
   9235 
   9236 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9237 		int hlen = offset + iphl;
   9238 		int tcp_hlen;
   9239 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9240 
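         		/*
         		 * For TSO the hardware regenerates the IP total length
         		 * and the TCP checksum for each segment, so zero the
         		 * length field and seed th_sum with a pseudo-header
         		 * checksum that excludes the length.
         		 */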
   9241 		if (__predict_false(m0->m_len <
   9242 				    (hlen + sizeof(struct tcphdr)))) {
   9243 			/*
   9244 			 * TCP/IP headers are not in the first mbuf; we need
   9245 			 * to do this the slow and painful way. Let's just
   9246 			 * hope this doesn't happen very often.
   9247 			 */
   9248 			struct tcphdr th;
   9249 
   9250 			WM_Q_EVCNT_INCR(txq, tsopain);
   9251 
   9252 			m_copydata(m0, hlen, sizeof(th), &th);
   9253 			if (v4) {
   9254 				struct ip ip;
   9255 
   9256 				m_copydata(m0, offset, sizeof(ip), &ip);
   9257 				ip.ip_len = 0;
   9258 				m_copyback(m0,
   9259 				    offset + offsetof(struct ip, ip_len),
   9260 				    sizeof(ip.ip_len), &ip.ip_len);
   9261 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9262 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9263 			} else {
   9264 				struct ip6_hdr ip6;
   9265 
   9266 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9267 				ip6.ip6_plen = 0;
   9268 				m_copyback(m0,
   9269 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9270 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9271 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9272 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9273 			}
   9274 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9275 			    sizeof(th.th_sum), &th.th_sum);
   9276 
   9277 			tcp_hlen = th.th_off << 2;
   9278 		} else {
   9279 			/*
   9280 			 * TCP/IP headers are in the first mbuf; we can do
   9281 			 * this the easy way.
   9282 			 */
   9283 			struct tcphdr *th;
   9284 
   9285 			if (v4) {
   9286 				struct ip *ip =
   9287 				    (void *)(mtod(m0, char *) + offset);
   9288 				th = (void *)(mtod(m0, char *) + hlen);
   9289 
   9290 				ip->ip_len = 0;
   9291 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9292 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9293 			} else {
   9294 				struct ip6_hdr *ip6 =
   9295 				    (void *)(mtod(m0, char *) + offset);
   9296 				th = (void *)(mtod(m0, char *) + hlen);
   9297 
   9298 				ip6->ip6_plen = 0;
   9299 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9300 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9301 			}
   9302 			tcp_hlen = th->th_off << 2;
   9303 		}
   9304 		hlen += tcp_hlen;
   9305 		*cmdlenp |= NQTX_CMD_TSE;
   9306 
   9307 		if (v4) {
   9308 			WM_Q_EVCNT_INCR(txq, tso);
   9309 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9310 		} else {
   9311 			WM_Q_EVCNT_INCR(txq, tso6);
   9312 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9313 		}
    9314 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    9315 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9316 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9317 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9318 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9319 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9320 	} else {
   9321 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9322 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9323 	}
   9324 
   9325 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9326 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9327 		cmdc |= NQTXC_CMD_IP4;
   9328 	}
   9329 
   9330 	if (m0->m_pkthdr.csum_flags &
   9331 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9332 		WM_Q_EVCNT_INCR(txq, tusum);
   9333 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9334 			cmdc |= NQTXC_CMD_TCP;
   9335 		else
   9336 			cmdc |= NQTXC_CMD_UDP;
   9337 
   9338 		cmdc |= NQTXC_CMD_IP4;
   9339 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9340 	}
   9341 	if (m0->m_pkthdr.csum_flags &
   9342 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9343 		WM_Q_EVCNT_INCR(txq, tusum6);
   9344 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9345 			cmdc |= NQTXC_CMD_TCP;
   9346 		else
   9347 			cmdc |= NQTXC_CMD_UDP;
   9348 
   9349 		cmdc |= NQTXC_CMD_IP6;
   9350 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9351 	}
   9352 
    9353 	/*
    9354 	 * We don't have to write a context descriptor for every packet on
    9355 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    9356 	 * I210 and I211; for these controllers it is enough to write one
    9357 	 * per Tx queue.
    9358 	 * Writing a context descriptor for every packet adds overhead,
    9359 	 * but it does not cause problems.
    9360 	 */
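         	/*
         	 * A sketch only, not what this driver does: the write-once
         	 * optimization could cache the last context written per queue
         	 * and skip the write when nothing has changed.  The
         	 * txq_last_* members below are hypothetical.
         	 */
         #if 0
         	if (txq->txq_last_vl_len == vl_len &&
         	    txq->txq_last_cmdc == cmdc &&
         	    txq->txq_last_mssidx == mssidx)
         		return;		/* Context in the queue is still valid. */
         	txq->txq_last_vl_len = vl_len;
         	txq->txq_last_cmdc = cmdc;
         	txq->txq_last_mssidx = mssidx;
         #endif
         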
   9361 	/* Fill in the context descriptor. */
   9362 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9363 	    htole32(vl_len);
   9364 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9365 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9366 	    htole32(cmdc);
   9367 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9368 	    htole32(mssidx);
   9369 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9370 	DPRINTF(sc, WM_DEBUG_TX,
   9371 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9372 		txq->txq_next, 0, vl_len));
   9373 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9374 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9375 	txs->txs_ndesc++;
   9376 }
   9377 
   9378 /*
   9379  * wm_nq_start:		[ifnet interface function]
   9380  *
   9381  *	Start packet transmission on the interface for NEWQUEUE devices
   9382  */
   9383 static void
   9384 wm_nq_start(struct ifnet *ifp)
   9385 {
   9386 	struct wm_softc *sc = ifp->if_softc;
   9387 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9388 
   9389 	KASSERT(if_is_mpsafe(ifp));
   9390 	/*
   9391 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9392 	 */
   9393 
   9394 	mutex_enter(txq->txq_lock);
   9395 	if (!txq->txq_stopping)
   9396 		wm_nq_start_locked(ifp);
   9397 	mutex_exit(txq->txq_lock);
   9398 }
   9399 
   9400 static void
   9401 wm_nq_start_locked(struct ifnet *ifp)
   9402 {
   9403 	struct wm_softc *sc = ifp->if_softc;
   9404 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9405 
   9406 	wm_nq_send_common_locked(ifp, txq, false);
   9407 }
   9408 
   9409 static int
   9410 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9411 {
   9412 	int qid;
   9413 	struct wm_softc *sc = ifp->if_softc;
   9414 	struct wm_txqueue *txq;
   9415 
   9416 	qid = wm_select_txqueue(ifp, m);
   9417 	txq = &sc->sc_queue[qid].wmq_txq;
   9418 
   9419 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9420 		m_freem(m);
   9421 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9422 		return ENOBUFS;
   9423 	}
   9424 
   9425 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9426 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9427 	if (m->m_flags & M_MCAST)
   9428 		if_statinc_ref(nsr, if_omcasts);
   9429 	IF_STAT_PUTREF(ifp);
   9430 
    9431 	/*
    9432 	 * There are two situations in which this mutex_tryenter() can
    9433 	 * fail at run time:
    9434 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9435 	 *     (2) contention with the deferred if_start softint
    9436 	 *         (wm_handle_queue())
    9437 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9438 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9439 	 * In case (2), that packet is likewise dequeued by
    9440 	 * wm_deferred_start_locked(), so it does not get stuck either.
    9441 	 */
   9442 	if (mutex_tryenter(txq->txq_lock)) {
   9443 		if (!txq->txq_stopping)
   9444 			wm_nq_transmit_locked(ifp, txq);
   9445 		mutex_exit(txq->txq_lock);
   9446 	}
   9447 
   9448 	return 0;
   9449 }
   9450 
   9451 static void
   9452 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9453 {
   9454 
   9455 	wm_nq_send_common_locked(ifp, txq, true);
   9456 }
   9457 
   9458 static void
   9459 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9460     bool is_transmit)
   9461 {
   9462 	struct wm_softc *sc = ifp->if_softc;
   9463 	struct mbuf *m0;
   9464 	struct wm_txsoft *txs;
   9465 	bus_dmamap_t dmamap;
   9466 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9467 	bool do_csum, sent;
   9468 	bool remap = true;
   9469 
   9470 	KASSERT(mutex_owned(txq->txq_lock));
   9471 	KASSERT(!txq->txq_stopping);
   9472 
   9473 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9474 		return;
   9475 
   9476 	if (__predict_false(wm_linkdown_discard(txq))) {
   9477 		do {
   9478 			if (is_transmit)
   9479 				m0 = pcq_get(txq->txq_interq);
   9480 			else
   9481 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9482 			/*
    9483 			 * Increment the success counter, just as when
    9484 			 * the link-down PHY discards the packet.
    9485 			 */
   9486 			if (m0 != NULL) {
   9487 				if_statinc(ifp, if_opackets);
   9488 				m_freem(m0);
   9489 			}
   9490 		} while (m0 != NULL);
   9491 		return;
   9492 	}
   9493 
   9494 	sent = false;
   9495 
   9496 	/*
   9497 	 * Loop through the send queue, setting up transmit descriptors
   9498 	 * until we drain the queue, or use up all available transmit
   9499 	 * descriptors.
   9500 	 */
   9501 	for (;;) {
   9502 		m0 = NULL;
   9503 
   9504 		/* Get a work queue entry. */
   9505 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9506 			wm_txeof(txq, UINT_MAX);
   9507 			if (txq->txq_sfree == 0) {
   9508 				DPRINTF(sc, WM_DEBUG_TX,
   9509 				    ("%s: TX: no free job descriptors\n",
   9510 					device_xname(sc->sc_dev)));
   9511 				WM_Q_EVCNT_INCR(txq, txsstall);
   9512 				break;
   9513 			}
   9514 		}
   9515 
   9516 		/* Grab a packet off the queue. */
   9517 		if (is_transmit)
   9518 			m0 = pcq_get(txq->txq_interq);
   9519 		else
   9520 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9521 		if (m0 == NULL)
   9522 			break;
   9523 
   9524 		DPRINTF(sc, WM_DEBUG_TX,
   9525 		    ("%s: TX: have packet to transmit: %p\n",
   9526 			device_xname(sc->sc_dev), m0));
   9527 
   9528 		txs = &txq->txq_soft[txq->txq_snext];
   9529 		dmamap = txs->txs_dmamap;
   9530 
   9531 		/*
   9532 		 * Load the DMA map.  If this fails, the packet either
   9533 		 * didn't fit in the allotted number of segments, or we
   9534 		 * were short on resources.  For the too-many-segments
   9535 		 * case, we simply report an error and drop the packet,
   9536 		 * since we can't sanely copy a jumbo packet to a single
   9537 		 * buffer.
   9538 		 */
   9539 retry:
   9540 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9541 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9542 		if (__predict_false(error)) {
   9543 			if (error == EFBIG) {
   9544 				if (remap == true) {
   9545 					struct mbuf *m;
   9546 
   9547 					remap = false;
   9548 					m = m_defrag(m0, M_NOWAIT);
   9549 					if (m != NULL) {
   9550 						WM_Q_EVCNT_INCR(txq, defrag);
   9551 						m0 = m;
   9552 						goto retry;
   9553 					}
   9554 				}
   9555 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9556 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9557 				    "DMA segments, dropping...\n",
   9558 				    device_xname(sc->sc_dev));
   9559 				wm_dump_mbuf_chain(sc, m0);
   9560 				m_freem(m0);
   9561 				continue;
   9562 			}
   9563 			/* Short on resources, just stop for now. */
   9564 			DPRINTF(sc, WM_DEBUG_TX,
   9565 			    ("%s: TX: dmamap load failed: %d\n",
   9566 				device_xname(sc->sc_dev), error));
   9567 			break;
   9568 		}
   9569 
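         		/* The packet consumes one Tx descriptor per DMA segment. */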
   9570 		segs_needed = dmamap->dm_nsegs;
   9571 
   9572 		/*
   9573 		 * Ensure we have enough descriptors free to describe
   9574 		 * the packet. Note, we always reserve one descriptor
   9575 		 * at the end of the ring due to the semantics of the
   9576 		 * TDT register, plus one more in the event we need
   9577 		 * to load offload context.
   9578 		 */
   9579 		if (segs_needed > txq->txq_free - 2) {
   9580 			/*
   9581 			 * Not enough free descriptors to transmit this
   9582 			 * packet.  We haven't committed anything yet,
    9583 			 * so just unload the DMA map, put the packet
    9584 			 * back on the queue, and punt. Notify the upper
   9585 			 * layer that there are no more slots left.
   9586 			 */
   9587 			DPRINTF(sc, WM_DEBUG_TX,
   9588 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9589 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9590 				segs_needed, txq->txq_free - 1));
   9591 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9592 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9593 			WM_Q_EVCNT_INCR(txq, txdstall);
   9594 			break;
   9595 		}
   9596 
   9597 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9598 
   9599 		DPRINTF(sc, WM_DEBUG_TX,
   9600 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9601 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9602 
   9603 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9604 
   9605 		/*
   9606 		 * Store a pointer to the packet so that we can free it
   9607 		 * later.
   9608 		 *
    9609 		 * Initially, we consider the number of descriptors the
    9610 		 * packet uses to be the number of DMA segments. This may be
   9611 		 * incremented by 1 if we do checksum offload (a descriptor
   9612 		 * is used to set the checksum context).
   9613 		 */
   9614 		txs->txs_mbuf = m0;
   9615 		txs->txs_firstdesc = txq->txq_next;
   9616 		txs->txs_ndesc = segs_needed;
   9617 
   9618 		/* Set up offload parameters for this packet. */
   9619 		uint32_t cmdlen, fields, dcmdlen;
   9620 		if (m0->m_pkthdr.csum_flags &
   9621 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9622 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9623 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9624 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9625 			    &do_csum);
   9626 		} else {
   9627 			do_csum = false;
   9628 			cmdlen = 0;
   9629 			fields = 0;
   9630 		}
   9631 
   9632 		/* Sync the DMA map. */
   9633 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9634 		    BUS_DMASYNC_PREWRITE);
   9635 
   9636 		/* Initialize the first transmit descriptor. */
   9637 		nexttx = txq->txq_next;
   9638 		if (!do_csum) {
   9639 			/* Set up a legacy descriptor */
   9640 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9641 			    dmamap->dm_segs[0].ds_addr);
   9642 			txq->txq_descs[nexttx].wtx_cmdlen =
   9643 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9644 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9645 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9646 			if (vlan_has_tag(m0)) {
   9647 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9648 				    htole32(WTX_CMD_VLE);
   9649 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9650 				    htole16(vlan_get_tag(m0));
   9651 			} else
    9652 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   9653 
   9654 			dcmdlen = 0;
   9655 		} else {
   9656 			/* Set up an advanced data descriptor */
   9657 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9658 			    htole64(dmamap->dm_segs[0].ds_addr);
   9659 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9660 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9661 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9662 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9663 			    htole32(fields);
   9664 			DPRINTF(sc, WM_DEBUG_TX,
   9665 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9666 				device_xname(sc->sc_dev), nexttx,
   9667 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9668 			DPRINTF(sc, WM_DEBUG_TX,
   9669 			    ("\t 0x%08x%08x\n", fields,
   9670 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9671 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9672 		}
   9673 
   9674 		lasttx = nexttx;
   9675 		nexttx = WM_NEXTTX(txq, nexttx);
   9676 		/*
   9677 		 * Fill in the next descriptors. Legacy or advanced format
   9678 		 * is the same here.
   9679 		 */
   9680 		for (seg = 1; seg < dmamap->dm_nsegs;
   9681 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9682 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9683 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9684 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9685 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9686 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9687 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9688 			lasttx = nexttx;
   9689 
   9690 			DPRINTF(sc, WM_DEBUG_TX,
   9691 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9692 				device_xname(sc->sc_dev), nexttx,
   9693 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9694 				dmamap->dm_segs[seg].ds_len));
   9695 		}
   9696 
   9697 		KASSERT(lasttx != -1);
   9698 
   9699 		/*
   9700 		 * Set up the command byte on the last descriptor of
   9701 		 * the packet. If we're in the interrupt delay window,
   9702 		 * delay the interrupt.
   9703 		 */
   9704 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9705 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9706 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9707 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9708 
   9709 		txs->txs_lastdesc = lasttx;
   9710 
   9711 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9712 		    device_xname(sc->sc_dev),
   9713 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9714 
   9715 		/* Sync the descriptors we're using. */
   9716 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9717 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9718 
   9719 		/* Give the packet to the chip. */
   9720 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9721 		sent = true;
   9722 
   9723 		DPRINTF(sc, WM_DEBUG_TX,
   9724 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9725 
   9726 		DPRINTF(sc, WM_DEBUG_TX,
   9727 		    ("%s: TX: finished transmitting packet, job %d\n",
   9728 			device_xname(sc->sc_dev), txq->txq_snext));
   9729 
   9730 		/* Advance the tx pointer. */
   9731 		txq->txq_free -= txs->txs_ndesc;
   9732 		txq->txq_next = nexttx;
   9733 
   9734 		txq->txq_sfree--;
   9735 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9736 
   9737 		/* Pass the packet to any BPF listeners. */
   9738 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9739 	}
   9740 
   9741 	if (m0 != NULL) {
   9742 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9743 		WM_Q_EVCNT_INCR(txq, descdrop);
   9744 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9745 			__func__));
   9746 		m_freem(m0);
   9747 	}
   9748 
   9749 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9750 		/* No more slots; notify upper layer. */
   9751 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9752 	}
   9753 
   9754 	if (sent) {
   9755 		/* Set a watchdog timer in case the chip flakes out. */
   9756 		txq->txq_lastsent = time_uptime;
   9757 		txq->txq_sending = true;
   9758 	}
   9759 }
   9760 
   9761 static void
   9762 wm_deferred_start_locked(struct wm_txqueue *txq)
   9763 {
   9764 	struct wm_softc *sc = txq->txq_sc;
   9765 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9766 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9767 	int qid = wmq->wmq_id;
   9768 
   9769 	KASSERT(mutex_owned(txq->txq_lock));
   9770 	KASSERT(!txq->txq_stopping);
   9771 
   9772 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9773 		/* XXX needed for ALTQ or single-CPU systems */
   9774 		if (qid == 0)
   9775 			wm_nq_start_locked(ifp);
   9776 		wm_nq_transmit_locked(ifp, txq);
   9777 	} else {
    9778 		/* XXX needed for ALTQ or single-CPU systems */
   9779 		if (qid == 0)
   9780 			wm_start_locked(ifp);
   9781 		wm_transmit_locked(ifp, txq);
   9782 	}
   9783 }
   9784 
   9785 /* Interrupt */
   9786 
   9787 /*
   9788  * wm_txeof:
   9789  *
   9790  *	Helper; handle transmit interrupts.
   9791  */
   9792 static bool
   9793 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9794 {
   9795 	struct wm_softc *sc = txq->txq_sc;
   9796 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9797 	struct wm_txsoft *txs;
   9798 	int count = 0;
   9799 	int i;
   9800 	uint8_t status;
   9801 	bool more = false;
   9802 
   9803 	KASSERT(mutex_owned(txq->txq_lock));
   9804 
   9805 	if (txq->txq_stopping)
   9806 		return false;
   9807 
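         	/* We are about to reclaim descriptors, so clear the no-space flag. */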
   9808 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9809 
   9810 	/*
   9811 	 * Go through the Tx list and free mbufs for those
   9812 	 * frames which have been transmitted.
   9813 	 */
   9814 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9815 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9816 		txs = &txq->txq_soft[i];
   9817 
   9818 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9819 			device_xname(sc->sc_dev), i));
   9820 
   9821 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9822 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9823 
   9824 		status =
   9825 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9826 		if ((status & WTX_ST_DD) == 0) {
   9827 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9828 			    BUS_DMASYNC_PREREAD);
   9829 			break;
   9830 		}
   9831 
   9832 		if (limit-- == 0) {
   9833 			more = true;
   9834 			DPRINTF(sc, WM_DEBUG_TX,
   9835 			    ("%s: TX: loop limited, job %d is not processed\n",
   9836 				device_xname(sc->sc_dev), i));
   9837 			break;
   9838 		}
   9839 
   9840 		count++;
   9841 		DPRINTF(sc, WM_DEBUG_TX,
   9842 		    ("%s: TX: job %d done: descs %d..%d\n",
   9843 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9844 		    txs->txs_lastdesc));
   9845 
   9846 #ifdef WM_EVENT_COUNTERS
   9847 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9848 			WM_Q_EVCNT_INCR(txq, underrun);
   9849 #endif /* WM_EVENT_COUNTERS */
   9850 
   9851 		/*
    9852 		 * The documentation for the 82574 and newer says the status
    9853 		 * field has neither the EC (Excessive Collision) bit nor the
    9854 		 * LC (Late Collision) bit (both are reserved). Refer to the
    9855 		 * "PCIe GbE Controller Open Source Software Developer's
    9856 		 * Manual", the 82574 datasheet and newer.
    9857 		 *
    9858 		 * XXX I saw the LC bit set on an I218 even though the media
    9859 		 * was full duplex, so the bit might have another meaning
         		 * (I have no documentation for it).
   9860 		 */
   9861 
   9862 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9863 		    && ((sc->sc_type < WM_T_82574)
   9864 			|| (sc->sc_type == WM_T_80003))) {
   9865 			if_statinc(ifp, if_oerrors);
   9866 			if (status & WTX_ST_LC)
   9867 				log(LOG_WARNING, "%s: late collision\n",
   9868 				    device_xname(sc->sc_dev));
   9869 			else if (status & WTX_ST_EC) {
   9870 				if_statadd(ifp, if_collisions,
   9871 				    TX_COLLISION_THRESHOLD + 1);
   9872 				log(LOG_WARNING, "%s: excessive collisions\n",
   9873 				    device_xname(sc->sc_dev));
   9874 			}
   9875 		} else
   9876 			if_statinc(ifp, if_opackets);
   9877 
   9878 		txq->txq_packets++;
   9879 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9880 
   9881 		txq->txq_free += txs->txs_ndesc;
   9882 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9883 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9884 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9885 		m_freem(txs->txs_mbuf);
   9886 		txs->txs_mbuf = NULL;
   9887 	}
   9888 
   9889 	/* Update the dirty transmit buffer pointer. */
   9890 	txq->txq_sdirty = i;
   9891 	DPRINTF(sc, WM_DEBUG_TX,
   9892 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9893 
   9894 	if (count != 0)
   9895 		rnd_add_uint32(&sc->rnd_source, count);
   9896 
   9897 	/*
   9898 	 * If there are no more pending transmissions, cancel the watchdog
   9899 	 * timer.
   9900 	 */
   9901 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9902 		txq->txq_sending = false;
   9903 
   9904 	return more;
   9905 }
   9906 
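         /*
          * Receive descriptor accessors.  Three layouts are handled: the
          * legacy format, the 82574's extended format and the NEWQUEUE
          * (82575 and newer) advanced format.
          */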
   9907 static inline uint32_t
   9908 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9909 {
   9910 	struct wm_softc *sc = rxq->rxq_sc;
   9911 
   9912 	if (sc->sc_type == WM_T_82574)
   9913 		return EXTRXC_STATUS(
   9914 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9915 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9916 		return NQRXC_STATUS(
   9917 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9918 	else
   9919 		return rxq->rxq_descs[idx].wrx_status;
   9920 }
   9921 
   9922 static inline uint32_t
   9923 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9924 {
   9925 	struct wm_softc *sc = rxq->rxq_sc;
   9926 
   9927 	if (sc->sc_type == WM_T_82574)
   9928 		return EXTRXC_ERROR(
   9929 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9930 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9931 		return NQRXC_ERROR(
   9932 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9933 	else
   9934 		return rxq->rxq_descs[idx].wrx_errors;
   9935 }
   9936 
   9937 static inline uint16_t
   9938 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9939 {
   9940 	struct wm_softc *sc = rxq->rxq_sc;
   9941 
   9942 	if (sc->sc_type == WM_T_82574)
   9943 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9944 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9945 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9946 	else
   9947 		return rxq->rxq_descs[idx].wrx_special;
   9948 }
   9949 
   9950 static inline int
   9951 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9952 {
   9953 	struct wm_softc *sc = rxq->rxq_sc;
   9954 
   9955 	if (sc->sc_type == WM_T_82574)
   9956 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9957 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9958 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9959 	else
   9960 		return rxq->rxq_descs[idx].wrx_len;
   9961 }
   9962 
   9963 #ifdef WM_DEBUG
   9964 static inline uint32_t
   9965 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9966 {
   9967 	struct wm_softc *sc = rxq->rxq_sc;
   9968 
   9969 	if (sc->sc_type == WM_T_82574)
   9970 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9971 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9972 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9973 	else
   9974 		return 0;
   9975 }
   9976 
   9977 static inline uint8_t
   9978 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9979 {
   9980 	struct wm_softc *sc = rxq->rxq_sc;
   9981 
   9982 	if (sc->sc_type == WM_T_82574)
   9983 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9984 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9985 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9986 	else
   9987 		return 0;
   9988 }
   9989 #endif /* WM_DEBUG */
   9990 
   9991 static inline bool
   9992 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9993     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9994 {
   9995 
   9996 	if (sc->sc_type == WM_T_82574)
   9997 		return (status & ext_bit) != 0;
   9998 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9999 		return (status & nq_bit) != 0;
   10000 	else
   10001 		return (status & legacy_bit) != 0;
   10002 }
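          
          /*
           * Usage sketch for wm_rxdesc_is_set_status() above; wm_rxdesc_dd()
           * below shows a real call:
           *
           *	if (wm_rxdesc_is_set_status(sc, status,
           *	    WRX_ST_DD, EXTRXC_STATUS_DD, NQRXC_STATUS_DD))
           *		... the descriptor has been written back ...
           */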
   10003 
   10004 static inline bool
   10005 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10006     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10007 {
   10008 
   10009 	if (sc->sc_type == WM_T_82574)
   10010 		return (error & ext_bit) != 0;
   10011 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10012 		return (error & nq_bit) != 0;
   10013 	else
   10014 		return (error & legacy_bit) != 0;
   10015 }
   10016 
   10017 static inline bool
   10018 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10019 {
   10020 
   10021 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10022 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10023 		return true;
   10024 	else
   10025 		return false;
   10026 }
   10027 
   10028 static inline bool
   10029 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10030 {
   10031 	struct wm_softc *sc = rxq->rxq_sc;
   10032 
   10033 	/* XXX missing error bit for newqueue? */
   10034 	if (wm_rxdesc_is_set_error(sc, errors,
   10035 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10036 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10037 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10038 		NQRXC_ERROR_RXE)) {
   10039 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10040 		    EXTRXC_ERROR_SE, 0))
   10041 			log(LOG_WARNING, "%s: symbol error\n",
   10042 			    device_xname(sc->sc_dev));
   10043 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10044 		    EXTRXC_ERROR_SEQ, 0))
   10045 			log(LOG_WARNING, "%s: receive sequence error\n",
   10046 			    device_xname(sc->sc_dev));
   10047 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10048 		    EXTRXC_ERROR_CE, 0))
   10049 			log(LOG_WARNING, "%s: CRC error\n",
   10050 			    device_xname(sc->sc_dev));
   10051 		return true;
   10052 	}
   10053 
   10054 	return false;
   10055 }
   10056 
   10057 static inline bool
   10058 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10059 {
   10060 	struct wm_softc *sc = rxq->rxq_sc;
   10061 
   10062 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10063 		NQRXC_STATUS_DD)) {
   10064 		/* We have processed all of the receive descriptors. */
   10065 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10066 		return false;
   10067 	}
   10068 
   10069 	return true;
   10070 }
   10071 
   10072 static inline bool
   10073 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10074     uint16_t vlantag, struct mbuf *m)
   10075 {
   10076 
   10077 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10078 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10079 		vlan_set_tag(m, le16toh(vlantag));
   10080 	}
   10081 
   10082 	return true;
   10083 }
   10084 
   10085 static inline void
   10086 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10087     uint32_t errors, struct mbuf *m)
   10088 {
   10089 	struct wm_softc *sc = rxq->rxq_sc;
   10090 
   10091 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10092 		if (wm_rxdesc_is_set_status(sc, status,
   10093 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10094 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10095 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10096 			if (wm_rxdesc_is_set_error(sc, errors,
   10097 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10098 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10099 		}
   10100 		if (wm_rxdesc_is_set_status(sc, status,
   10101 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10102 			/*
   10103 			 * Note: we don't know if this was TCP or UDP,
   10104 			 * so we just set both bits, and expect the
   10105 			 * upper layers to deal.
   10106 			 */
   10107 			WM_Q_EVCNT_INCR(rxq, tusum);
   10108 			m->m_pkthdr.csum_flags |=
   10109 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10110 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10111 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10112 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10113 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10114 		}
   10115 	}
   10116 }
   10117 
   10118 /*
   10119  * wm_rxeof:
   10120  *
   10121  *	Helper; handle receive interrupts.
   10122  */
   10123 static bool
   10124 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10125 {
   10126 	struct wm_softc *sc = rxq->rxq_sc;
   10127 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10128 	struct wm_rxsoft *rxs;
   10129 	struct mbuf *m;
   10130 	int i, len;
   10131 	int count = 0;
   10132 	uint32_t status, errors;
   10133 	uint16_t vlantag;
   10134 	bool more = false;
   10135 
   10136 	KASSERT(mutex_owned(rxq->rxq_lock));
   10137 
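          	/*
          	 * Walk the ring from the last processed descriptor until we
          	 * reach one the hardware still owns or we exhaust the limit.
          	 */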
   10138 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10139 		rxs = &rxq->rxq_soft[i];
   10140 
   10141 		DPRINTF(sc, WM_DEBUG_RX,
   10142 		    ("%s: RX: checking descriptor %d\n",
   10143 			device_xname(sc->sc_dev), i));
   10144 		wm_cdrxsync(rxq, i,
   10145 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10146 
   10147 		status = wm_rxdesc_get_status(rxq, i);
   10148 		errors = wm_rxdesc_get_errors(rxq, i);
   10149 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10150 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10151 #ifdef WM_DEBUG
   10152 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10153 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10154 #endif
   10155 
   10156 		if (!wm_rxdesc_dd(rxq, i, status))
   10157 			break;
   10158 
   10159 		if (limit-- == 0) {
   10160 			more = true;
   10161 			DPRINTF(sc, WM_DEBUG_RX,
   10162 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10163 				device_xname(sc->sc_dev), i));
   10164 			break;
   10165 		}
   10166 
   10167 		count++;
   10168 		if (__predict_false(rxq->rxq_discard)) {
   10169 			DPRINTF(sc, WM_DEBUG_RX,
   10170 			    ("%s: RX: discarding contents of descriptor %d\n",
   10171 				device_xname(sc->sc_dev), i));
   10172 			wm_init_rxdesc(rxq, i);
   10173 			if (wm_rxdesc_is_eop(rxq, status)) {
   10174 				/* Reset our state. */
   10175 				DPRINTF(sc, WM_DEBUG_RX,
   10176 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10177 					device_xname(sc->sc_dev)));
   10178 				rxq->rxq_discard = 0;
   10179 			}
   10180 			continue;
   10181 		}
   10182 
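          		/* Make the DMA'd packet contents visible to the CPU. */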
   10183 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10184 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10185 
   10186 		m = rxs->rxs_mbuf;
   10187 
   10188 		/*
   10189 		 * Add a new receive buffer to the ring, unless of
   10190 		 * course the length is zero. Treat the latter as a
   10191 		 * failed mapping.
   10192 		 */
   10193 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10194 			/*
   10195 			 * Failed, throw away what we've done so
   10196 			 * far, and discard the rest of the packet.
   10197 			 */
   10198 			if_statinc(ifp, if_ierrors);
   10199 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10200 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10201 			wm_init_rxdesc(rxq, i);
   10202 			if (!wm_rxdesc_is_eop(rxq, status))
   10203 				rxq->rxq_discard = 1;
   10204 			if (rxq->rxq_head != NULL)
   10205 				m_freem(rxq->rxq_head);
   10206 			WM_RXCHAIN_RESET(rxq);
   10207 			DPRINTF(sc, WM_DEBUG_RX,
   10208 			    ("%s: RX: Rx buffer allocation failed, "
   10209 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10210 				rxq->rxq_discard ? " (discard)" : ""));
   10211 			continue;
   10212 		}
   10213 
   10214 		m->m_len = len;
   10215 		rxq->rxq_len += len;
   10216 		DPRINTF(sc, WM_DEBUG_RX,
   10217 		    ("%s: RX: buffer at %p len %d\n",
   10218 			device_xname(sc->sc_dev), m->m_data, len));
   10219 
   10220 		/* If this is not the end of the packet, keep looking. */
   10221 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10222 			WM_RXCHAIN_LINK(rxq, m);
   10223 			DPRINTF(sc, WM_DEBUG_RX,
   10224 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10225 				device_xname(sc->sc_dev), rxq->rxq_len));
   10226 			continue;
   10227 		}
   10228 
   10229 		/*
    10230 		 * Okay, we have the entire packet now. The chip is
    10231 		 * configured to include the FCS (not all chips can be
    10232 		 * configured to strip it), so we need to trim it, except
    10233 		 * on I35[04] and I21[01]: those chips have an erratum where
    10234 		 * the RCTL_SECRC bit in the RCTL register is always set, so
    10235 		 * we don't trim it for them. PCH2 and newer chips also don't
    10236 		 * include the FCS when jumbo frames are used, to work around
    10237 		 * an erratum. We may need to adjust the length of the previous
    10238 		 * mbuf in the chain if the current mbuf is too short.
   10239 		 */
   10240 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10241 			if (m->m_len < ETHER_CRC_LEN) {
   10242 				rxq->rxq_tail->m_len
   10243 				    -= (ETHER_CRC_LEN - m->m_len);
   10244 				m->m_len = 0;
   10245 			} else
   10246 				m->m_len -= ETHER_CRC_LEN;
   10247 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10248 		} else
   10249 			len = rxq->rxq_len;
   10250 
   10251 		WM_RXCHAIN_LINK(rxq, m);
   10252 
   10253 		*rxq->rxq_tailp = NULL;
   10254 		m = rxq->rxq_head;
   10255 
   10256 		WM_RXCHAIN_RESET(rxq);
   10257 
   10258 		DPRINTF(sc, WM_DEBUG_RX,
   10259 		    ("%s: RX: have entire packet, len -> %d\n",
   10260 			device_xname(sc->sc_dev), len));
   10261 
   10262 		/* If an error occurred, update stats and drop the packet. */
   10263 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10264 			m_freem(m);
   10265 			continue;
   10266 		}
   10267 
   10268 		/* No errors.  Receive the packet. */
   10269 		m_set_rcvif(m, ifp);
   10270 		m->m_pkthdr.len = len;
   10271 		/*
    10272 		 * TODO: the rsshash and rsstype should be saved in this
    10273 		 * mbuf.
   10274 		 */
   10275 		DPRINTF(sc, WM_DEBUG_RX,
   10276 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10277 			device_xname(sc->sc_dev), rsstype, rsshash));
   10278 
   10279 		/*
   10280 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10281 		 * for us.  Associate the tag with the packet.
   10282 		 */
   10283 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10284 			continue;
   10285 
   10286 		/* Set up checksum info for this packet. */
   10287 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10288 
   10289 		rxq->rxq_packets++;
   10290 		rxq->rxq_bytes += len;
   10291 		/* Pass it on. */
   10292 		if_percpuq_enqueue(sc->sc_ipq, m);
   10293 
   10294 		if (rxq->rxq_stopping)
   10295 			break;
   10296 	}
   10297 	rxq->rxq_ptr = i;
   10298 
   10299 	if (count != 0)
   10300 		rnd_add_uint32(&sc->rnd_source, count);
   10301 
   10302 	DPRINTF(sc, WM_DEBUG_RX,
   10303 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10304 
   10305 	return more;
   10306 }
   10307 
   10308 /*
   10309  * wm_linkintr_gmii:
   10310  *
   10311  *	Helper; handle link interrupts for GMII.
   10312  */
   10313 static void
   10314 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10315 {
   10316 	device_t dev = sc->sc_dev;
   10317 	uint32_t status, reg;
   10318 	bool link;
   10319 	bool dopoll = true;
   10320 	int rv;
   10321 
   10322 	KASSERT(mutex_owned(sc->sc_core_lock));
   10323 
   10324 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10325 		__func__));
   10326 
   10327 	if ((icr & ICR_LSC) == 0) {
   10328 		if (icr & ICR_RXSEQ)
   10329 			DPRINTF(sc, WM_DEBUG_LINK,
   10330 			    ("%s: LINK Receive sequence error\n",
   10331 				device_xname(dev)));
   10332 		return;
   10333 	}
   10334 
   10335 	/* Link status changed */
   10336 	status = CSR_READ(sc, WMREG_STATUS);
   10337 	link = status & STATUS_LU;
   10338 	if (link) {
   10339 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10340 			device_xname(dev),
   10341 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10342 		if (wm_phy_need_linkdown_discard(sc)) {
   10343 			DPRINTF(sc, WM_DEBUG_LINK,
   10344 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10345 				device_xname(dev)));
   10346 			wm_clear_linkdown_discard(sc);
   10347 		}
   10348 	} else {
   10349 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10350 			device_xname(dev)));
   10351 		if (wm_phy_need_linkdown_discard(sc)) {
   10352 			DPRINTF(sc, WM_DEBUG_LINK,
   10353 			    ("%s: linkintr: Set linkdown discard flag\n",
   10354 				device_xname(dev)));
   10355 			wm_set_linkdown_discard(sc);
   10356 		}
   10357 	}
   10358 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10359 		wm_gig_downshift_workaround_ich8lan(sc);
   10360 
   10361 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10362 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10363 
   10364 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10365 		device_xname(dev)));
   10366 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10367 		if (link) {
   10368 			/*
    10369 			 * To work around the problem, it's required to wait
    10370 			 * several hundred milliseconds. The time depends
    10371 			 * on the environment. Wait 1 second to be safe.
   10372 			 */
   10373 			dopoll = false;
   10374 			getmicrotime(&sc->sc_linkup_delay_time);
   10375 			sc->sc_linkup_delay_time.tv_sec += 1;
   10376 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   10377 			/*
    10378 			 * Checking tv_sec only is enough here.
    10379 			 *
    10380 			 * Clearing the time is not strictly required; it
    10381 			 * just makes it easy to see that the timer has
    10382 			 * stopped (for debugging).
   10383 			 */
   10384 
   10385 			sc->sc_linkup_delay_time.tv_sec = 0;
   10386 			sc->sc_linkup_delay_time.tv_usec = 0;
   10387 		}
   10388 	}
   10389 
   10390 	/*
   10391 	 * Call mii_pollstat().
   10392 	 *
    10393 	 * On some (not all) systems using I35[04] or I21[01], packets sent
    10394 	 * soon after link-up are lost: the MAC hands a packet to the PHY and
    10395 	 * no error is observed. As a result, gratuitous ARP and/or IPv6 DAD
    10396 	 * packets are silently dropped. To avoid this problem, don't call
    10397 	 * mii_pollstat() here, as it would send a LINK_STATE_UP notification
    10398 	 * to the upper layer. Instead, mii_pollstat() will be called from
    10399 	 * wm_gmii_mediastatus(), or mii_tick() from wm_tick().
   10400 	 */
   10401 	if (dopoll)
   10402 		mii_pollstat(&sc->sc_mii);
   10403 
   10404 	/* Do some workarounds soon after link status is changed. */
   10405 
   10406 	if (sc->sc_type == WM_T_82543) {
   10407 		int miistatus, active;
   10408 
   10409 		/*
   10410 		 * With 82543, we need to force speed and
   10411 		 * duplex on the MAC equal to what the PHY
   10412 		 * speed and duplex configuration is.
   10413 		 */
   10414 		miistatus = sc->sc_mii.mii_media_status;
   10415 
   10416 		if (miistatus & IFM_ACTIVE) {
   10417 			active = sc->sc_mii.mii_media_active;
   10418 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10419 			switch (IFM_SUBTYPE(active)) {
   10420 			case IFM_10_T:
   10421 				sc->sc_ctrl |= CTRL_SPEED_10;
   10422 				break;
   10423 			case IFM_100_TX:
   10424 				sc->sc_ctrl |= CTRL_SPEED_100;
   10425 				break;
   10426 			case IFM_1000_T:
   10427 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10428 				break;
   10429 			default:
   10430 				/*
   10431 				 * Fiber?
    10432 				 * Should not enter here.
   10433 				 */
   10434 				device_printf(dev, "unknown media (%x)\n",
   10435 				    active);
   10436 				break;
   10437 			}
   10438 			if (active & IFM_FDX)
   10439 				sc->sc_ctrl |= CTRL_FD;
   10440 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10441 		}
   10442 	} else if (sc->sc_type == WM_T_PCH) {
   10443 		wm_k1_gig_workaround_hv(sc,
   10444 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10445 	}
   10446 
   10447 	/*
   10448 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10449 	 * aggressive resulting in many collisions. To avoid this, increase
   10450 	 * the IPG and reduce Rx latency in the PHY.
   10451 	 */
   10452 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10453 	    && link) {
   10454 		uint32_t tipg_reg;
   10455 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10456 		bool fdx;
   10457 		uint16_t emi_addr, emi_val;
   10458 
   10459 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10460 		tipg_reg &= ~TIPG_IPGT_MASK;
   10461 		fdx = status & STATUS_FD;
   10462 
   10463 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10464 			tipg_reg |= 0xff;
   10465 			/* Reduce Rx latency in analog PHY */
   10466 			emi_val = 0;
   10467 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10468 		    fdx && speed != STATUS_SPEED_1000) {
   10469 			tipg_reg |= 0xc;
   10470 			emi_val = 1;
   10471 		} else {
   10472 			/* Roll back the default values */
   10473 			tipg_reg |= 0x08;
   10474 			emi_val = 1;
   10475 		}
   10476 
   10477 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10478 
   10479 		rv = sc->phy.acquire(sc);
   10480 		if (rv)
   10481 			return;
   10482 
   10483 		if (sc->sc_type == WM_T_PCH2)
   10484 			emi_addr = I82579_RX_CONFIG;
   10485 		else
   10486 			emi_addr = I217_RX_CONFIG;
   10487 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10488 
   10489 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10490 			uint16_t phy_reg;
   10491 
   10492 			sc->phy.readreg_locked(dev, 2,
   10493 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10494 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10495 			if (speed == STATUS_SPEED_100
   10496 			    || speed == STATUS_SPEED_10)
   10497 				phy_reg |= 0x3e8;
   10498 			else
   10499 				phy_reg |= 0xfa;
   10500 			sc->phy.writereg_locked(dev, 2,
   10501 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10502 
   10503 			if (speed == STATUS_SPEED_1000) {
   10504 				sc->phy.readreg_locked(dev, 2,
   10505 				    HV_PM_CTRL, &phy_reg);
   10506 
   10507 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10508 
   10509 				sc->phy.writereg_locked(dev, 2,
   10510 				    HV_PM_CTRL, phy_reg);
   10511 			}
   10512 		}
   10513 		sc->phy.release(sc);
   10514 
   10515 		if (rv)
   10516 			return;
   10517 
   10518 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10519 			uint16_t data, ptr_gap;
   10520 
   10521 			if (speed == STATUS_SPEED_1000) {
   10522 				rv = sc->phy.acquire(sc);
   10523 				if (rv)
   10524 					return;
   10525 
   10526 				rv = sc->phy.readreg_locked(dev, 2,
   10527 				    I82579_UNKNOWN1, &data);
   10528 				if (rv) {
   10529 					sc->phy.release(sc);
   10530 					return;
   10531 				}
   10532 
   10533 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10534 				if (ptr_gap < 0x18) {
   10535 					data &= ~(0x3ff << 2);
   10536 					data |= (0x18 << 2);
   10537 					rv = sc->phy.writereg_locked(dev,
   10538 					    2, I82579_UNKNOWN1, data);
   10539 				}
   10540 				sc->phy.release(sc);
   10541 				if (rv)
   10542 					return;
   10543 			} else {
   10544 				rv = sc->phy.acquire(sc);
   10545 				if (rv)
   10546 					return;
   10547 
   10548 				rv = sc->phy.writereg_locked(dev, 2,
   10549 				    I82579_UNKNOWN1, 0xc023);
   10550 				sc->phy.release(sc);
   10551 				if (rv)
   10552 					return;
   10554 			}
   10555 		}
   10556 	}
   10557 
   10558 	/*
    10559 	 * I217 packet loss issue:
    10560 	 * Ensure that the FEXTNVM4 beacon duration is set correctly
    10561 	 * on power-up. Set the beacon duration for I217 to 8 usec.
   10563 	 */
   10564 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10565 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10566 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10567 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10568 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10569 	}
   10570 
   10571 	/* Work-around I218 hang issue */
   10572 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10573 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10574 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10575 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10576 		wm_k1_workaround_lpt_lp(sc, link);
   10577 
   10578 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10579 		/*
   10580 		 * Set platform power management values for Latency
   10581 		 * Tolerance Reporting (LTR)
   10582 		 */
   10583 		wm_platform_pm_pch_lpt(sc,
   10584 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10585 	}
   10586 
   10587 	/* Clear link partner's EEE ability */
   10588 	sc->eee_lp_ability = 0;
   10589 
   10590 	/* FEXTNVM6 K1-off workaround */
   10591 	if (sc->sc_type == WM_T_PCH_SPT) {
   10592 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10593 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10594 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10595 		else
   10596 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10597 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10598 	}
   10599 
   10600 	if (!link)
   10601 		return;
   10602 
   10603 	switch (sc->sc_type) {
   10604 	case WM_T_PCH2:
   10605 		wm_k1_workaround_lv(sc);
   10606 		/* FALLTHROUGH */
   10607 	case WM_T_PCH:
   10608 		if (sc->sc_phytype == WMPHY_82578)
   10609 			wm_link_stall_workaround_hv(sc);
   10610 		break;
   10611 	default:
   10612 		break;
   10613 	}
   10614 
   10615 	/* Enable/Disable EEE after link up */
   10616 	if (sc->sc_phytype > WMPHY_82579)
   10617 		wm_set_eee_pchlan(sc);
   10618 }
   10619 
   10620 /*
   10621  * wm_linkintr_tbi:
   10622  *
   10623  *	Helper; handle link interrupts for TBI mode.
   10624  */
   10625 static void
   10626 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10627 {
   10628 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10629 	uint32_t status;
   10630 
   10631 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10632 		__func__));
   10633 
   10634 	status = CSR_READ(sc, WMREG_STATUS);
   10635 	if (icr & ICR_LSC) {
   10636 		wm_check_for_link(sc);
   10637 		if (status & STATUS_LU) {
   10638 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10639 				device_xname(sc->sc_dev),
   10640 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10641 			/*
   10642 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10643 			 * so we should update sc->sc_ctrl
   10644 			 */
   10645 
   10646 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10647 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10648 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10649 			if (status & STATUS_FD)
   10650 				sc->sc_tctl |=
   10651 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10652 			else
   10653 				sc->sc_tctl |=
   10654 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10655 			if (sc->sc_ctrl & CTRL_TFCE)
   10656 				sc->sc_fcrtl |= FCRTL_XONE;
   10657 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10658 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10659 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10660 			sc->sc_tbi_linkup = 1;
   10661 			if_link_state_change(ifp, LINK_STATE_UP);
   10662 		} else {
   10663 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10664 				device_xname(sc->sc_dev)));
   10665 			sc->sc_tbi_linkup = 0;
   10666 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10667 		}
   10668 		/* Update LED */
   10669 		wm_tbi_serdes_set_linkled(sc);
   10670 	} else if (icr & ICR_RXSEQ)
   10671 		DPRINTF(sc, WM_DEBUG_LINK,
   10672 		    ("%s: LINK: Receive sequence error\n",
   10673 			device_xname(sc->sc_dev)));
   10674 }
   10675 
   10676 /*
   10677  * wm_linkintr_serdes:
   10678  *
    10679  *	Helper; handle link interrupts for SERDES mode.
   10680  */
   10681 static void
   10682 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10683 {
   10684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10685 	struct mii_data *mii = &sc->sc_mii;
   10686 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10687 	uint32_t pcs_adv, pcs_lpab, reg;
   10688 
   10689 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10690 		__func__));
   10691 
   10692 	if (icr & ICR_LSC) {
   10693 		/* Check PCS */
   10694 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10695 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10696 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10697 				device_xname(sc->sc_dev)));
   10698 			mii->mii_media_status |= IFM_ACTIVE;
   10699 			sc->sc_tbi_linkup = 1;
   10700 			if_link_state_change(ifp, LINK_STATE_UP);
   10701 		} else {
   10702 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10703 				device_xname(sc->sc_dev)));
   10704 			mii->mii_media_status |= IFM_NONE;
   10705 			sc->sc_tbi_linkup = 0;
   10706 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10707 			wm_tbi_serdes_set_linkled(sc);
   10708 			return;
   10709 		}
   10710 		mii->mii_media_active |= IFM_1000_SX;
   10711 		if ((reg & PCS_LSTS_FDX) != 0)
   10712 			mii->mii_media_active |= IFM_FDX;
   10713 		else
   10714 			mii->mii_media_active |= IFM_HDX;
   10715 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10716 			/* Check flow */
   10717 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10718 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10719 				DPRINTF(sc, WM_DEBUG_LINK,
   10720 				    ("XXX LINKOK but not ACOMP\n"));
   10721 				return;
   10722 			}
   10723 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10724 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10725 			DPRINTF(sc, WM_DEBUG_LINK,
   10726 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10727 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10728 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10729 				mii->mii_media_active |= IFM_FLOW
   10730 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10731 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10732 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10733 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10734 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10735 				mii->mii_media_active |= IFM_FLOW
   10736 				    | IFM_ETH_TXPAUSE;
   10737 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10738 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10739 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10740 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10741 				mii->mii_media_active |= IFM_FLOW
   10742 				    | IFM_ETH_RXPAUSE;
   10743 		}
   10744 		/* Update LED */
   10745 		wm_tbi_serdes_set_linkled(sc);
   10746 	} else
   10747 		DPRINTF(sc, WM_DEBUG_LINK,
   10748 		    ("%s: LINK: Receive sequence error\n",
   10749 		    device_xname(sc->sc_dev)));
   10750 }
   10751 
   10752 /*
   10753  * wm_linkintr:
   10754  *
   10755  *	Helper; handle link interrupts.
   10756  */
   10757 static void
   10758 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10759 {
   10760 
   10761 	KASSERT(mutex_owned(sc->sc_core_lock));
   10762 
   10763 	if (sc->sc_flags & WM_F_HAS_MII)
   10764 		wm_linkintr_gmii(sc, icr);
   10765 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10766 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10767 		wm_linkintr_serdes(sc, icr);
   10768 	else
   10769 		wm_linkintr_tbi(sc, icr);
   10770 }
   10771 
   10772 
   10773 static inline void
   10774 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10775 {
   10776 
   10777 	if (wmq->wmq_txrx_use_workqueue) {
   10778 		if (!wmq->wmq_wq_enqueued) {
   10779 			wmq->wmq_wq_enqueued = true;
   10780 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10781 			    curcpu());
   10782 		}
   10783 	} else
   10784 		softint_schedule(wmq->wmq_si);
   10785 }
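
/*
 * A minimal sketch of the deferred-processing pattern built on the helper
 * above (the real callers are wm_intr_legacy() and wm_txrxintr_msix()
 * below): the interrupt handler masks its queue interrupt, records whether
 * the workqueue or the softint path should run, and then schedules
 * wm_handle_queue().  The wmq_wq_enqueued flag keeps the same work item
 * from being enqueued twice.
 */
#if 0
	wm_txrxintr_disable(wmq);
	wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
	wm_sched_handle_queue(sc, wmq);
#endif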
   10786 
   10787 static inline void
   10788 wm_legacy_intr_disable(struct wm_softc *sc)
   10789 {
   10790 
   10791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10792 }
   10793 
   10794 static inline void
   10795 wm_legacy_intr_enable(struct wm_softc *sc)
   10796 {
   10797 
   10798 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10799 }
   10800 
   10801 /*
   10802  * wm_intr_legacy:
   10803  *
   10804  *	Interrupt service routine for INTx and MSI.
   10805  */
   10806 static int
   10807 wm_intr_legacy(void *arg)
   10808 {
   10809 	struct wm_softc *sc = arg;
   10810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10811 	struct wm_queue *wmq = &sc->sc_queue[0];
   10812 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10813 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10814 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10815 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10816 	uint32_t icr, rndval = 0;
   10817 	bool more = false;
   10818 
   10819 	icr = CSR_READ(sc, WMREG_ICR);
   10820 	if ((icr & sc->sc_icr) == 0)
   10821 		return 0;
   10822 
   10823 	DPRINTF(sc, WM_DEBUG_TX,
   10824 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   10825 	if (rndval == 0)
   10826 		rndval = icr;
   10827 
   10828 	mutex_enter(txq->txq_lock);
   10829 
   10830 	if (txq->txq_stopping) {
   10831 		mutex_exit(txq->txq_lock);
   10832 		return 1;
   10833 	}
   10834 
   10835 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10836 	if (icr & ICR_TXDW) {
   10837 		DPRINTF(sc, WM_DEBUG_TX,
   10838 		    ("%s: TX: got TXDW interrupt\n",
   10839 			device_xname(sc->sc_dev)));
   10840 		WM_Q_EVCNT_INCR(txq, txdw);
   10841 	}
   10842 #endif
   10843 	if (txlimit > 0) {
   10844 		more |= wm_txeof(txq, txlimit);
   10845 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10846 			more = true;
   10847 	} else
   10848 		more = true;
   10849 	mutex_exit(txq->txq_lock);
   10850 
   10851 	mutex_enter(rxq->rxq_lock);
   10852 
   10853 	if (rxq->rxq_stopping) {
   10854 		mutex_exit(rxq->rxq_lock);
   10855 		return 1;
   10856 	}
   10857 
   10858 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10859 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10860 		DPRINTF(sc, WM_DEBUG_RX,
   10861 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10862 			device_xname(sc->sc_dev),
   10863 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10864 		WM_Q_EVCNT_INCR(rxq, intr);
   10865 	}
   10866 #endif
   10867 	if (rxlimit > 0) {
   10868 		/*
    10869 		 * wm_rxeof() does *not* call upper layer functions directly;
    10870 		 * if_percpuq_enqueue() just calls softint_schedule(), so
    10871 		 * wm_rxeof() is safe to call in interrupt context.
   10872 		 */
    10873 		more |= wm_rxeof(rxq, rxlimit);
   10874 	} else
   10875 		more = true;
   10876 
   10877 	mutex_exit(rxq->rxq_lock);
   10878 
   10879 	mutex_enter(sc->sc_core_lock);
   10880 
   10881 	if (sc->sc_core_stopping) {
   10882 		mutex_exit(sc->sc_core_lock);
   10883 		return 1;
   10884 	}
   10885 
   10886 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10887 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10888 		wm_linkintr(sc, icr);
   10889 	}
   10890 	if ((icr & ICR_GPI(0)) != 0)
   10891 		device_printf(sc->sc_dev, "got module interrupt\n");
   10892 
   10893 	mutex_exit(sc->sc_core_lock);
   10894 
   10895 	if (icr & ICR_RXO) {
   10896 #if defined(WM_DEBUG)
   10897 		log(LOG_WARNING, "%s: Receive overrun\n",
   10898 		    device_xname(sc->sc_dev));
   10899 #endif /* defined(WM_DEBUG) */
   10900 	}
   10901 
   10902 	rnd_add_uint32(&sc->rnd_source, rndval);
   10903 
   10904 	if (more) {
   10905 		/* Try to get more packets going. */
   10906 		wm_legacy_intr_disable(sc);
   10907 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10908 		wm_sched_handle_queue(sc, wmq);
   10909 	}
   10910 
   10911 	return 1;
   10912 }
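
/*
 * For reference, a handler like wm_intr_legacy() is registered at attach
 * time roughly as sketched below.  This is a simplified sketch only: the
 * driver's real attach code also probes for MSI/MSI-X, and "pc" and "ih"
 * come from the PCI attachment arguments.
 */
#if 0
	sc->sc_ihs[0] = pci_intr_establish_xname(pc, ih, IPL_NET,
	    wm_intr_legacy, sc, device_xname(sc->sc_dev));
#endif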
   10913 
   10914 static inline void
   10915 wm_txrxintr_disable(struct wm_queue *wmq)
   10916 {
   10917 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10918 
   10919 	if (__predict_false(!wm_is_using_msix(sc))) {
   10920 		wm_legacy_intr_disable(sc);
   10921 		return;
   10922 	}
   10923 
   10924 	if (sc->sc_type == WM_T_82574)
   10925 		CSR_WRITE(sc, WMREG_IMC,
   10926 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10927 	else if (sc->sc_type == WM_T_82575)
   10928 		CSR_WRITE(sc, WMREG_EIMC,
   10929 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10930 	else
   10931 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10932 }
   10933 
   10934 static inline void
   10935 wm_txrxintr_enable(struct wm_queue *wmq)
   10936 {
   10937 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10938 
   10939 	wm_itrs_calculate(sc, wmq);
   10940 
   10941 	if (__predict_false(!wm_is_using_msix(sc))) {
   10942 		wm_legacy_intr_enable(sc);
   10943 		return;
   10944 	}
   10945 
   10946 	/*
    10947 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    10948 	 * here.  It does not matter which of RXQ(0) and RXQ(1) enables
    10949 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    10950 	 * while its wm_handle_queue(wmq) is running.
   10951 	 */
   10952 	if (sc->sc_type == WM_T_82574)
   10953 		CSR_WRITE(sc, WMREG_IMS,
   10954 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10955 	else if (sc->sc_type == WM_T_82575)
   10956 		CSR_WRITE(sc, WMREG_EIMS,
   10957 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10958 	else
   10959 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10960 }
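
/*
 * Summary of the masking scheme used by the two helpers above: the 82574
 * masks its per-queue vectors through IMS/IMC using the ICR_TXQ/ICR_RXQ
 * bits, the 82575 through EIMS/EIMC using the EITR_*_QUEUE bits, and the
 * other MSI-X capable chips through EIMS/EIMC with one bit per interrupt
 * vector (wmq_intr_idx).
 */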
   10961 
   10962 static int
   10963 wm_txrxintr_msix(void *arg)
   10964 {
   10965 	struct wm_queue *wmq = arg;
   10966 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10967 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10968 	struct wm_softc *sc = txq->txq_sc;
   10969 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10970 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10971 	bool txmore;
   10972 	bool rxmore;
   10973 
   10974 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10975 
   10976 	DPRINTF(sc, WM_DEBUG_TX,
   10977 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10978 
   10979 	wm_txrxintr_disable(wmq);
   10980 
   10981 	mutex_enter(txq->txq_lock);
   10982 
   10983 	if (txq->txq_stopping) {
   10984 		mutex_exit(txq->txq_lock);
   10985 		return 1;
   10986 	}
   10987 
   10988 	WM_Q_EVCNT_INCR(txq, txdw);
   10989 	if (txlimit > 0) {
   10990 		txmore = wm_txeof(txq, txlimit);
   10991 		/* wm_deferred start() is done in wm_handle_queue(). */
   10992 	} else
   10993 		txmore = true;
   10994 	mutex_exit(txq->txq_lock);
   10995 
   10996 	DPRINTF(sc, WM_DEBUG_RX,
   10997 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10998 	mutex_enter(rxq->rxq_lock);
   10999 
   11000 	if (rxq->rxq_stopping) {
   11001 		mutex_exit(rxq->rxq_lock);
   11002 		return 1;
   11003 	}
   11004 
   11005 	WM_Q_EVCNT_INCR(rxq, intr);
   11006 	if (rxlimit > 0) {
   11007 		rxmore = wm_rxeof(rxq, rxlimit);
   11008 	} else
   11009 		rxmore = true;
   11010 	mutex_exit(rxq->rxq_lock);
   11011 
   11012 	wm_itrs_writereg(sc, wmq);
   11013 
   11014 	if (txmore || rxmore) {
   11015 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11016 		wm_sched_handle_queue(sc, wmq);
   11017 	} else
   11018 		wm_txrxintr_enable(wmq);
   11019 
   11020 	return 1;
   11021 }
   11022 
   11023 static void
   11024 wm_handle_queue(void *arg)
   11025 {
   11026 	struct wm_queue *wmq = arg;
   11027 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11028 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11029 	struct wm_softc *sc = txq->txq_sc;
   11030 	u_int txlimit = sc->sc_tx_process_limit;
   11031 	u_int rxlimit = sc->sc_rx_process_limit;
   11032 	bool txmore;
   11033 	bool rxmore;
   11034 
   11035 	mutex_enter(txq->txq_lock);
   11036 	if (txq->txq_stopping) {
   11037 		mutex_exit(txq->txq_lock);
   11038 		return;
   11039 	}
   11040 	txmore = wm_txeof(txq, txlimit);
   11041 	wm_deferred_start_locked(txq);
   11042 	mutex_exit(txq->txq_lock);
   11043 
   11044 	mutex_enter(rxq->rxq_lock);
   11045 	if (rxq->rxq_stopping) {
   11046 		mutex_exit(rxq->rxq_lock);
   11047 		return;
   11048 	}
   11049 	WM_Q_EVCNT_INCR(rxq, defer);
   11050 	rxmore = wm_rxeof(rxq, rxlimit);
   11051 	mutex_exit(rxq->rxq_lock);
   11052 
   11053 	if (txmore || rxmore) {
   11054 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11055 		wm_sched_handle_queue(sc, wmq);
   11056 	} else
   11057 		wm_txrxintr_enable(wmq);
   11058 }
   11059 
   11060 static void
   11061 wm_handle_queue_work(struct work *wk, void *context)
   11062 {
   11063 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11064 
   11065 	/*
    11066 	 * Workaround for some qemu environments, which don't stop the
    11067 	 * interrupt immediately.
   11068 	 */
   11069 	wmq->wmq_wq_enqueued = false;
   11070 	wm_handle_queue(wmq);
   11071 }
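
/*
 * The matching workqueue for wm_handle_queue_work() is created at attach
 * time, roughly as sketched below: a per-CPU, MPSAFE workqueue running at
 * the network software-interrupt priority.  This is a sketch only; the
 * error handling and the exact queue name live in the attach code.
 */
#if 0
	error = workqueue_create(&sc->sc_queue_wq, "wmtxrx",
	    wm_handle_queue_work, NULL, PRI_SOFTNET, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
#endif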
   11072 
   11073 /*
   11074  * wm_linkintr_msix:
   11075  *
   11076  *	Interrupt service routine for link status change for MSI-X.
   11077  */
   11078 static int
   11079 wm_linkintr_msix(void *arg)
   11080 {
   11081 	struct wm_softc *sc = arg;
   11082 	uint32_t reg;
    11083 	bool has_rxo = false;
   11084 
   11085 	reg = CSR_READ(sc, WMREG_ICR);
   11086 	mutex_enter(sc->sc_core_lock);
   11087 	DPRINTF(sc, WM_DEBUG_LINK,
   11088 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11089 		device_xname(sc->sc_dev), reg));
   11090 
   11091 	if (sc->sc_core_stopping)
   11092 		goto out;
   11093 
   11094 	if ((reg & ICR_LSC) != 0) {
   11095 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11096 		wm_linkintr(sc, ICR_LSC);
   11097 	}
   11098 	if ((reg & ICR_GPI(0)) != 0)
   11099 		device_printf(sc->sc_dev, "got module interrupt\n");
   11100 
   11101 	/*
   11102 	 * XXX 82574 MSI-X mode workaround
   11103 	 *
    11104 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is delivered
    11105 	 * on the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
    11106 	 * the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
    11107 	 * interrupts by writing WMREG_ICS to process the received packets.
   11108 	 */
   11109 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11110 #if defined(WM_DEBUG)
   11111 		log(LOG_WARNING, "%s: Receive overrun\n",
   11112 		    device_xname(sc->sc_dev));
   11113 #endif /* defined(WM_DEBUG) */
   11114 
   11115 		has_rxo = true;
   11116 		/*
    11117 		 * The RXO interrupt fires at a very high rate when receive
    11118 		 * traffic is heavy, so use polling mode for ICR_OTHER as is
    11119 		 * done for the Tx/Rx interrupts.  ICR_OTHER is re-enabled at
    11120 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    11121 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   11122 		 */
   11123 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11124 
   11125 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11126 	}
    11127 
   11130 out:
   11131 	mutex_exit(sc->sc_core_lock);
   11132 
   11133 	if (sc->sc_type == WM_T_82574) {
   11134 		if (!has_rxo)
   11135 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11136 		else
   11137 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11138 	} else if (sc->sc_type == WM_T_82575)
   11139 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11140 	else
   11141 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11142 
   11143 	return 1;
   11144 }
   11145 
   11146 /*
   11147  * Media related.
   11148  * GMII, SGMII, TBI (and SERDES)
   11149  */
   11150 
   11151 /* Common */
   11152 
   11153 /*
   11154  * wm_tbi_serdes_set_linkled:
   11155  *
   11156  *	Update the link LED on TBI and SERDES devices.
   11157  */
   11158 static void
   11159 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11160 {
   11161 
   11162 	if (sc->sc_tbi_linkup)
   11163 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11164 	else
   11165 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11166 
   11167 	/* 82540 or newer devices are active low */
   11168 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11169 
   11170 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11171 }
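
/*
 * Note on the XOR above: on 82540 and newer parts the link LED pin is
 * active low, so after the link state has been encoded into CTRL_SWDPIN(0)
 * the bit is inverted; e.g. with the link up the bit is first set and then
 * cleared by the XOR, driving the pin low and lighting the LED.
 */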
   11172 
   11173 /* GMII related */
   11174 
   11175 /*
   11176  * wm_gmii_reset:
   11177  *
   11178  *	Reset the PHY.
   11179  */
   11180 static void
   11181 wm_gmii_reset(struct wm_softc *sc)
   11182 {
   11183 	uint32_t reg;
   11184 	int rv;
   11185 
   11186 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11187 		device_xname(sc->sc_dev), __func__));
   11188 
   11189 	rv = sc->phy.acquire(sc);
   11190 	if (rv != 0) {
   11191 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11192 		    __func__);
   11193 		return;
   11194 	}
   11195 
   11196 	switch (sc->sc_type) {
   11197 	case WM_T_82542_2_0:
   11198 	case WM_T_82542_2_1:
   11199 		/* null */
   11200 		break;
   11201 	case WM_T_82543:
   11202 		/*
    11203 		 * With the 82543, we need to force the MAC's speed and
    11204 		 * duplex settings to match the PHY's configuration.
   11205 		 * In addition, we need to perform a hardware reset on the PHY
   11206 		 * to take it out of reset.
   11207 		 */
   11208 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11209 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11210 
   11211 		/* The PHY reset pin is active-low. */
   11212 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11213 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11214 		    CTRL_EXT_SWDPIN(4));
   11215 		reg |= CTRL_EXT_SWDPIO(4);
   11216 
   11217 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11218 		CSR_WRITE_FLUSH(sc);
   11219 		delay(10*1000);
   11220 
   11221 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11222 		CSR_WRITE_FLUSH(sc);
   11223 		delay(150);
   11224 #if 0
   11225 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11226 #endif
   11227 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11228 		break;
   11229 	case WM_T_82544:	/* Reset 10000us */
   11230 	case WM_T_82540:
   11231 	case WM_T_82545:
   11232 	case WM_T_82545_3:
   11233 	case WM_T_82546:
   11234 	case WM_T_82546_3:
   11235 	case WM_T_82541:
   11236 	case WM_T_82541_2:
   11237 	case WM_T_82547:
   11238 	case WM_T_82547_2:
   11239 	case WM_T_82571:	/* Reset 100us */
   11240 	case WM_T_82572:
   11241 	case WM_T_82573:
   11242 	case WM_T_82574:
   11243 	case WM_T_82575:
   11244 	case WM_T_82576:
   11245 	case WM_T_82580:
   11246 	case WM_T_I350:
   11247 	case WM_T_I354:
   11248 	case WM_T_I210:
   11249 	case WM_T_I211:
   11250 	case WM_T_82583:
   11251 	case WM_T_80003:
   11252 		/* Generic reset */
   11253 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11254 		CSR_WRITE_FLUSH(sc);
   11255 		delay(20000);
   11256 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11257 		CSR_WRITE_FLUSH(sc);
   11258 		delay(20000);
   11259 
   11260 		if ((sc->sc_type == WM_T_82541)
   11261 		    || (sc->sc_type == WM_T_82541_2)
   11262 		    || (sc->sc_type == WM_T_82547)
   11263 		    || (sc->sc_type == WM_T_82547_2)) {
    11264 			/* Workarounds for IGP are done in igp_reset() */
   11265 			/* XXX add code to set LED after phy reset */
   11266 		}
   11267 		break;
   11268 	case WM_T_ICH8:
   11269 	case WM_T_ICH9:
   11270 	case WM_T_ICH10:
   11271 	case WM_T_PCH:
   11272 	case WM_T_PCH2:
   11273 	case WM_T_PCH_LPT:
   11274 	case WM_T_PCH_SPT:
   11275 	case WM_T_PCH_CNP:
   11276 		/* Generic reset */
   11277 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11278 		CSR_WRITE_FLUSH(sc);
   11279 		delay(100);
   11280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11281 		CSR_WRITE_FLUSH(sc);
   11282 		delay(150);
   11283 		break;
   11284 	default:
   11285 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11286 		    __func__);
   11287 		break;
   11288 	}
   11289 
   11290 	sc->phy.release(sc);
   11291 
   11292 	/* get_cfg_done */
   11293 	wm_get_cfg_done(sc);
   11294 
   11295 	/* Extra setup */
   11296 	switch (sc->sc_type) {
   11297 	case WM_T_82542_2_0:
   11298 	case WM_T_82542_2_1:
   11299 	case WM_T_82543:
   11300 	case WM_T_82544:
   11301 	case WM_T_82540:
   11302 	case WM_T_82545:
   11303 	case WM_T_82545_3:
   11304 	case WM_T_82546:
   11305 	case WM_T_82546_3:
   11306 	case WM_T_82541_2:
   11307 	case WM_T_82547_2:
   11308 	case WM_T_82571:
   11309 	case WM_T_82572:
   11310 	case WM_T_82573:
   11311 	case WM_T_82574:
   11312 	case WM_T_82583:
   11313 	case WM_T_82575:
   11314 	case WM_T_82576:
   11315 	case WM_T_82580:
   11316 	case WM_T_I350:
   11317 	case WM_T_I354:
   11318 	case WM_T_I210:
   11319 	case WM_T_I211:
   11320 	case WM_T_80003:
   11321 		/* Null */
   11322 		break;
   11323 	case WM_T_82541:
   11324 	case WM_T_82547:
    11325 		/* XXX Configure the activity LED after PHY reset */
   11326 		break;
   11327 	case WM_T_ICH8:
   11328 	case WM_T_ICH9:
   11329 	case WM_T_ICH10:
   11330 	case WM_T_PCH:
   11331 	case WM_T_PCH2:
   11332 	case WM_T_PCH_LPT:
   11333 	case WM_T_PCH_SPT:
   11334 	case WM_T_PCH_CNP:
   11335 		wm_phy_post_reset(sc);
   11336 		break;
   11337 	default:
   11338 		panic("%s: unknown type\n", __func__);
   11339 		break;
   11340 	}
   11341 }
   11342 
   11343 /*
   11344  * Set up sc_phytype and mii_{read|write}reg.
   11345  *
    11346  *  To identify the PHY type, the correct read/write functions must be
    11347  * selected, and selecting them requires the PCI ID or MAC type because
    11348  * the PHY registers cannot be accessed yet.
    11349  *
    11350  *  On the first call of this function, the PHY ID is not known yet, so
    11351  * the PCI ID or MAC type is checked.  The list of PCI IDs may not be
    11352  * complete, so the result might be incorrect.
    11353  *
    11354  *  On the second call, the PHY OUI and model are used to identify the
    11355  * PHY type.  This may still be imperfect when an entry is missing from
    11356  * the comparison list, but it is more reliable than the first call.
    11357  *
    11358  *  If the newly detected result differs from the previous assumption,
    11359  * a diagnostic message is printed.
   11360  */
   11361 static void
   11362 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11363     uint16_t phy_model)
   11364 {
   11365 	device_t dev = sc->sc_dev;
   11366 	struct mii_data *mii = &sc->sc_mii;
   11367 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11368 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11369 	mii_readreg_t new_readreg;
   11370 	mii_writereg_t new_writereg;
   11371 	bool dodiag = true;
   11372 
   11373 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11374 		device_xname(sc->sc_dev), __func__));
   11375 
   11376 	/*
    11377 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    11378 	 * incorrect, so don't print diagnostic output on the second call.
   11379 	 */
   11380 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11381 		dodiag = false;
   11382 
   11383 	if (mii->mii_readreg == NULL) {
   11384 		/*
   11385 		 *  This is the first call of this function. For ICH and PCH
   11386 		 * variants, it's difficult to determine the PHY access method
   11387 		 * by sc_type, so use the PCI product ID for some devices.
   11388 		 */
   11389 
   11390 		switch (sc->sc_pcidevid) {
   11391 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11392 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11393 			/* 82577 */
   11394 			new_phytype = WMPHY_82577;
   11395 			break;
   11396 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11397 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11398 			/* 82578 */
   11399 			new_phytype = WMPHY_82578;
   11400 			break;
   11401 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11402 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11403 			/* 82579 */
   11404 			new_phytype = WMPHY_82579;
   11405 			break;
   11406 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11407 		case PCI_PRODUCT_INTEL_82801I_BM:
   11408 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11409 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11410 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11411 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11412 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11413 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11414 			/* ICH8, 9, 10 with 82567 */
   11415 			new_phytype = WMPHY_BM;
   11416 			break;
   11417 		default:
   11418 			break;
   11419 		}
   11420 	} else {
   11421 		/* It's not the first call. Use PHY OUI and model */
   11422 		switch (phy_oui) {
   11423 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11424 			switch (phy_model) {
   11425 			case MII_MODEL_ATTANSIC_AR8021:
   11426 				new_phytype = WMPHY_82578;
   11427 				break;
   11428 			default:
   11429 				break;
   11430 			}
   11431 			break;
   11432 		case MII_OUI_xxMARVELL:
   11433 			switch (phy_model) {
   11434 			case MII_MODEL_xxMARVELL_I210:
   11435 				new_phytype = WMPHY_I210;
   11436 				break;
   11437 			case MII_MODEL_xxMARVELL_E1011:
   11438 			case MII_MODEL_xxMARVELL_E1000_3:
   11439 			case MII_MODEL_xxMARVELL_E1000_5:
   11440 			case MII_MODEL_xxMARVELL_E1112:
   11441 				new_phytype = WMPHY_M88;
   11442 				break;
   11443 			case MII_MODEL_xxMARVELL_E1149:
   11444 				new_phytype = WMPHY_BM;
   11445 				break;
   11446 			case MII_MODEL_xxMARVELL_E1111:
   11447 			case MII_MODEL_xxMARVELL_I347:
   11448 			case MII_MODEL_xxMARVELL_E1512:
   11449 			case MII_MODEL_xxMARVELL_E1340M:
   11450 			case MII_MODEL_xxMARVELL_E1543:
   11451 				new_phytype = WMPHY_M88;
   11452 				break;
   11453 			case MII_MODEL_xxMARVELL_I82563:
   11454 				new_phytype = WMPHY_GG82563;
   11455 				break;
   11456 			default:
   11457 				break;
   11458 			}
   11459 			break;
   11460 		case MII_OUI_INTEL:
   11461 			switch (phy_model) {
   11462 			case MII_MODEL_INTEL_I82577:
   11463 				new_phytype = WMPHY_82577;
   11464 				break;
   11465 			case MII_MODEL_INTEL_I82579:
   11466 				new_phytype = WMPHY_82579;
   11467 				break;
   11468 			case MII_MODEL_INTEL_I217:
   11469 				new_phytype = WMPHY_I217;
   11470 				break;
   11471 			case MII_MODEL_INTEL_I82580:
   11472 				new_phytype = WMPHY_82580;
   11473 				break;
   11474 			case MII_MODEL_INTEL_I350:
   11475 				new_phytype = WMPHY_I350;
   11476 				break;
   11477 			default:
   11478 				break;
   11479 			}
   11480 			break;
   11481 		case MII_OUI_yyINTEL:
   11482 			switch (phy_model) {
   11483 			case MII_MODEL_yyINTEL_I82562G:
   11484 			case MII_MODEL_yyINTEL_I82562EM:
   11485 			case MII_MODEL_yyINTEL_I82562ET:
   11486 				new_phytype = WMPHY_IFE;
   11487 				break;
   11488 			case MII_MODEL_yyINTEL_IGP01E1000:
   11489 				new_phytype = WMPHY_IGP;
   11490 				break;
   11491 			case MII_MODEL_yyINTEL_I82566:
   11492 				new_phytype = WMPHY_IGP_3;
   11493 				break;
   11494 			default:
   11495 				break;
   11496 			}
   11497 			break;
   11498 		default:
   11499 			break;
   11500 		}
   11501 
   11502 		if (dodiag) {
   11503 			if (new_phytype == WMPHY_UNKNOWN)
   11504 				aprint_verbose_dev(dev,
   11505 				    "%s: Unknown PHY model. OUI=%06x, "
   11506 				    "model=%04x\n", __func__, phy_oui,
   11507 				    phy_model);
   11508 
   11509 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11510 			    && (sc->sc_phytype != new_phytype)) {
   11511 				aprint_error_dev(dev, "Previously assumed PHY "
   11512 				    "type(%u) was incorrect. PHY type from PHY"
   11513 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11514 			}
   11515 		}
   11516 	}
   11517 
   11518 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11519 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11520 		/* SGMII */
   11521 		new_readreg = wm_sgmii_readreg;
   11522 		new_writereg = wm_sgmii_writereg;
   11523 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11524 		/* BM2 (phyaddr == 1) */
   11525 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11526 		    && (new_phytype != WMPHY_BM)
   11527 		    && (new_phytype != WMPHY_UNKNOWN))
   11528 			doubt_phytype = new_phytype;
   11529 		new_phytype = WMPHY_BM;
   11530 		new_readreg = wm_gmii_bm_readreg;
   11531 		new_writereg = wm_gmii_bm_writereg;
   11532 	} else if (sc->sc_type >= WM_T_PCH) {
   11533 		/* All PCH* use _hv_ */
   11534 		new_readreg = wm_gmii_hv_readreg;
   11535 		new_writereg = wm_gmii_hv_writereg;
   11536 	} else if (sc->sc_type >= WM_T_ICH8) {
   11537 		/* non-82567 ICH8, 9 and 10 */
   11538 		new_readreg = wm_gmii_i82544_readreg;
   11539 		new_writereg = wm_gmii_i82544_writereg;
   11540 	} else if (sc->sc_type >= WM_T_80003) {
   11541 		/* 80003 */
   11542 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11543 		    && (new_phytype != WMPHY_GG82563)
   11544 		    && (new_phytype != WMPHY_UNKNOWN))
   11545 			doubt_phytype = new_phytype;
   11546 		new_phytype = WMPHY_GG82563;
   11547 		new_readreg = wm_gmii_i80003_readreg;
   11548 		new_writereg = wm_gmii_i80003_writereg;
   11549 	} else if (sc->sc_type >= WM_T_I210) {
   11550 		/* I210 and I211 */
   11551 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11552 		    && (new_phytype != WMPHY_I210)
   11553 		    && (new_phytype != WMPHY_UNKNOWN))
   11554 			doubt_phytype = new_phytype;
   11555 		new_phytype = WMPHY_I210;
   11556 		new_readreg = wm_gmii_gs40g_readreg;
   11557 		new_writereg = wm_gmii_gs40g_writereg;
   11558 	} else if (sc->sc_type >= WM_T_82580) {
   11559 		/* 82580, I350 and I354 */
   11560 		new_readreg = wm_gmii_82580_readreg;
   11561 		new_writereg = wm_gmii_82580_writereg;
   11562 	} else if (sc->sc_type >= WM_T_82544) {
    11563 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11564 		new_readreg = wm_gmii_i82544_readreg;
   11565 		new_writereg = wm_gmii_i82544_writereg;
   11566 	} else {
   11567 		new_readreg = wm_gmii_i82543_readreg;
   11568 		new_writereg = wm_gmii_i82543_writereg;
   11569 	}
   11570 
   11571 	if (new_phytype == WMPHY_BM) {
   11572 		/* All BM use _bm_ */
   11573 		new_readreg = wm_gmii_bm_readreg;
   11574 		new_writereg = wm_gmii_bm_writereg;
   11575 	}
   11576 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11577 		/* All PCH* use _hv_ */
   11578 		new_readreg = wm_gmii_hv_readreg;
   11579 		new_writereg = wm_gmii_hv_writereg;
   11580 	}
   11581 
   11582 	/* Diag output */
   11583 	if (dodiag) {
   11584 		if (doubt_phytype != WMPHY_UNKNOWN)
   11585 			aprint_error_dev(dev, "Assumed new PHY type was "
   11586 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11587 			    new_phytype);
   11588 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11589 		    && (sc->sc_phytype != new_phytype))
   11590 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   11591 			    "was incorrect. New PHY type = %u\n",
   11592 			    sc->sc_phytype, new_phytype);
   11593 
   11594 		if ((mii->mii_readreg != NULL) &&
   11595 		    (new_phytype == WMPHY_UNKNOWN))
   11596 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11597 
   11598 		if ((mii->mii_readreg != NULL) &&
   11599 		    (mii->mii_readreg != new_readreg))
   11600 			aprint_error_dev(dev, "Previously assumed PHY "
   11601 			    "read/write function was incorrect.\n");
   11602 	}
   11603 
   11604 	/* Update now */
   11605 	sc->sc_phytype = new_phytype;
   11606 	mii->mii_readreg = new_readreg;
   11607 	mii->mii_writereg = new_writereg;
   11608 	if (new_readreg == wm_gmii_hv_readreg) {
   11609 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11610 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11611 	} else if (new_readreg == wm_sgmii_readreg) {
   11612 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11613 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11614 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11615 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11616 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11617 	}
   11618 }
   11619 
   11620 /*
   11621  * wm_get_phy_id_82575:
   11622  *
    11623  * Return the PHY ID, or -1 on failure.
   11624  */
   11625 static int
   11626 wm_get_phy_id_82575(struct wm_softc *sc)
   11627 {
   11628 	uint32_t reg;
   11629 	int phyid = -1;
   11630 
   11631 	/* XXX */
   11632 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11633 		return -1;
   11634 
   11635 	if (wm_sgmii_uses_mdio(sc)) {
   11636 		switch (sc->sc_type) {
   11637 		case WM_T_82575:
   11638 		case WM_T_82576:
   11639 			reg = CSR_READ(sc, WMREG_MDIC);
   11640 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11641 			break;
   11642 		case WM_T_82580:
   11643 		case WM_T_I350:
   11644 		case WM_T_I354:
   11645 		case WM_T_I210:
   11646 		case WM_T_I211:
   11647 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11648 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11649 			break;
   11650 		default:
   11651 			return -1;
   11652 		}
   11653 	}
   11654 
   11655 	return phyid;
   11656 }
   11657 
   11658 /*
   11659  * wm_gmii_mediainit:
   11660  *
   11661  *	Initialize media for use on 1000BASE-T devices.
   11662  */
   11663 static void
   11664 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11665 {
   11666 	device_t dev = sc->sc_dev;
   11667 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11668 	struct mii_data *mii = &sc->sc_mii;
   11669 
   11670 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11671 		device_xname(sc->sc_dev), __func__));
   11672 
   11673 	/* We have GMII. */
   11674 	sc->sc_flags |= WM_F_HAS_MII;
   11675 
   11676 	if (sc->sc_type == WM_T_80003)
    11677 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11678 	else
   11679 		sc->sc_tipg = TIPG_1000T_DFLT;
   11680 
   11681 	/*
   11682 	 * Let the chip set speed/duplex on its own based on
   11683 	 * signals from the PHY.
   11684 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11685 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11686 	 */
   11687 	sc->sc_ctrl |= CTRL_SLU;
   11688 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11689 
   11690 	/* Initialize our media structures and probe the GMII. */
   11691 	mii->mii_ifp = ifp;
   11692 
   11693 	mii->mii_statchg = wm_gmii_statchg;
   11694 
   11695 	/* get PHY control from SMBus to PCIe */
   11696 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11697 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11698 	    || (sc->sc_type == WM_T_PCH_CNP))
   11699 		wm_init_phy_workarounds_pchlan(sc);
   11700 
   11701 	wm_gmii_reset(sc);
   11702 
   11703 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11704 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11705 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11706 
   11707 	/* Setup internal SGMII PHY for SFP */
   11708 	wm_sgmii_sfp_preconfig(sc);
   11709 
   11710 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11711 	    || (sc->sc_type == WM_T_82580)
   11712 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11713 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11714 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11715 			/* Attach only one port */
   11716 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11717 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11718 		} else {
   11719 			int i, id;
   11720 			uint32_t ctrl_ext;
   11721 
   11722 			id = wm_get_phy_id_82575(sc);
   11723 			if (id != -1) {
   11724 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11725 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11726 			}
   11727 			if ((id == -1)
   11728 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11729 				/* Power on sgmii phy if it is disabled */
   11730 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11731 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11732 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11733 				CSR_WRITE_FLUSH(sc);
   11734 				delay(300*1000); /* XXX too long */
   11735 
   11736 				/*
    11737 				 * Scan PHY addresses 1 through 7.
    11738 				 *
    11739 				 * I2C access can fail with the I2C register's
    11740 				 * ERROR bit set, so suppress error messages
    11741 				 * while scanning.
   11742 				 */
   11743 				sc->phy.no_errprint = true;
   11744 				for (i = 1; i < 8; i++)
   11745 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11746 					    0xffffffff, i, MII_OFFSET_ANY,
   11747 					    MIIF_DOPAUSE);
   11748 				sc->phy.no_errprint = false;
   11749 
   11750 				/* Restore previous sfp cage power state */
   11751 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11752 			}
   11753 		}
   11754 	} else
   11755 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11756 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11757 
   11758 	/*
    11759 	 * If the MAC is a PCH2 or newer variant and no MII PHY was detected,
    11760 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   11761 	 */
   11762 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11763 		|| (sc->sc_type == WM_T_PCH_SPT)
   11764 		|| (sc->sc_type == WM_T_PCH_CNP))
   11765 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11766 		wm_set_mdio_slow_mode_hv(sc);
   11767 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11768 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11769 	}
   11770 
   11771 	/*
   11772 	 * (For ICH8 variants)
   11773 	 * If PHY detection failed, use BM's r/w function and retry.
   11774 	 */
   11775 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11776 		/* if failed, retry with *_bm_* */
   11777 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11778 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11779 		    sc->sc_phytype);
   11780 		sc->sc_phytype = WMPHY_BM;
   11781 		mii->mii_readreg = wm_gmii_bm_readreg;
   11782 		mii->mii_writereg = wm_gmii_bm_writereg;
   11783 
   11784 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11785 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11786 	}
   11787 
   11788 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11789 		/* No PHY was found */
   11790 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11791 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11792 		sc->sc_phytype = WMPHY_NONE;
   11793 	} else {
   11794 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11795 
   11796 		/*
    11797 		 * PHY found! Check the PHY type again with the second call
    11798 		 * of wm_gmii_setup_phytype().
   11799 		 */
   11800 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11801 		    child->mii_mpd_model);
   11802 
   11803 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11804 	}
   11805 }
   11806 
   11807 /*
   11808  * wm_gmii_mediachange:	[ifmedia interface function]
   11809  *
   11810  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11811  */
   11812 static int
   11813 wm_gmii_mediachange(struct ifnet *ifp)
   11814 {
   11815 	struct wm_softc *sc = ifp->if_softc;
   11816 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11817 	uint32_t reg;
   11818 	int rc;
   11819 
   11820 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11821 		device_xname(sc->sc_dev), __func__));
   11822 
   11823 	KASSERT(mutex_owned(sc->sc_core_lock));
   11824 
   11825 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11826 		return 0;
   11827 
   11828 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11829 	if ((sc->sc_type == WM_T_82580)
   11830 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11831 	    || (sc->sc_type == WM_T_I211)) {
   11832 		reg = CSR_READ(sc, WMREG_PHPM);
   11833 		reg &= ~PHPM_GO_LINK_D;
   11834 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11835 	}
   11836 
   11837 	/* Disable D0 LPLU. */
   11838 	wm_lplu_d0_disable(sc);
   11839 
   11840 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11841 	sc->sc_ctrl |= CTRL_SLU;
   11842 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11843 	    || (sc->sc_type > WM_T_82543)) {
   11844 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11845 	} else {
   11846 		sc->sc_ctrl &= ~CTRL_ASDE;
   11847 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11848 		if (ife->ifm_media & IFM_FDX)
   11849 			sc->sc_ctrl |= CTRL_FD;
   11850 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11851 		case IFM_10_T:
   11852 			sc->sc_ctrl |= CTRL_SPEED_10;
   11853 			break;
   11854 		case IFM_100_TX:
   11855 			sc->sc_ctrl |= CTRL_SPEED_100;
   11856 			break;
   11857 		case IFM_1000_T:
   11858 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11859 			break;
   11860 		case IFM_NONE:
   11861 			/* There is no specific setting for IFM_NONE */
   11862 			break;
   11863 		default:
   11864 			panic("wm_gmii_mediachange: bad media 0x%x",
   11865 			    ife->ifm_media);
   11866 		}
   11867 	}
   11868 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11869 	CSR_WRITE_FLUSH(sc);
   11870 
   11871 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11872 		wm_serdes_mediachange(ifp);
   11873 
   11874 	if (sc->sc_type <= WM_T_82543)
   11875 		wm_gmii_reset(sc);
   11876 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11877 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11878 		/* Allow time for the SFP cage to power up the PHY. */
   11879 		delay(300 * 1000);
   11880 		wm_gmii_reset(sc);
   11881 	}
   11882 
   11883 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11884 		return 0;
   11885 	return rc;
   11886 }
   11887 
   11888 /*
   11889  * wm_gmii_mediastatus:	[ifmedia interface function]
   11890  *
   11891  *	Get the current interface media status on a 1000BASE-T device.
   11892  */
   11893 static void
   11894 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11895 {
   11896 	struct wm_softc *sc = ifp->if_softc;
   11897 	struct ethercom *ec = &sc->sc_ethercom;
   11898 	struct mii_data *mii;
   11899 	bool dopoll = true;
   11900 
   11901 	/*
    11902 	 * Normal drivers call ether_mediastatus() here.  It is open-coded
    11903 	 * here instead so that calling mii_pollstat() can be avoided.
   11904 	 */
   11905 	KASSERT(mutex_owned(sc->sc_core_lock));
   11906 	KASSERT(ec->ec_mii != NULL);
   11907 	KASSERT(mii_locked(ec->ec_mii));
   11908 
   11909 	mii = ec->ec_mii;
   11910 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   11911 		struct timeval now;
   11912 
   11913 		getmicrotime(&now);
   11914 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   11915 			dopoll = false;
   11916 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   11917 			/* Simplify by checking tv_sec only. It's enough. */
   11918 
   11919 			sc->sc_linkup_delay_time.tv_sec = 0;
   11920 			sc->sc_linkup_delay_time.tv_usec = 0;
   11921 		}
   11922 	}
   11923 
   11924 	/*
   11925 	 * Don't call mii_pollstat() while doing workaround.
   11926 	 * See also wm_linkintr_gmii() and wm_tick().
   11927 	 */
   11928 	if (dopoll)
   11929 		mii_pollstat(mii);
   11930 	ifmr->ifm_active = mii->mii_media_active;
   11931 	ifmr->ifm_status = mii->mii_media_status;
   11932 
   11933 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11934 	    | sc->sc_flowflags;
   11935 }
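
/*
 * Note on the WM_F_DELAY_LINKUP handling above: while the current time is
 * still before sc_linkup_delay_time, mii_pollstat() is skipped so that a
 * link-up state is not reported too early; once the deadline has passed,
 * the timestamp is cleared so the comparison is not repeated.
 */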
   11936 
   11937 #define	MDI_IO		CTRL_SWDPIN(2)
   11938 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11939 #define	MDI_CLK		CTRL_SWDPIN(3)
   11940 
   11941 static void
   11942 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11943 {
   11944 	uint32_t i, v;
   11945 
   11946 	v = CSR_READ(sc, WMREG_CTRL);
   11947 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11948 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11949 
   11950 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11951 		if (data & i)
   11952 			v |= MDI_IO;
   11953 		else
   11954 			v &= ~MDI_IO;
   11955 		CSR_WRITE(sc, WMREG_CTRL, v);
   11956 		CSR_WRITE_FLUSH(sc);
   11957 		delay(10);
   11958 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11959 		CSR_WRITE_FLUSH(sc);
   11960 		delay(10);
   11961 		CSR_WRITE(sc, WMREG_CTRL, v);
   11962 		CSR_WRITE_FLUSH(sc);
   11963 		delay(10);
   11964 	}
   11965 }
   11966 
   11967 static uint16_t
   11968 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11969 {
   11970 	uint32_t v, i;
   11971 	uint16_t data = 0;
   11972 
   11973 	v = CSR_READ(sc, WMREG_CTRL);
   11974 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11975 	v |= CTRL_SWDPIO(3);
   11976 
   11977 	CSR_WRITE(sc, WMREG_CTRL, v);
   11978 	CSR_WRITE_FLUSH(sc);
   11979 	delay(10);
   11980 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11981 	CSR_WRITE_FLUSH(sc);
   11982 	delay(10);
   11983 	CSR_WRITE(sc, WMREG_CTRL, v);
   11984 	CSR_WRITE_FLUSH(sc);
   11985 	delay(10);
   11986 
   11987 	for (i = 0; i < 16; i++) {
   11988 		data <<= 1;
   11989 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11990 		CSR_WRITE_FLUSH(sc);
   11991 		delay(10);
   11992 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11993 			data |= 1;
   11994 		CSR_WRITE(sc, WMREG_CTRL, v);
   11995 		CSR_WRITE_FLUSH(sc);
   11996 		delay(10);
   11997 	}
   11998 
   11999 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12000 	CSR_WRITE_FLUSH(sc);
   12001 	delay(10);
   12002 	CSR_WRITE(sc, WMREG_CTRL, v);
   12003 	CSR_WRITE_FLUSH(sc);
   12004 	delay(10);
   12005 
   12006 	return data;
   12007 }
   12008 
   12009 #undef MDI_IO
   12010 #undef MDI_DIR
   12011 #undef MDI_CLK
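
/*
 * The two bit-bang helpers above clock an IEEE 802.3 clause 22 MDIO frame
 * through the software-controlled pins.  The frame layout, matching how
 * wm_gmii_i82543_readreg()/_writereg() below pack their bits, is:
 *
 *	<PRE:32x1> <ST:01> <OP:10=read,01=write> <PHYAD:5> <REGAD:5>
 *	<TA:2> <DATA:16>
 *
 * For a read, the host drives the frame only through REGAD (the 14 bits
 * after the preamble) and then turns the bus around so the PHY can drive
 * the 16 data bits.
 */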
   12012 
   12013 /*
   12014  * wm_gmii_i82543_readreg:	[mii interface function]
   12015  *
   12016  *	Read a PHY register on the GMII (i82543 version).
   12017  */
   12018 static int
   12019 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12020 {
   12021 	struct wm_softc *sc = device_private(dev);
   12022 
   12023 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12024 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12025 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12026 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12027 
   12028 	DPRINTF(sc, WM_DEBUG_GMII,
   12029 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12030 		device_xname(dev), phy, reg, *val));
   12031 
   12032 	return 0;
   12033 }
   12034 
   12035 /*
   12036  * wm_gmii_i82543_writereg:	[mii interface function]
   12037  *
   12038  *	Write a PHY register on the GMII (i82543 version).
   12039  */
   12040 static int
   12041 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12042 {
   12043 	struct wm_softc *sc = device_private(dev);
   12044 
   12045 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12046 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12047 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12048 	    (MII_COMMAND_START << 30), 32);
   12049 
   12050 	return 0;
   12051 }
   12052 
   12053 /*
   12054  * wm_gmii_mdic_readreg:	[mii interface function]
   12055  *
   12056  *	Read a PHY register on the GMII.
   12057  */
   12058 static int
   12059 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12060 {
   12061 	struct wm_softc *sc = device_private(dev);
   12062 	uint32_t mdic = 0;
   12063 	int i;
   12064 
   12065 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12066 	    && (reg > MII_ADDRMASK)) {
   12067 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12068 		    __func__, sc->sc_phytype, reg);
   12069 		reg &= MII_ADDRMASK;
   12070 	}
   12071 
   12072 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12073 	    MDIC_REGADD(reg));
   12074 
   12075 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12076 		delay(50);
   12077 		mdic = CSR_READ(sc, WMREG_MDIC);
   12078 		if (mdic & MDIC_READY)
   12079 			break;
   12080 	}
   12081 
   12082 	if ((mdic & MDIC_READY) == 0) {
   12083 		DPRINTF(sc, WM_DEBUG_GMII,
   12084 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12085 			device_xname(dev), phy, reg));
   12086 		return ETIMEDOUT;
   12087 	} else if (mdic & MDIC_E) {
   12088 		/* This is normal if no PHY is present. */
   12089 		DPRINTF(sc, WM_DEBUG_GMII,
   12090 		    ("%s: MDIC read error: phy %d reg %d\n",
    12091 			device_xname(dev), phy, reg));
   12092 		return -1;
   12093 	} else
   12094 		*val = MDIC_DATA(mdic);
   12095 
   12096 	/*
   12097 	 * Allow some time after each MDIC transaction to avoid
   12098 	 * reading duplicate data in the next MDIC transaction.
   12099 	 */
   12100 	if (sc->sc_type == WM_T_PCH2)
   12101 		delay(100);
   12102 
   12103 	return 0;
   12104 }
   12105 
   12106 /*
   12107  * wm_gmii_mdic_writereg:	[mii interface function]
   12108  *
   12109  *	Write a PHY register on the GMII.
   12110  */
   12111 static int
   12112 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12113 {
   12114 	struct wm_softc *sc = device_private(dev);
   12115 	uint32_t mdic = 0;
   12116 	int i;
   12117 
   12118 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12119 	    && (reg > MII_ADDRMASK)) {
   12120 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12121 		    __func__, sc->sc_phytype, reg);
   12122 		reg &= MII_ADDRMASK;
   12123 	}
   12124 
   12125 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12126 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12127 
   12128 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12129 		delay(50);
   12130 		mdic = CSR_READ(sc, WMREG_MDIC);
   12131 		if (mdic & MDIC_READY)
   12132 			break;
   12133 	}
   12134 
   12135 	if ((mdic & MDIC_READY) == 0) {
   12136 		DPRINTF(sc, WM_DEBUG_GMII,
   12137 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12138 			device_xname(dev), phy, reg));
   12139 		return ETIMEDOUT;
   12140 	} else if (mdic & MDIC_E) {
   12141 		DPRINTF(sc, WM_DEBUG_GMII,
   12142 		    ("%s: MDIC write error: phy %d reg %d\n",
   12143 			device_xname(dev), phy, reg));
   12144 		return -1;
   12145 	}
   12146 
   12147 	/*
   12148 	 * Allow some time after each MDIC transaction to avoid
   12149 	 * reading duplicate data in the next MDIC transaction.
   12150 	 */
   12151 	if (sc->sc_type == WM_T_PCH2)
   12152 		delay(100);
   12153 
   12154 	return 0;
   12155 }
   12156 
   12157 /*
   12158  * wm_gmii_i82544_readreg:	[mii interface function]
   12159  *
   12160  *	Read a PHY register on the GMII.
   12161  */
   12162 static int
   12163 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12164 {
   12165 	struct wm_softc *sc = device_private(dev);
   12166 	int rv;
   12167 
   12168 	rv = sc->phy.acquire(sc);
   12169 	if (rv != 0) {
   12170 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12171 		return rv;
   12172 	}
   12173 
   12174 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12175 
   12176 	sc->phy.release(sc);
   12177 
   12178 	return rv;
   12179 }
   12180 
   12181 static int
   12182 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12183 {
   12184 	struct wm_softc *sc = device_private(dev);
   12185 	int rv;
   12186 
   12187 	switch (sc->sc_phytype) {
   12188 	case WMPHY_IGP:
   12189 	case WMPHY_IGP_2:
   12190 	case WMPHY_IGP_3:
   12191 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12192 			rv = wm_gmii_mdic_writereg(dev, phy,
   12193 			    IGPHY_PAGE_SELECT, reg);
   12194 			if (rv != 0)
   12195 				return rv;
   12196 		}
   12197 		break;
   12198 	default:
   12199 #ifdef WM_DEBUG
   12200 		if ((reg >> MII_ADDRBITS) != 0)
   12201 			device_printf(dev,
   12202 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12203 			    __func__, sc->sc_phytype, reg);
   12204 #endif
   12205 		break;
   12206 	}
   12207 
   12208 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12209 }
   12210 
   12211 /*
   12212  * wm_gmii_i82544_writereg:	[mii interface function]
   12213  *
   12214  *	Write a PHY register on the GMII.
   12215  */
   12216 static int
   12217 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12218 {
   12219 	struct wm_softc *sc = device_private(dev);
   12220 	int rv;
   12221 
   12222 	rv = sc->phy.acquire(sc);
   12223 	if (rv != 0) {
   12224 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12225 		return rv;
   12226 	}
   12227 
   12228 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12229 	sc->phy.release(sc);
   12230 
   12231 	return rv;
   12232 }
   12233 
   12234 static int
   12235 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12236 {
   12237 	struct wm_softc *sc = device_private(dev);
   12238 	int rv;
   12239 
   12240 	switch (sc->sc_phytype) {
   12241 	case WMPHY_IGP:
   12242 	case WMPHY_IGP_2:
   12243 	case WMPHY_IGP_3:
   12244 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12245 			rv = wm_gmii_mdic_writereg(dev, phy,
   12246 			    IGPHY_PAGE_SELECT, reg);
   12247 			if (rv != 0)
   12248 				return rv;
   12249 		}
   12250 		break;
   12251 	default:
   12252 #ifdef WM_DEBUG
   12253 		if ((reg >> MII_ADDRBITS) != 0)
   12254 			device_printf(dev,
   12255 			    "%s: PHYTYPE == 0x%x, addr = 0x%02x",
   12256 			    __func__, sc->sc_phytype, reg);
   12257 #endif
   12258 		break;
   12259 	}
   12260 
   12261 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12262 }
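
/*
 * For the IGP PHY types handled above, registers beyond
 * BME1000_MAX_MULTI_PAGE_REG are reached by first writing the full
 * page-encoded register number to IGPHY_PAGE_SELECT and then issuing the
 * MDIC access with only the low bits (MII_ADDRMASK).  A paged read, as a
 * minimal sketch (assumes the PHY semaphore is already held; "reg" and
 * "val" are illustrative):
 */
#if 0
	if (reg > BME1000_MAX_MULTI_PAGE_REG)
		(void)wm_gmii_mdic_writereg(dev, phy, IGPHY_PAGE_SELECT, reg);
	(void)wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, &val);
#endif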
   12263 
   12264 /*
   12265  * wm_gmii_i80003_readreg:	[mii interface function]
   12266  *
    12267  *	Read a PHY register on the Kumeran bus.
   12268  * This could be handled by the PHY layer if we didn't have to lock the
   12269  * resource ...
   12270  */
   12271 static int
   12272 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12273 {
   12274 	struct wm_softc *sc = device_private(dev);
   12275 	int page_select;
   12276 	uint16_t temp, temp2;
   12277 	int rv;
   12278 
   12279 	if (phy != 1) /* Only one PHY on kumeran bus */
   12280 		return -1;
   12281 
   12282 	rv = sc->phy.acquire(sc);
   12283 	if (rv != 0) {
   12284 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12285 		return rv;
   12286 	}
   12287 
   12288 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12289 		page_select = GG82563_PHY_PAGE_SELECT;
   12290 	else {
   12291 		/*
   12292 		 * Use Alternative Page Select register to access registers
   12293 		 * 30 and 31.
   12294 		 */
   12295 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12296 	}
   12297 	temp = reg >> GG82563_PAGE_SHIFT;
   12298 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12299 		goto out;
   12300 
   12301 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12302 		/*
    12303 		 * Wait an additional 200us to work around a bug with the
    12304 		 * ready bit in the MDIC register.
   12305 		 */
   12306 		delay(200);
   12307 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12308 		if ((rv != 0) || (temp2 != temp)) {
   12309 			device_printf(dev, "%s failed\n", __func__);
   12310 			rv = -1;
   12311 			goto out;
   12312 		}
   12313 		delay(200);
   12314 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12315 		delay(200);
   12316 	} else
   12317 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12318 
   12319 out:
   12320 	sc->phy.release(sc);
   12321 	return rv;
   12322 }
   12323 
   12324 /*
   12325  * wm_gmii_i80003_writereg:	[mii interface function]
   12326  *
    12327  *	Write a PHY register on the Kumeran bus.
   12328  * This could be handled by the PHY layer if we didn't have to lock the
   12329  * resource ...
   12330  */
   12331 static int
   12332 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12333 {
   12334 	struct wm_softc *sc = device_private(dev);
   12335 	int page_select, rv;
   12336 	uint16_t temp, temp2;
   12337 
   12338 	if (phy != 1) /* Only one PHY on kumeran bus */
   12339 		return -1;
   12340 
   12341 	rv = sc->phy.acquire(sc);
   12342 	if (rv != 0) {
   12343 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12344 		return rv;
   12345 	}
   12346 
   12347 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12348 		page_select = GG82563_PHY_PAGE_SELECT;
   12349 	else {
   12350 		/*
   12351 		 * Use Alternative Page Select register to access registers
   12352 		 * 30 and 31.
   12353 		 */
   12354 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12355 	}
   12356 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12357 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12358 		goto out;
   12359 
   12360 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12361 		/*
    12362 		 * Wait an additional 200us to work around a bug with the
    12363 		 * ready bit in the MDIC register.
   12364 		 */
   12365 		delay(200);
   12366 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12367 		if ((rv != 0) || (temp2 != temp)) {
   12368 			device_printf(dev, "%s failed\n", __func__);
   12369 			rv = -1;
   12370 			goto out;
   12371 		}
   12372 		delay(200);
   12373 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12374 		delay(200);
   12375 	} else
   12376 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12377 
   12378 out:
   12379 	sc->phy.release(sc);
   12380 	return rv;
   12381 }
   12382 
   12383 /*
   12384  * wm_gmii_bm_readreg:	[mii interface function]
   12385  *
    12386  *	Read a PHY register on the BM PHY.
   12387  * This could be handled by the PHY layer if we didn't have to lock the
   12388  * resource ...
   12389  */
   12390 static int
   12391 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12392 {
   12393 	struct wm_softc *sc = device_private(dev);
   12394 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12395 	int rv;
   12396 
   12397 	rv = sc->phy.acquire(sc);
   12398 	if (rv != 0) {
   12399 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12400 		return rv;
   12401 	}
   12402 
   12403 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12404 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12405 		    || (reg == 31)) ? 1 : phy;
   12406 	/* Page 800 works differently than the rest so it has its own func */
   12407 	if (page == BM_WUC_PAGE) {
   12408 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12409 		goto release;
   12410 	}
   12411 
   12412 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12413 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12414 		    && (sc->sc_type != WM_T_82583))
   12415 			rv = wm_gmii_mdic_writereg(dev, phy,
   12416 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12417 		else
   12418 			rv = wm_gmii_mdic_writereg(dev, phy,
   12419 			    BME1000_PHY_PAGE_SELECT, page);
   12420 		if (rv != 0)
   12421 			goto release;
   12422 	}
   12423 
   12424 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12425 
   12426 release:
   12427 	sc->phy.release(sc);
   12428 	return rv;
   12429 }
   12430 
   12431 /*
   12432  * wm_gmii_bm_writereg:	[mii interface function]
   12433  *
    12434  *	Write a PHY register on the BM PHY.
   12435  * This could be handled by the PHY layer if we didn't have to lock the
   12436  * resource ...
   12437  */
   12438 static int
   12439 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12440 {
   12441 	struct wm_softc *sc = device_private(dev);
   12442 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12443 	int rv;
   12444 
   12445 	rv = sc->phy.acquire(sc);
   12446 	if (rv != 0) {
   12447 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12448 		return rv;
   12449 	}
   12450 
   12451 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12452 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12453 		    || (reg == 31)) ? 1 : phy;
   12454 	/* Page 800 works differently than the rest so it has its own func */
   12455 	if (page == BM_WUC_PAGE) {
   12456 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12457 		goto release;
   12458 	}
   12459 
   12460 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12461 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12462 		    && (sc->sc_type != WM_T_82583))
   12463 			rv = wm_gmii_mdic_writereg(dev, phy,
   12464 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12465 		else
   12466 			rv = wm_gmii_mdic_writereg(dev, phy,
   12467 			    BME1000_PHY_PAGE_SELECT, page);
   12468 		if (rv != 0)
   12469 			goto release;
   12470 	}
   12471 
   12472 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12473 
   12474 release:
   12475 	sc->phy.release(sc);
   12476 	return rv;
   12477 }
   12478 
   12479 /*
   12480  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
    12481  *  @dev: pointer to the device
    12482  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
    12483  *
    12484  *  Assumes the semaphore is already acquired and phy_regp points to a valid
    12485  *  memory address to store the contents of the BM_WUC_ENABLE_REG register.
   12486  */
   12487 static int
   12488 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12489 {
   12490 #ifdef WM_DEBUG
   12491 	struct wm_softc *sc = device_private(dev);
   12492 #endif
   12493 	uint16_t temp;
   12494 	int rv;
   12495 
   12496 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12497 		device_xname(dev), __func__));
   12498 
   12499 	if (!phy_regp)
   12500 		return -1;
   12501 
   12502 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12503 
   12504 	/* Select Port Control Registers page */
   12505 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12506 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12507 	if (rv != 0)
   12508 		return rv;
   12509 
   12510 	/* Read WUCE and save it */
   12511 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12512 	if (rv != 0)
   12513 		return rv;
   12514 
   12515 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12516 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12517 	 */
   12518 	temp = *phy_regp;
   12519 	temp |= BM_WUC_ENABLE_BIT;
   12520 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12521 
   12522 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12523 		return rv;
   12524 
   12525 	/* Select Host Wakeup Registers page - caller now able to write
   12526 	 * registers on the Wakeup registers page
   12527 	 */
   12528 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12529 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12530 }
   12531 
   12532 /*
   12533  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
    12534  *  @dev: pointer to the device
    12535  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   12536  *
   12537  *  Restore BM_WUC_ENABLE_REG to its original value.
   12538  *
    12539  *  Assumes semaphore already acquired and *phy_regp is the contents of the
   12540  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12541  *  caller.
   12542  */
   12543 static int
   12544 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12545 {
   12546 #ifdef WM_DEBUG
   12547 	struct wm_softc *sc = device_private(dev);
   12548 #endif
    12549 	int rv;

   12550 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12551 		device_xname(dev), __func__));
   12552 
   12553 	if (!phy_regp)
   12554 		return -1;
   12555 
   12556 	/* Select Port Control Registers page */
    12557 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
    12558 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
    12559 	if (rv != 0)
    12560 		return rv;
    12561 
    12562 	/* Restore 769.17 to its original value */
    12563 	return wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12564 }
   12565 
   12566 /*
   12567  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12568  *  @dev: pointer to the device
   12569  *  @offset: register offset to be read or written
   12570  *  @val: pointer to the data to read or write
   12571  *  @rd: determines if operation is read or write
   12572  *  @page_set: BM_WUC_PAGE already set and access enabled
   12573  *
   12574  *  Read the PHY register at offset and store the retrieved information in
   12575  *  data, or write data to PHY register at offset.  Note the procedure to
   12576  *  access the PHY wakeup registers is different than reading the other PHY
   12577  *  registers. It works as such:
   12578  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12579  *  2) Set page to 800 for host (801 for the manageability firmware)
   12580  *  3) Write the address using the address opcode (0x11)
   12581  *  4) Read or write the data using the data opcode (0x12)
   12582  *  5) Restore 769.17.2 to its original value
   12583  *
   12584  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12585  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12586  *
   12587  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12588  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12589  *  is responsible for wm_{enable,disable}_phy_wakeup_reg_access_bm() calls).
   12590  */
   12591 static int
    12592 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12593     bool page_set)
   12594 {
   12595 	struct wm_softc *sc = device_private(dev);
   12596 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12597 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12598 	uint16_t wuce;
   12599 	int rv = 0;
   12600 
   12601 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12602 		device_xname(dev), __func__));
   12603 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12604 	if ((sc->sc_type == WM_T_PCH)
   12605 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12606 		device_printf(dev,
   12607 		    "Attempting to access page %d while gig enabled.\n", page);
   12608 	}
   12609 
   12610 	if (!page_set) {
   12611 		/* Enable access to PHY wakeup registers */
   12612 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12613 		if (rv != 0) {
   12614 			device_printf(dev,
   12615 			    "%s: Could not enable PHY wakeup reg access\n",
   12616 			    __func__);
   12617 			return rv;
   12618 		}
   12619 	}
   12620 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12621 		device_xname(sc->sc_dev), __func__, page, regnum));
   12622 
   12623 	/*
    12624 	 * Steps 3 and 4: Access the PHY wakeup register, as described
    12625 	 * in the function comment above.
   12626 	 */
   12627 
   12628 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12629 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12630 	if (rv != 0)
   12631 		return rv;
   12632 
   12633 	if (rd) {
   12634 		/* Read the Wakeup register page value using opcode 0x12 */
   12635 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12636 	} else {
   12637 		/* Write the Wakeup register page value using opcode 0x12 */
   12638 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12639 	}
   12640 	if (rv != 0)
   12641 		return rv;
   12642 
   12643 	if (!page_set)
   12644 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12645 
   12646 	return rv;
   12647 }
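
/*
 * Illustrative sketch (not compiled): batching several wakeup register
 * accesses with page_set == true, so that the enable/disable bracketing
 * is done once by the caller rather than per access.  `offset1' and
 * `offset2' are placeholders assumed to already encode BM_WUC_PAGE, and
 * sc->phy.acquire(sc) is assumed to have succeeded beforehand.
 */
#if 0
	uint16_t wuce, val1, val2;

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
		wm_access_phy_wakeup_reg_bm(dev, offset1, &val1, true, true);
		wm_access_phy_wakeup_reg_bm(dev, offset2, &val2, true, true);
		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	}
#endif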
   12648 
   12649 /*
   12650  * wm_gmii_hv_readreg:	[mii interface function]
   12651  *
    12652  *	Read a PHY register on the HV (PCH) PHY.
   12653  * This could be handled by the PHY layer if we didn't have to lock the
   12654  * resource ...
   12655  */
   12656 static int
   12657 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12658 {
   12659 	struct wm_softc *sc = device_private(dev);
   12660 	int rv;
   12661 
   12662 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12663 		device_xname(dev), __func__));
   12664 
   12665 	rv = sc->phy.acquire(sc);
   12666 	if (rv != 0) {
   12667 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12668 		return rv;
   12669 	}
   12670 
   12671 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12672 	sc->phy.release(sc);
   12673 	return rv;
   12674 }
   12675 
   12676 static int
   12677 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12678 {
   12679 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12680 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12681 	int rv;
   12682 
   12683 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12684 
   12685 	/* Page 800 works differently than the rest so it has its own func */
   12686 	if (page == BM_WUC_PAGE)
   12687 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12688 
   12689 	/*
   12690 	 * Lower than page 768 works differently than the rest so it has its
   12691 	 * own func
   12692 	 */
   12693 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    12694 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12695 		return -1;
   12696 	}
   12697 
   12698 	/*
   12699 	 * XXX I21[789] documents say that the SMBus Address register is at
   12700 	 * PHY address 01, Page 0 (not 768), Register 26.
   12701 	 */
   12702 	if (page == HV_INTC_FC_PAGE_START)
   12703 		page = 0;
   12704 
   12705 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12706 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12707 		    page << BME1000_PAGE_SHIFT);
   12708 		if (rv != 0)
   12709 			return rv;
   12710 	}
   12711 
   12712 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12713 }
   12714 
   12715 /*
   12716  * wm_gmii_hv_writereg:	[mii interface function]
   12717  *
    12718  *	Write a PHY register on the HV (PCH) PHY.
   12719  * This could be handled by the PHY layer if we didn't have to lock the
   12720  * resource ...
   12721  */
   12722 static int
   12723 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12724 {
   12725 	struct wm_softc *sc = device_private(dev);
   12726 	int rv;
   12727 
   12728 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12729 		device_xname(dev), __func__));
   12730 
   12731 	rv = sc->phy.acquire(sc);
   12732 	if (rv != 0) {
   12733 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12734 		return rv;
   12735 	}
   12736 
   12737 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12738 	sc->phy.release(sc);
   12739 
   12740 	return rv;
   12741 }
   12742 
   12743 static int
   12744 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12745 {
   12746 	struct wm_softc *sc = device_private(dev);
   12747 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12748 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12749 	int rv;
   12750 
   12751 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12752 
   12753 	/* Page 800 works differently than the rest so it has its own func */
   12754 	if (page == BM_WUC_PAGE)
   12755 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12756 		    false);
   12757 
   12758 	/*
   12759 	 * Lower than page 768 works differently than the rest so it has its
   12760 	 * own func
   12761 	 */
   12762 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    12763 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12764 		return -1;
   12765 	}
   12766 
   12767 	{
   12768 		/*
   12769 		 * XXX I21[789] documents say that the SMBus Address register
   12770 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12771 		 */
   12772 		if (page == HV_INTC_FC_PAGE_START)
   12773 			page = 0;
   12774 
   12775 		/*
   12776 		 * XXX Workaround MDIO accesses being disabled after entering
   12777 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12778 		 * register is set)
   12779 		 */
   12780 		if (sc->sc_phytype == WMPHY_82578) {
   12781 			struct mii_softc *child;
   12782 
   12783 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12784 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12785 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12786 			    && ((val & (1 << 11)) != 0)) {
   12787 				device_printf(dev, "XXX need workaround\n");
   12788 			}
   12789 		}
   12790 
   12791 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12792 			rv = wm_gmii_mdic_writereg(dev, 1,
   12793 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12794 			if (rv != 0)
   12795 				return rv;
   12796 		}
   12797 	}
   12798 
   12799 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12800 }
   12801 
   12802 /*
   12803  * wm_gmii_82580_readreg:	[mii interface function]
   12804  *
   12805  *	Read a PHY register on the 82580 and I350.
   12806  * This could be handled by the PHY layer if we didn't have to lock the
   12807  * resource ...
   12808  */
   12809 static int
   12810 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12811 {
   12812 	struct wm_softc *sc = device_private(dev);
   12813 	int rv;
   12814 
   12815 	rv = sc->phy.acquire(sc);
   12816 	if (rv != 0) {
   12817 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12818 		return rv;
   12819 	}
   12820 
   12821 #ifdef DIAGNOSTIC
   12822 	if (reg > MII_ADDRMASK) {
   12823 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12824 		    __func__, sc->sc_phytype, reg);
   12825 		reg &= MII_ADDRMASK;
   12826 	}
   12827 #endif
   12828 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12829 
   12830 	sc->phy.release(sc);
   12831 	return rv;
   12832 }
   12833 
   12834 /*
   12835  * wm_gmii_82580_writereg:	[mii interface function]
   12836  *
   12837  *	Write a PHY register on the 82580 and I350.
   12838  * This could be handled by the PHY layer if we didn't have to lock the
   12839  * resource ...
   12840  */
   12841 static int
   12842 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12843 {
   12844 	struct wm_softc *sc = device_private(dev);
   12845 	int rv;
   12846 
   12847 	rv = sc->phy.acquire(sc);
   12848 	if (rv != 0) {
   12849 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12850 		return rv;
   12851 	}
   12852 
   12853 #ifdef DIAGNOSTIC
   12854 	if (reg > MII_ADDRMASK) {
   12855 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12856 		    __func__, sc->sc_phytype, reg);
   12857 		reg &= MII_ADDRMASK;
   12858 	}
   12859 #endif
   12860 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12861 
   12862 	sc->phy.release(sc);
   12863 	return rv;
   12864 }
   12865 
   12866 /*
   12867  * wm_gmii_gs40g_readreg:	[mii interface function]
   12868  *
    12869  *	Read a PHY register on the I210 and I211.
   12870  * This could be handled by the PHY layer if we didn't have to lock the
   12871  * resource ...
   12872  */
   12873 static int
   12874 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12875 {
   12876 	struct wm_softc *sc = device_private(dev);
   12877 	int page, offset;
   12878 	int rv;
   12879 
   12880 	/* Acquire semaphore */
   12881 	rv = sc->phy.acquire(sc);
   12882 	if (rv != 0) {
   12883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12884 		return rv;
   12885 	}
   12886 
   12887 	/* Page select */
   12888 	page = reg >> GS40G_PAGE_SHIFT;
   12889 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12890 	if (rv != 0)
   12891 		goto release;
   12892 
   12893 	/* Read reg */
   12894 	offset = reg & GS40G_OFFSET_MASK;
   12895 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12896 
   12897 release:
   12898 	sc->phy.release(sc);
   12899 	return rv;
   12900 }
   12901 
   12902 /*
   12903  * wm_gmii_gs40g_writereg:	[mii interface function]
   12904  *
   12905  *	Write a PHY register on the I210 and I211.
   12906  * This could be handled by the PHY layer if we didn't have to lock the
   12907  * resource ...
   12908  */
   12909 static int
   12910 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12911 {
   12912 	struct wm_softc *sc = device_private(dev);
   12913 	uint16_t page;
   12914 	int offset, rv;
   12915 
   12916 	/* Acquire semaphore */
   12917 	rv = sc->phy.acquire(sc);
   12918 	if (rv != 0) {
   12919 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12920 		return rv;
   12921 	}
   12922 
   12923 	/* Page select */
   12924 	page = reg >> GS40G_PAGE_SHIFT;
   12925 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12926 	if (rv != 0)
   12927 		goto release;
   12928 
   12929 	/* Write reg */
   12930 	offset = reg & GS40G_OFFSET_MASK;
   12931 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12932 
   12933 release:
   12934 	/* Release semaphore */
   12935 	sc->phy.release(sc);
   12936 	return rv;
   12937 }
   12938 
   12939 /*
   12940  * wm_gmii_statchg:	[mii interface function]
   12941  *
   12942  *	Callback from MII layer when media changes.
   12943  */
   12944 static void
   12945 wm_gmii_statchg(struct ifnet *ifp)
   12946 {
   12947 	struct wm_softc *sc = ifp->if_softc;
   12948 	struct mii_data *mii = &sc->sc_mii;
   12949 
   12950 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12951 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12952 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12953 
   12954 	/* Get flow control negotiation result. */
   12955 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12956 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12957 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12958 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12959 	}
   12960 
   12961 	if (sc->sc_flowflags & IFM_FLOW) {
   12962 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12963 			sc->sc_ctrl |= CTRL_TFCE;
   12964 			sc->sc_fcrtl |= FCRTL_XONE;
   12965 		}
   12966 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12967 			sc->sc_ctrl |= CTRL_RFCE;
   12968 	}
   12969 
   12970 	if (mii->mii_media_active & IFM_FDX) {
   12971 		DPRINTF(sc, WM_DEBUG_LINK,
   12972 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12973 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12974 	} else {
   12975 		DPRINTF(sc, WM_DEBUG_LINK,
   12976 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12977 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12978 	}
   12979 
   12980 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12981 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12982 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12983 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12984 	if (sc->sc_type == WM_T_80003) {
   12985 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12986 		case IFM_1000_T:
   12987 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12988 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12989 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12990 			break;
   12991 		default:
   12992 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12993 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12994 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12995 			break;
   12996 		}
   12997 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12998 	}
   12999 }
   13000 
   13001 /* kumeran related (80003, ICH* and PCH*) */
   13002 
   13003 /*
   13004  * wm_kmrn_readreg:
   13005  *
   13006  *	Read a kumeran register
   13007  */
   13008 static int
   13009 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13010 {
   13011 	int rv;
   13012 
   13013 	if (sc->sc_type == WM_T_80003)
   13014 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13015 	else
   13016 		rv = sc->phy.acquire(sc);
   13017 	if (rv != 0) {
   13018 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13019 		    __func__);
   13020 		return rv;
   13021 	}
   13022 
   13023 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13024 
   13025 	if (sc->sc_type == WM_T_80003)
   13026 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13027 	else
   13028 		sc->phy.release(sc);
   13029 
   13030 	return rv;
   13031 }
   13032 
   13033 static int
   13034 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13035 {
   13036 
   13037 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13038 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13039 	    KUMCTRLSTA_REN);
   13040 	CSR_WRITE_FLUSH(sc);
   13041 	delay(2);
   13042 
   13043 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13044 
   13045 	return 0;
   13046 }
   13047 
   13048 /*
   13049  * wm_kmrn_writereg:
   13050  *
   13051  *	Write a kumeran register
   13052  */
   13053 static int
   13054 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13055 {
   13056 	int rv;
   13057 
   13058 	if (sc->sc_type == WM_T_80003)
   13059 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13060 	else
   13061 		rv = sc->phy.acquire(sc);
   13062 	if (rv != 0) {
   13063 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13064 		    __func__);
   13065 		return rv;
   13066 	}
   13067 
   13068 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13069 
   13070 	if (sc->sc_type == WM_T_80003)
   13071 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13072 	else
   13073 		sc->phy.release(sc);
   13074 
   13075 	return rv;
   13076 }
   13077 
   13078 static int
   13079 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13080 {
   13081 
   13082 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13083 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13084 
   13085 	return 0;
   13086 }
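
/*
 * Illustrative sketch (not compiled): a kumeran read-modify-write using
 * the wrappers above, which acquire the right semaphore (SWFW on the
 * 80003, the PHY lock otherwise).  The offset is borrowed from
 * wm_gmii_statchg() above purely as a familiar example.
 */
#if 0
	uint16_t kmval;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &kmval) == 0) {
		kmval = KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, kmval);
	}
#endif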
   13087 
   13088 /*
    13089  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   13090  * This access method is different from IEEE MMD.
   13091  */
   13092 static int
   13093 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13094 {
   13095 	struct wm_softc *sc = device_private(dev);
   13096 	int rv;
   13097 
   13098 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13099 	if (rv != 0)
   13100 		return rv;
   13101 
   13102 	if (rd)
   13103 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13104 	else
   13105 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13106 	return rv;
   13107 }
   13108 
   13109 static int
   13110 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13111 {
   13112 
   13113 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13114 }
   13115 
   13116 static int
   13117 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13118 {
   13119 
   13120 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13121 }
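
/*
 * Illustrative sketch (not compiled): an EMI read-modify-write.  The
 * EMI helpers assume the PHY semaphore is already held, so bracket them
 * with sc->phy.acquire()/release().  `emi_reg' and the bit set below
 * are placeholders, not a real EMI register layout.
 */
#if 0
	uint16_t emival;

	if (sc->phy.acquire(sc) == 0) {
		if (wm_read_emi_reg_locked(dev, emi_reg, &emival) == 0)
			wm_write_emi_reg_locked(dev, emi_reg, emival | 0x1);
		sc->phy.release(sc);
	}
#endif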
   13122 
   13123 /* SGMII related */
   13124 
   13125 /*
   13126  * wm_sgmii_uses_mdio
   13127  *
   13128  * Check whether the transaction is to the internal PHY or the external
   13129  * MDIO interface. Return true if it's MDIO.
   13130  */
   13131 static bool
   13132 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13133 {
   13134 	uint32_t reg;
   13135 	bool ismdio = false;
   13136 
   13137 	switch (sc->sc_type) {
   13138 	case WM_T_82575:
   13139 	case WM_T_82576:
   13140 		reg = CSR_READ(sc, WMREG_MDIC);
   13141 		ismdio = ((reg & MDIC_DEST) != 0);
   13142 		break;
   13143 	case WM_T_82580:
   13144 	case WM_T_I350:
   13145 	case WM_T_I354:
   13146 	case WM_T_I210:
   13147 	case WM_T_I211:
   13148 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13149 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13150 		break;
   13151 	default:
   13152 		break;
   13153 	}
   13154 
   13155 	return ismdio;
   13156 }
   13157 
    13158 /* Set up the internal SGMII PHY for SFP */
   13159 static void
   13160 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13161 {
   13162 	uint16_t id1, id2, phyreg;
   13163 	int i, rv;
   13164 
   13165 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13166 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13167 		return;
   13168 
   13169 	for (i = 0; i < MII_NPHY; i++) {
   13170 		sc->phy.no_errprint = true;
   13171 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13172 		if (rv != 0)
   13173 			continue;
   13174 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13175 		if (rv != 0)
   13176 			continue;
   13177 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13178 			continue;
   13179 		sc->phy.no_errprint = false;
   13180 
   13181 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13182 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13183 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13184 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13185 		break;
   13186 	}
    13188 }
   13189 
   13190 /*
   13191  * wm_sgmii_readreg:	[mii interface function]
   13192  *
   13193  *	Read a PHY register on the SGMII
   13194  * This could be handled by the PHY layer if we didn't have to lock the
   13195  * resource ...
   13196  */
   13197 static int
   13198 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13199 {
   13200 	struct wm_softc *sc = device_private(dev);
   13201 	int rv;
   13202 
   13203 	rv = sc->phy.acquire(sc);
   13204 	if (rv != 0) {
   13205 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13206 		return rv;
   13207 	}
   13208 
   13209 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13210 
   13211 	sc->phy.release(sc);
   13212 	return rv;
   13213 }
   13214 
   13215 static int
   13216 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13217 {
   13218 	struct wm_softc *sc = device_private(dev);
   13219 	uint32_t i2ccmd;
   13220 	int i, rv = 0;
   13221 
   13222 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13223 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13224 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13225 
   13226 	/* Poll the ready bit */
   13227 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13228 		delay(50);
   13229 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13230 		if (i2ccmd & I2CCMD_READY)
   13231 			break;
   13232 	}
   13233 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13234 		device_printf(dev, "I2CCMD Read did not complete\n");
   13235 		rv = ETIMEDOUT;
   13236 	}
   13237 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13238 		if (!sc->phy.no_errprint)
   13239 			device_printf(dev, "I2CCMD Error bit set\n");
   13240 		rv = EIO;
   13241 	}
   13242 
   13243 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13244 
   13245 	return rv;
   13246 }
   13247 
   13248 /*
   13249  * wm_sgmii_writereg:	[mii interface function]
   13250  *
   13251  *	Write a PHY register on the SGMII.
   13252  * This could be handled by the PHY layer if we didn't have to lock the
   13253  * resource ...
   13254  */
   13255 static int
   13256 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13257 {
   13258 	struct wm_softc *sc = device_private(dev);
   13259 	int rv;
   13260 
   13261 	rv = sc->phy.acquire(sc);
   13262 	if (rv != 0) {
   13263 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13264 		return rv;
   13265 	}
   13266 
   13267 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13268 
   13269 	sc->phy.release(sc);
   13270 
   13271 	return rv;
   13272 }
   13273 
   13274 static int
   13275 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13276 {
   13277 	struct wm_softc *sc = device_private(dev);
   13278 	uint32_t i2ccmd;
   13279 	uint16_t swapdata;
   13280 	int rv = 0;
   13281 	int i;
   13282 
   13283 	/* Swap the data bytes for the I2C interface */
   13284 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13285 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13286 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13287 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13288 
   13289 	/* Poll the ready bit */
   13290 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13291 		delay(50);
   13292 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13293 		if (i2ccmd & I2CCMD_READY)
   13294 			break;
   13295 	}
   13296 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13297 		device_printf(dev, "I2CCMD Write did not complete\n");
   13298 		rv = ETIMEDOUT;
   13299 	}
   13300 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13301 		device_printf(dev, "I2CCMD Error bit set\n");
   13302 		rv = EIO;
   13303 	}
   13304 
   13305 	return rv;
   13306 }
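
/*
 * Illustrative sketch (not compiled): a read-modify-write through the
 * top-level SGMII accessors, which handle the semaphore and the I2C
 * byte swap internally.  PHY address 0 is a placeholder; MII_ANAR and
 * ANAR_TX_FD are the standard mii(4) definitions.
 */
#if 0
	uint16_t anar;

	if (wm_sgmii_readreg(dev, 0, MII_ANAR, &anar) == 0)
		wm_sgmii_writereg(dev, 0, MII_ANAR, anar | ANAR_TX_FD);
#endif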
   13307 
   13308 /* TBI related */
   13309 
   13310 static bool
   13311 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13312 {
   13313 	bool sig;
   13314 
   13315 	sig = ctrl & CTRL_SWDPIN(1);
   13316 
   13317 	/*
   13318 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13319 	 * detect a signal, 1 if they don't.
   13320 	 */
   13321 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13322 		sig = !sig;
   13323 
   13324 	return sig;
   13325 }
   13326 
   13327 /*
   13328  * wm_tbi_mediainit:
   13329  *
   13330  *	Initialize media for use on 1000BASE-X devices.
   13331  */
   13332 static void
   13333 wm_tbi_mediainit(struct wm_softc *sc)
   13334 {
   13335 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13336 	const char *sep = "";
   13337 
   13338 	if (sc->sc_type < WM_T_82543)
   13339 		sc->sc_tipg = TIPG_WM_DFLT;
   13340 	else
   13341 		sc->sc_tipg = TIPG_LG_DFLT;
   13342 
   13343 	sc->sc_tbi_serdes_anegticks = 5;
   13344 
   13345 	/* Initialize our media structures */
   13346 	sc->sc_mii.mii_ifp = ifp;
   13347 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13348 
   13349 	ifp->if_baudrate = IF_Gbps(1);
   13350 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13351 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13352 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13353 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13354 		    sc->sc_core_lock);
   13355 	} else {
   13356 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13357 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13358 	}
   13359 
   13360 	/*
   13361 	 * SWD Pins:
   13362 	 *
   13363 	 *	0 = Link LED (output)
   13364 	 *	1 = Loss Of Signal (input)
   13365 	 */
   13366 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13367 
   13368 	/* XXX Perhaps this is only for TBI */
   13369 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13370 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13371 
   13372 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13373 		sc->sc_ctrl &= ~CTRL_LRST;
   13374 
   13375 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13376 
   13377 #define	ADD(ss, mm, dd)							  \
   13378 do {									  \
   13379 	aprint_normal("%s%s", sep, ss);					  \
   13380 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13381 	sep = ", ";							  \
   13382 } while (/*CONSTCOND*/0)
   13383 
   13384 	aprint_normal_dev(sc->sc_dev, "");
   13385 
   13386 	if (sc->sc_type == WM_T_I354) {
   13387 		uint32_t status;
   13388 
   13389 		status = CSR_READ(sc, WMREG_STATUS);
   13390 		if (((status & STATUS_2P5_SKU) != 0)
   13391 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    13392 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   13393 		} else
    13394 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   13395 	} else if (sc->sc_type == WM_T_82545) {
   13396 		/* Only 82545 is LX (XXX except SFP) */
   13397 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13398 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13399 	} else if (sc->sc_sfptype != 0) {
   13400 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13401 		switch (sc->sc_sfptype) {
   13402 		default:
   13403 		case SFF_SFP_ETH_FLAGS_1000SX:
   13404 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13405 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13406 			break;
   13407 		case SFF_SFP_ETH_FLAGS_1000LX:
   13408 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13409 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13410 			break;
   13411 		case SFF_SFP_ETH_FLAGS_1000CX:
   13412 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13413 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13414 			break;
   13415 		case SFF_SFP_ETH_FLAGS_1000T:
   13416 			ADD("1000baseT", IFM_1000_T, 0);
   13417 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13418 			break;
   13419 		case SFF_SFP_ETH_FLAGS_100FX:
   13420 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13421 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13422 			break;
   13423 		}
   13424 	} else {
   13425 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13426 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13427 	}
   13428 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13429 	aprint_normal("\n");
   13430 
   13431 #undef ADD
   13432 
   13433 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13434 }
   13435 
   13436 /*
   13437  * wm_tbi_mediachange:	[ifmedia interface function]
   13438  *
   13439  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13440  */
   13441 static int
   13442 wm_tbi_mediachange(struct ifnet *ifp)
   13443 {
   13444 	struct wm_softc *sc = ifp->if_softc;
   13445 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13446 	uint32_t status, ctrl;
   13447 	bool signal;
   13448 	int i;
   13449 
   13450 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13451 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13452 		/* XXX need some work for >= 82571 and < 82575 */
   13453 		if (sc->sc_type < WM_T_82575)
   13454 			return 0;
   13455 	}
   13456 
   13457 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13458 	    || (sc->sc_type >= WM_T_82575))
   13459 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13460 
   13461 	sc->sc_ctrl &= ~CTRL_LRST;
   13462 	sc->sc_txcw = TXCW_ANE;
   13463 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13464 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13465 	else if (ife->ifm_media & IFM_FDX)
   13466 		sc->sc_txcw |= TXCW_FD;
   13467 	else
   13468 		sc->sc_txcw |= TXCW_HD;
   13469 
   13470 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13471 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13472 
    13473 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13474 		device_xname(sc->sc_dev), sc->sc_txcw));
   13475 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13476 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13477 	CSR_WRITE_FLUSH(sc);
   13478 	delay(1000);
   13479 
   13480 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13481 	signal = wm_tbi_havesignal(sc, ctrl);
   13482 
   13483 	DPRINTF(sc, WM_DEBUG_LINK,
   13484 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13485 
   13486 	if (signal) {
   13487 		/* Have signal; wait for the link to come up. */
   13488 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13489 			delay(10000);
   13490 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13491 				break;
   13492 		}
   13493 
   13494 		DPRINTF(sc, WM_DEBUG_LINK,
   13495 		    ("%s: i = %d after waiting for link\n",
   13496 			device_xname(sc->sc_dev), i));
   13497 
   13498 		status = CSR_READ(sc, WMREG_STATUS);
   13499 		DPRINTF(sc, WM_DEBUG_LINK,
   13500 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13501 			__PRIxBIT "\n",
   13502 			device_xname(sc->sc_dev), status, STATUS_LU));
   13503 		if (status & STATUS_LU) {
   13504 			/* Link is up. */
   13505 			DPRINTF(sc, WM_DEBUG_LINK,
   13506 			    ("%s: LINK: set media -> link up %s\n",
   13507 				device_xname(sc->sc_dev),
   13508 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13509 
   13510 			/*
    13511 			 * NOTE: the MAC updates TFCE and RFCE in CTRL
    13512 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   13513 			 */
   13514 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13515 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13516 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13517 			if (status & STATUS_FD)
   13518 				sc->sc_tctl |=
   13519 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13520 			else
   13521 				sc->sc_tctl |=
   13522 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13523 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13524 				sc->sc_fcrtl |= FCRTL_XONE;
   13525 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13526 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13527 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13528 			sc->sc_tbi_linkup = 1;
   13529 		} else {
   13530 			if (i == WM_LINKUP_TIMEOUT)
   13531 				wm_check_for_link(sc);
   13532 			/* Link is down. */
   13533 			DPRINTF(sc, WM_DEBUG_LINK,
   13534 			    ("%s: LINK: set media -> link down\n",
   13535 				device_xname(sc->sc_dev)));
   13536 			sc->sc_tbi_linkup = 0;
   13537 		}
   13538 	} else {
   13539 		DPRINTF(sc, WM_DEBUG_LINK,
   13540 		    ("%s: LINK: set media -> no signal\n",
   13541 			device_xname(sc->sc_dev)));
   13542 		sc->sc_tbi_linkup = 0;
   13543 	}
   13544 
   13545 	wm_tbi_serdes_set_linkled(sc);
   13546 
   13547 	return 0;
   13548 }
   13549 
   13550 /*
   13551  * wm_tbi_mediastatus:	[ifmedia interface function]
   13552  *
   13553  *	Get the current interface media status on a 1000BASE-X device.
   13554  */
   13555 static void
   13556 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13557 {
   13558 	struct wm_softc *sc = ifp->if_softc;
   13559 	uint32_t ctrl, status;
   13560 
   13561 	ifmr->ifm_status = IFM_AVALID;
   13562 	ifmr->ifm_active = IFM_ETHER;
   13563 
   13564 	status = CSR_READ(sc, WMREG_STATUS);
   13565 	if ((status & STATUS_LU) == 0) {
   13566 		ifmr->ifm_active |= IFM_NONE;
   13567 		return;
   13568 	}
   13569 
   13570 	ifmr->ifm_status |= IFM_ACTIVE;
   13571 	/* Only 82545 is LX */
   13572 	if (sc->sc_type == WM_T_82545)
   13573 		ifmr->ifm_active |= IFM_1000_LX;
   13574 	else
   13575 		ifmr->ifm_active |= IFM_1000_SX;
   13576 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13577 		ifmr->ifm_active |= IFM_FDX;
   13578 	else
   13579 		ifmr->ifm_active |= IFM_HDX;
   13580 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13581 	if (ctrl & CTRL_RFCE)
   13582 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13583 	if (ctrl & CTRL_TFCE)
   13584 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13585 }
   13586 
   13587 /* XXX TBI only */
   13588 static int
   13589 wm_check_for_link(struct wm_softc *sc)
   13590 {
   13591 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13592 	uint32_t rxcw;
   13593 	uint32_t ctrl;
   13594 	uint32_t status;
   13595 	bool signal;
   13596 
   13597 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13598 		device_xname(sc->sc_dev), __func__));
   13599 
   13600 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13601 		/* XXX need some work for >= 82571 */
   13602 		if (sc->sc_type >= WM_T_82571) {
   13603 			sc->sc_tbi_linkup = 1;
   13604 			return 0;
   13605 		}
   13606 	}
   13607 
   13608 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13609 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13610 	status = CSR_READ(sc, WMREG_STATUS);
   13611 	signal = wm_tbi_havesignal(sc, ctrl);
   13612 
   13613 	DPRINTF(sc, WM_DEBUG_LINK,
   13614 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13615 		device_xname(sc->sc_dev), __func__, signal,
   13616 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13617 
   13618 	/*
   13619 	 * SWDPIN   LU RXCW
   13620 	 *	0    0	  0
   13621 	 *	0    0	  1	(should not happen)
   13622 	 *	0    1	  0	(should not happen)
   13623 	 *	0    1	  1	(should not happen)
   13624 	 *	1    0	  0	Disable autonego and force linkup
   13625 	 *	1    0	  1	got /C/ but not linkup yet
   13626 	 *	1    1	  0	(linkup)
   13627 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13628 	 *
   13629 	 */
   13630 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13631 		DPRINTF(sc, WM_DEBUG_LINK,
   13632 		    ("%s: %s: force linkup and fullduplex\n",
   13633 			device_xname(sc->sc_dev), __func__));
   13634 		sc->sc_tbi_linkup = 0;
   13635 		/* Disable auto-negotiation in the TXCW register */
   13636 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13637 
   13638 		/*
   13639 		 * Force link-up and also force full-duplex.
   13640 		 *
    13641 		 * NOTE: the MAC has updated TFCE and RFCE in CTRL
    13642 		 * automatically, so update sc->sc_ctrl from it.
   13643 		 */
   13644 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13645 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13646 	} else if (((status & STATUS_LU) != 0)
   13647 	    && ((rxcw & RXCW_C) != 0)
   13648 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13649 		sc->sc_tbi_linkup = 1;
   13650 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13651 			device_xname(sc->sc_dev), __func__));
   13652 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13653 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13654 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13655 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13656 			device_xname(sc->sc_dev), __func__));
   13657 	} else {
   13658 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13659 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13660 			status));
   13661 	}
   13662 
   13663 	return 0;
   13664 }
   13665 
   13666 /*
   13667  * wm_tbi_tick:
   13668  *
   13669  *	Check the link on TBI devices.
   13670  *	This function acts as mii_tick().
   13671  */
   13672 static void
   13673 wm_tbi_tick(struct wm_softc *sc)
   13674 {
   13675 	struct mii_data *mii = &sc->sc_mii;
   13676 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13677 	uint32_t status;
   13678 
   13679 	KASSERT(mutex_owned(sc->sc_core_lock));
   13680 
   13681 	status = CSR_READ(sc, WMREG_STATUS);
   13682 
   13683 	/* XXX is this needed? */
   13684 	(void)CSR_READ(sc, WMREG_RXCW);
   13685 	(void)CSR_READ(sc, WMREG_CTRL);
   13686 
   13687 	/* set link status */
   13688 	if ((status & STATUS_LU) == 0) {
   13689 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13690 			device_xname(sc->sc_dev)));
   13691 		sc->sc_tbi_linkup = 0;
   13692 	} else if (sc->sc_tbi_linkup == 0) {
   13693 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13694 			device_xname(sc->sc_dev),
   13695 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13696 		sc->sc_tbi_linkup = 1;
   13697 		sc->sc_tbi_serdes_ticks = 0;
   13698 	}
   13699 
   13700 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13701 		goto setled;
   13702 
   13703 	if ((status & STATUS_LU) == 0) {
   13704 		sc->sc_tbi_linkup = 0;
   13705 		/* If the timer expired, retry autonegotiation */
   13706 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13707 		    && (++sc->sc_tbi_serdes_ticks
   13708 			>= sc->sc_tbi_serdes_anegticks)) {
   13709 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13710 				device_xname(sc->sc_dev), __func__));
   13711 			sc->sc_tbi_serdes_ticks = 0;
   13712 			/*
   13713 			 * Reset the link, and let autonegotiation do
   13714 			 * its thing
   13715 			 */
   13716 			sc->sc_ctrl |= CTRL_LRST;
   13717 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13718 			CSR_WRITE_FLUSH(sc);
   13719 			delay(1000);
   13720 			sc->sc_ctrl &= ~CTRL_LRST;
   13721 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13722 			CSR_WRITE_FLUSH(sc);
   13723 			delay(1000);
   13724 			CSR_WRITE(sc, WMREG_TXCW,
   13725 			    sc->sc_txcw & ~TXCW_ANE);
   13726 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13727 		}
   13728 	}
   13729 
   13730 setled:
   13731 	wm_tbi_serdes_set_linkled(sc);
   13732 }
   13733 
   13734 /* SERDES related */
   13735 static void
   13736 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13737 {
   13738 	uint32_t reg;
   13739 
   13740 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13741 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13742 		return;
   13743 
   13744 	/* Enable PCS to turn on link */
   13745 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13746 	reg |= PCS_CFG_PCS_EN;
   13747 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13748 
   13749 	/* Power up the laser */
   13750 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13751 	reg &= ~CTRL_EXT_SWDPIN(3);
   13752 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13753 
   13754 	/* Flush the write to verify completion */
   13755 	CSR_WRITE_FLUSH(sc);
   13756 	delay(1000);
   13757 }
   13758 
   13759 static int
   13760 wm_serdes_mediachange(struct ifnet *ifp)
   13761 {
   13762 	struct wm_softc *sc = ifp->if_softc;
   13763 	bool pcs_autoneg = true; /* XXX */
   13764 	uint32_t ctrl_ext, pcs_lctl, reg;
   13765 
   13766 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13767 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13768 		return 0;
   13769 
   13770 	/* XXX Currently, this function is not called on 8257[12] */
   13771 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13772 	    || (sc->sc_type >= WM_T_82575))
   13773 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13774 
   13775 	/* Power on the sfp cage if present */
   13776 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13777 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13778 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13779 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13780 
   13781 	sc->sc_ctrl |= CTRL_SLU;
   13782 
   13783 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13784 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13785 
   13786 		reg = CSR_READ(sc, WMREG_CONNSW);
   13787 		reg |= CONNSW_ENRGSRC;
   13788 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13789 	}
   13790 
   13791 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13792 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13793 	case CTRL_EXT_LINK_MODE_SGMII:
   13794 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13795 		pcs_autoneg = true;
   13796 		/* Autoneg time out should be disabled for SGMII mode */
   13797 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13798 		break;
   13799 	case CTRL_EXT_LINK_MODE_1000KX:
   13800 		pcs_autoneg = false;
   13801 		/* FALLTHROUGH */
   13802 	default:
   13803 		if ((sc->sc_type == WM_T_82575)
   13804 		    || (sc->sc_type == WM_T_82576)) {
   13805 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13806 				pcs_autoneg = false;
   13807 		}
   13808 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13809 		    | CTRL_FRCFDX;
   13810 
   13811 		/* Set speed of 1000/Full if speed/duplex is forced */
   13812 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13813 	}
   13814 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13815 
   13816 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13817 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13818 
   13819 	if (pcs_autoneg) {
   13820 		/* Set PCS register for autoneg */
   13821 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13822 
   13823 		/* Disable force flow control for autoneg */
   13824 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13825 
   13826 		/* Configure flow control advertisement for autoneg */
   13827 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13828 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13829 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13830 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13831 	} else
   13832 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13833 
   13834 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13835 
   13836 	return 0;
   13837 }
   13838 
   13839 static void
   13840 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13841 {
   13842 	struct wm_softc *sc = ifp->if_softc;
   13843 	struct mii_data *mii = &sc->sc_mii;
   13844 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13845 	uint32_t pcs_adv, pcs_lpab, reg;
   13846 
   13847 	ifmr->ifm_status = IFM_AVALID;
   13848 	ifmr->ifm_active = IFM_ETHER;
   13849 
   13850 	/* Check PCS */
   13851 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13852 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13853 		ifmr->ifm_active |= IFM_NONE;
   13854 		sc->sc_tbi_linkup = 0;
   13855 		goto setled;
   13856 	}
   13857 
   13858 	sc->sc_tbi_linkup = 1;
   13859 	ifmr->ifm_status |= IFM_ACTIVE;
   13860 	if (sc->sc_type == WM_T_I354) {
   13861 		uint32_t status;
   13862 
   13863 		status = CSR_READ(sc, WMREG_STATUS);
   13864 		if (((status & STATUS_2P5_SKU) != 0)
   13865 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13866 			ifmr->ifm_active |= IFM_2500_KX;
   13867 		} else
   13868 			ifmr->ifm_active |= IFM_1000_KX;
   13869 	} else {
   13870 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13871 		case PCS_LSTS_SPEED_10:
   13872 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13873 			break;
   13874 		case PCS_LSTS_SPEED_100:
   13875 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13876 			break;
   13877 		case PCS_LSTS_SPEED_1000:
   13878 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13879 			break;
   13880 		default:
   13881 			device_printf(sc->sc_dev, "Unknown speed\n");
   13882 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13883 			break;
   13884 		}
   13885 	}
   13886 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13887 	if ((reg & PCS_LSTS_FDX) != 0)
   13888 		ifmr->ifm_active |= IFM_FDX;
   13889 	else
   13890 		ifmr->ifm_active |= IFM_HDX;
   13891 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13892 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13893 		/* Check flow */
   13894 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13895 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13896 			DPRINTF(sc, WM_DEBUG_LINK,
   13897 			    ("XXX LINKOK but not ACOMP\n"));
   13898 			goto setled;
   13899 		}
   13900 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13901 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13902 		DPRINTF(sc, WM_DEBUG_LINK,
   13903 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13904 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13905 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13906 			mii->mii_media_active |= IFM_FLOW
   13907 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13908 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13909 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13910 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13911 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13912 			mii->mii_media_active |= IFM_FLOW
   13913 			    | IFM_ETH_TXPAUSE;
   13914 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13915 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13916 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13917 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13918 			mii->mii_media_active |= IFM_FLOW
   13919 			    | IFM_ETH_RXPAUSE;
   13920 		}
   13921 	}
   13922 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13923 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13924 setled:
   13925 	wm_tbi_serdes_set_linkled(sc);
   13926 }
   13927 
   13928 /*
   13929  * wm_serdes_tick:
   13930  *
   13931  *	Check the link on serdes devices.
   13932  */
   13933 static void
   13934 wm_serdes_tick(struct wm_softc *sc)
   13935 {
   13936 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13937 	struct mii_data *mii = &sc->sc_mii;
   13938 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13939 	uint32_t reg;
   13940 
   13941 	KASSERT(mutex_owned(sc->sc_core_lock));
   13942 
   13943 	mii->mii_media_status = IFM_AVALID;
   13944 	mii->mii_media_active = IFM_ETHER;
   13945 
   13946 	/* Check PCS */
   13947 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13948 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13949 		mii->mii_media_status |= IFM_ACTIVE;
   13950 		sc->sc_tbi_linkup = 1;
   13951 		sc->sc_tbi_serdes_ticks = 0;
   13952 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13953 		if ((reg & PCS_LSTS_FDX) != 0)
   13954 			mii->mii_media_active |= IFM_FDX;
   13955 		else
   13956 			mii->mii_media_active |= IFM_HDX;
   13957 	} else {
    13958 		mii->mii_media_active |= IFM_NONE;
   13959 		sc->sc_tbi_linkup = 0;
   13960 		/* If the timer expired, retry autonegotiation */
   13961 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13962 		    && (++sc->sc_tbi_serdes_ticks
   13963 			>= sc->sc_tbi_serdes_anegticks)) {
   13964 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13965 				device_xname(sc->sc_dev), __func__));
   13966 			sc->sc_tbi_serdes_ticks = 0;
   13967 			/* XXX */
   13968 			wm_serdes_mediachange(ifp);
   13969 		}
   13970 	}
   13971 
   13972 	wm_tbi_serdes_set_linkled(sc);
   13973 }
   13974 
   13975 /* SFP related */
   13976 
   13977 static int
   13978 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13979 {
   13980 	uint32_t i2ccmd;
   13981 	int i;
   13982 
   13983 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13984 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13985 
   13986 	/* Poll the ready bit */
   13987 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13988 		delay(50);
   13989 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13990 		if (i2ccmd & I2CCMD_READY)
   13991 			break;
   13992 	}
   13993 	if ((i2ccmd & I2CCMD_READY) == 0)
   13994 		return -1;
   13995 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13996 		return -1;
   13997 
   13998 	*data = i2ccmd & 0x00ff;
   13999 
   14000 	return 0;
   14001 }
   14002 
   14003 static uint32_t
   14004 wm_sfp_get_media_type(struct wm_softc *sc)
   14005 {
   14006 	uint32_t ctrl_ext;
   14007 	uint8_t val = 0;
   14008 	int timeout = 3;
   14009 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14010 	int rv = -1;
   14011 
   14012 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14013 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14014 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14015 	CSR_WRITE_FLUSH(sc);
   14016 
   14017 	/* Read SFP module data */
   14018 	while (timeout) {
   14019 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14020 		if (rv == 0)
   14021 			break;
   14022 		delay(100*1000); /* XXX too big */
   14023 		timeout--;
   14024 	}
   14025 	if (rv != 0)
   14026 		goto out;
   14027 
   14028 	switch (val) {
   14029 	case SFF_SFP_ID_SFF:
   14030 		aprint_normal_dev(sc->sc_dev,
   14031 		    "Module/Connector soldered to board\n");
   14032 		break;
   14033 	case SFF_SFP_ID_SFP:
   14034 		sc->sc_flags |= WM_F_SFP;
   14035 		break;
   14036 	case SFF_SFP_ID_UNKNOWN:
   14037 		goto out;
   14038 	default:
   14039 		break;
   14040 	}
   14041 
   14042 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14043 	if (rv != 0)
   14044 		goto out;
   14045 
   14046 	sc->sc_sfptype = val;
   14047 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14048 		mediatype = WM_MEDIATYPE_SERDES;
   14049 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14050 		sc->sc_flags |= WM_F_SGMII;
   14051 		mediatype = WM_MEDIATYPE_COPPER;
   14052 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14053 		sc->sc_flags |= WM_F_SGMII;
   14054 		mediatype = WM_MEDIATYPE_SERDES;
   14055 	} else {
   14056 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14057 		    __func__, sc->sc_sfptype);
   14058 		sc->sc_sfptype = 0; /* XXX unknown */
   14059 	}
   14060 
   14061 out:
   14062 	/* Restore I2C interface setting */
   14063 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14064 
   14065 	return mediatype;
   14066 }
   14067 
   14068 /*
   14069  * NVM related.
   14070  * Microwire, SPI (w/wo EERD) and Flash.
   14071  */
   14072 
   14073 /* Both spi and uwire */
   14074 
   14075 /*
   14076  * wm_eeprom_sendbits:
   14077  *
   14078  *	Send a series of bits to the EEPROM.
   14079  */
   14080 static void
   14081 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14082 {
   14083 	uint32_t reg;
   14084 	int x;
   14085 
   14086 	reg = CSR_READ(sc, WMREG_EECD);
   14087 
   14088 	for (x = nbits; x > 0; x--) {
   14089 		if (bits & (1U << (x - 1)))
   14090 			reg |= EECD_DI;
   14091 		else
   14092 			reg &= ~EECD_DI;
   14093 		CSR_WRITE(sc, WMREG_EECD, reg);
   14094 		CSR_WRITE_FLUSH(sc);
   14095 		delay(2);
   14096 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14097 		CSR_WRITE_FLUSH(sc);
   14098 		delay(2);
   14099 		CSR_WRITE(sc, WMREG_EECD, reg);
   14100 		CSR_WRITE_FLUSH(sc);
   14101 		delay(2);
   14102 	}
   14103 }
   14104 
   14105 /*
   14106  * wm_eeprom_recvbits:
   14107  *
   14108  *	Receive a series of bits from the EEPROM.
   14109  */
   14110 static void
   14111 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14112 {
   14113 	uint32_t reg, val;
   14114 	int x;
   14115 
   14116 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14117 
   14118 	val = 0;
   14119 	for (x = nbits; x > 0; x--) {
   14120 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14121 		CSR_WRITE_FLUSH(sc);
   14122 		delay(2);
   14123 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14124 			val |= (1U << (x - 1));
   14125 		CSR_WRITE(sc, WMREG_EECD, reg);
   14126 		CSR_WRITE_FLUSH(sc);
   14127 		delay(2);
   14128 	}
   14129 	*valp = val;
   14130 }
   14131 
   14132 /* Microwire */
   14133 
   14134 /*
   14135  * wm_nvm_read_uwire:
   14136  *
   14137  *	Read a word from the EEPROM using the MicroWire protocol.
   14138  */
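/*
 * Illustrative framing of one Microwire READ transaction (a sketch;
 * the opcode value itself lives in UWIRE_OPC_READ and is not spelled
 * out here):
 *
 *	CS high | 3-bit READ opcode | sc_nvm_addrbits address bits
 *	        | 16 data bits clocked back from the EEPROM | CS low
 */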
   14139 static int
   14140 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14141 {
   14142 	uint32_t reg, val;
   14143 	int i, rv;
   14144 
   14145 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14146 		device_xname(sc->sc_dev), __func__));
   14147 
   14148 	rv = sc->nvm.acquire(sc);
   14149 	if (rv != 0)
   14150 		return rv;
   14151 
   14152 	for (i = 0; i < wordcnt; i++) {
   14153 		/* Clear SK and DI. */
   14154 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14155 		CSR_WRITE(sc, WMREG_EECD, reg);
   14156 
   14157 		/*
   14158 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   14159 		 * and Xen.
   14160 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 emulation acts as an 82540.
   14163 		 */
   14164 		if (sc->sc_type == WM_T_82540) {
   14165 			reg |= EECD_SK;
   14166 			CSR_WRITE(sc, WMREG_EECD, reg);
   14167 			reg &= ~EECD_SK;
   14168 			CSR_WRITE(sc, WMREG_EECD, reg);
   14169 			CSR_WRITE_FLUSH(sc);
   14170 			delay(2);
   14171 		}
   14172 		/* XXX: end of workaround */
   14173 
   14174 		/* Set CHIP SELECT. */
   14175 		reg |= EECD_CS;
   14176 		CSR_WRITE(sc, WMREG_EECD, reg);
   14177 		CSR_WRITE_FLUSH(sc);
   14178 		delay(2);
   14179 
   14180 		/* Shift in the READ command. */
   14181 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14182 
   14183 		/* Shift in address. */
   14184 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14185 
   14186 		/* Shift out the data. */
   14187 		wm_eeprom_recvbits(sc, &val, 16);
   14188 		data[i] = val & 0xffff;
   14189 
   14190 		/* Clear CHIP SELECT. */
   14191 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14192 		CSR_WRITE(sc, WMREG_EECD, reg);
   14193 		CSR_WRITE_FLUSH(sc);
   14194 		delay(2);
   14195 	}
   14196 
   14197 	sc->nvm.release(sc);
   14198 	return 0;
   14199 }
   14200 
   14201 /* SPI */
   14202 
   14203 /*
   14204  * Set SPI and FLASH related information from the EECD register.
   14205  * For 82541 and 82547, the word size is taken from EEPROM.
   14206  */
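/*
 * Worked example of the size computation below (assuming
 * NVM_WORD_SIZE_BASE_SHIFT is the customary Intel value of 6): an EECD
 * size field of 2 gives size = 2 + 6 = 8, so sc_nvm_wordsize becomes
 * 1 << 8 = 256 words.
 */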
   14207 static int
   14208 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14209 {
   14210 	int size;
   14211 	uint32_t reg;
   14212 	uint16_t data;
   14213 
   14214 	reg = CSR_READ(sc, WMREG_EECD);
   14215 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14216 
   14217 	/* Read the size of NVM from EECD by default */
   14218 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14219 	switch (sc->sc_type) {
   14220 	case WM_T_82541:
   14221 	case WM_T_82541_2:
   14222 	case WM_T_82547:
   14223 	case WM_T_82547_2:
   14224 		/* Set dummy value to access EEPROM */
   14225 		sc->sc_nvm_wordsize = 64;
   14226 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14227 			aprint_error_dev(sc->sc_dev,
   14228 			    "%s: failed to read EEPROM size\n", __func__);
   14229 		}
   14230 		reg = data;
   14231 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14232 		if (size == 0)
   14233 			size = 6; /* 64 word size */
   14234 		else
   14235 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14236 		break;
   14237 	case WM_T_80003:
   14238 	case WM_T_82571:
   14239 	case WM_T_82572:
   14240 	case WM_T_82573: /* SPI case */
   14241 	case WM_T_82574: /* SPI case */
   14242 	case WM_T_82583: /* SPI case */
   14243 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14244 		if (size > 14)
   14245 			size = 14;
   14246 		break;
   14247 	case WM_T_82575:
   14248 	case WM_T_82576:
   14249 	case WM_T_82580:
   14250 	case WM_T_I350:
   14251 	case WM_T_I354:
   14252 	case WM_T_I210:
   14253 	case WM_T_I211:
   14254 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14255 		if (size > 15)
   14256 			size = 15;
   14257 		break;
   14258 	default:
   14259 		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
		return -1;
   14263 	}
   14264 
   14265 	sc->sc_nvm_wordsize = 1 << size;
   14266 
   14267 	return 0;
   14268 }
   14269 
   14270 /*
   14271  * wm_nvm_ready_spi:
   14272  *
   14273  *	Wait for a SPI EEPROM to be ready for commands.
   14274  */
   14275 static int
   14276 wm_nvm_ready_spi(struct wm_softc *sc)
   14277 {
   14278 	uint32_t val;
   14279 	int usec;
   14280 
   14281 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14282 		device_xname(sc->sc_dev), __func__));
   14283 
   14284 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14285 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14286 		wm_eeprom_recvbits(sc, &val, 8);
   14287 		if ((val & SPI_SR_RDY) == 0)
   14288 			break;
   14289 	}
   14290 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   14292 		return -1;
   14293 	}
   14294 	return 0;
   14295 }
   14296 
   14297 /*
   14298  * wm_nvm_read_spi:
   14299  *
 *	Read a word from the EEPROM using the SPI protocol.
   14301  */
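/*
 * Addressing note (a sketch): the SPI EEPROM is byte addressed, so the
 * word offset is shifted left by one (word << 1) before it is sent.
 * Parts with 8 address bits carry byte-address bit 8 in the opcode as
 * SPI_OPC_A8; e.g. word 0x90 is byte address 0x120, sent as A8 in the
 * opcode plus the low eight bits 0x20.
 */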
   14302 static int
   14303 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14304 {
   14305 	uint32_t reg, val;
   14306 	int i;
   14307 	uint8_t opc;
   14308 	int rv;
   14309 
   14310 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14311 		device_xname(sc->sc_dev), __func__));
   14312 
   14313 	rv = sc->nvm.acquire(sc);
   14314 	if (rv != 0)
   14315 		return rv;
   14316 
   14317 	/* Clear SK and CS. */
   14318 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14319 	CSR_WRITE(sc, WMREG_EECD, reg);
   14320 	CSR_WRITE_FLUSH(sc);
   14321 	delay(2);
   14322 
   14323 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14324 		goto out;
   14325 
   14326 	/* Toggle CS to flush commands. */
   14327 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14328 	CSR_WRITE_FLUSH(sc);
   14329 	delay(2);
   14330 	CSR_WRITE(sc, WMREG_EECD, reg);
   14331 	CSR_WRITE_FLUSH(sc);
   14332 	delay(2);
   14333 
   14334 	opc = SPI_OPC_READ;
   14335 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14336 		opc |= SPI_OPC_A8;
   14337 
   14338 	wm_eeprom_sendbits(sc, opc, 8);
   14339 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14340 
   14341 	for (i = 0; i < wordcnt; i++) {
   14342 		wm_eeprom_recvbits(sc, &val, 16);
   14343 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14344 	}
   14345 
   14346 	/* Raise CS and clear SK. */
   14347 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14348 	CSR_WRITE(sc, WMREG_EECD, reg);
   14349 	CSR_WRITE_FLUSH(sc);
   14350 	delay(2);
   14351 
   14352 out:
   14353 	sc->nvm.release(sc);
   14354 	return rv;
   14355 }
   14356 
/* Reading via the EERD register */
   14358 
   14359 static int
   14360 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14361 {
   14362 	uint32_t attempts = 100000;
   14363 	uint32_t i, reg = 0;
   14364 	int32_t done = -1;
   14365 
   14366 	for (i = 0; i < attempts; i++) {
   14367 		reg = CSR_READ(sc, rw);
   14368 
   14369 		if (reg & EERD_DONE) {
   14370 			done = 0;
   14371 			break;
   14372 		}
   14373 		delay(5);
   14374 	}
   14375 
   14376 	return done;
   14377 }
   14378 
   14379 static int
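/*
 * EERD read sketch (field positions are those implied by the macros
 * used below): write the word address shifted by EERD_ADDR_SHIFT
 * together with EERD_START, poll until the hardware sets EERD_DONE,
 * then take the 16-bit result from above EERD_DATA_SHIFT:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	(poll for EERD_DONE)
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */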
   14380 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14381 {
   14382 	int i, eerd = 0;
   14383 	int rv;
   14384 
   14385 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14386 		device_xname(sc->sc_dev), __func__));
   14387 
   14388 	rv = sc->nvm.acquire(sc);
   14389 	if (rv != 0)
   14390 		return rv;
   14391 
   14392 	for (i = 0; i < wordcnt; i++) {
   14393 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14394 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14395 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14396 		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   14399 			break;
   14400 		}
   14401 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14402 	}
   14403 
   14404 	sc->nvm.release(sc);
   14405 	return rv;
   14406 }
   14407 
   14408 /* Flash */
   14409 
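/*
 * ICH8 and newer parts keep the NVM image in one of two flash banks,
 * each sc_ich8_flash_bank_size words long. A signature byte stored at
 * word ICH_NVM_SIG_WORD of a bank marks it valid when the bits under
 * ICH_NVM_VALID_SIG_MASK equal ICH_NVM_SIG_VALUE; the function below
 * probes bank 0 and then bank 1 and reports which one to use.
 */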
   14410 static int
   14411 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14412 {
   14413 	uint32_t eecd;
   14414 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14415 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14416 	uint32_t nvm_dword = 0;
   14417 	uint8_t sig_byte = 0;
   14418 	int rv;
   14419 
   14420 	switch (sc->sc_type) {
   14421 	case WM_T_PCH_SPT:
   14422 	case WM_T_PCH_CNP:
   14423 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14424 		act_offset = ICH_NVM_SIG_WORD * 2;
   14425 
   14426 		/* Set bank to 0 in case flash read fails. */
   14427 		*bank = 0;
   14428 
   14429 		/* Check bank 0 */
   14430 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14431 		if (rv != 0)
   14432 			return rv;
   14433 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14434 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14435 			*bank = 0;
   14436 			return 0;
   14437 		}
   14438 
   14439 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
   14442 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14443 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14444 			*bank = 1;
   14445 			return 0;
   14446 		}
   14447 		aprint_error_dev(sc->sc_dev,
   14448 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14449 		return -1;
   14450 	case WM_T_ICH8:
   14451 	case WM_T_ICH9:
   14452 		eecd = CSR_READ(sc, WMREG_EECD);
   14453 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14454 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14455 			return 0;
   14456 		}
   14457 		/* FALLTHROUGH */
   14458 	default:
   14459 		/* Default to 0 */
   14460 		*bank = 0;
   14461 
   14462 		/* Check bank 0 */
   14463 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14464 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14465 			*bank = 0;
   14466 			return 0;
   14467 		}
   14468 
   14469 		/* Check bank 1 */
   14470 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14471 		    &sig_byte);
   14472 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14473 			*bank = 1;
   14474 			return 0;
   14475 		}
   14476 	}
   14477 
   14478 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14479 		device_xname(sc->sc_dev)));
   14480 	return -1;
   14481 }
   14482 
   14483 /******************************************************************************
   14484  * This function does initial flash setup so that a new read/write/erase cycle
   14485  * can be started.
   14486  *
   14487  * sc - The pointer to the hw structure
   14488  ****************************************************************************/
   14489 static int32_t
   14490 wm_ich8_cycle_init(struct wm_softc *sc)
   14491 {
   14492 	uint16_t hsfsts;
   14493 	int32_t error = 1;
   14494 	int32_t i     = 0;
   14495 
   14496 	if (sc->sc_type >= WM_T_PCH_SPT)
   14497 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14498 	else
   14499 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14500 
	/* Check that the Flash Descriptor Valid bit is set in HW status */
   14502 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14503 		return error;
   14504 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   14507 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14508 
   14509 	if (sc->sc_type >= WM_T_PCH_SPT)
   14510 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14511 	else
   14512 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14513 
   14514 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset; it could then indicate whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that accesses by two threads are serialized, or some way to
	 * keep two threads from starting a cycle at the same time.
   14523 	 */
   14524 
   14525 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14526 		/*
   14527 		 * There is no cycle running at present, so we can start a
   14528 		 * cycle
   14529 		 */
   14530 
   14531 		/* Begin by setting Flash Cycle Done. */
   14532 		hsfsts |= HSFSTS_DONE;
   14533 		if (sc->sc_type >= WM_T_PCH_SPT)
   14534 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14535 			    hsfsts & 0xffffUL);
   14536 		else
   14537 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14538 		error = 0;
   14539 	} else {
   14540 		/*
		 * Otherwise poll for some time so the current cycle has a
   14542 		 * chance to end before giving up.
   14543 		 */
   14544 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14545 			if (sc->sc_type >= WM_T_PCH_SPT)
   14546 				hsfsts = ICH8_FLASH_READ32(sc,
   14547 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14548 			else
   14549 				hsfsts = ICH8_FLASH_READ16(sc,
   14550 				    ICH_FLASH_HSFSTS);
   14551 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14552 				error = 0;
   14553 				break;
   14554 			}
   14555 			delay(1);
   14556 		}
   14557 		if (error == 0) {
   14558 			/*
			 * The previous cycle completed within the timeout,
			 * so set the Flash Cycle Done bit.
   14561 			 */
   14562 			hsfsts |= HSFSTS_DONE;
   14563 			if (sc->sc_type >= WM_T_PCH_SPT)
   14564 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14565 				    hsfsts & 0xffffUL);
   14566 			else
   14567 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14568 				    hsfsts);
   14569 		}
   14570 	}
   14571 	return error;
   14572 }
   14573 
   14574 /******************************************************************************
   14575  * This function starts a flash cycle and waits for its completion
   14576  *
   14577  * sc - The pointer to the hw structure
   14578  ****************************************************************************/
   14579 static int32_t
   14580 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14581 {
   14582 	uint16_t hsflctl;
   14583 	uint16_t hsfsts;
   14584 	int32_t error = 1;
   14585 	uint32_t i = 0;
   14586 
   14587 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14588 	if (sc->sc_type >= WM_T_PCH_SPT)
   14589 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14590 	else
   14591 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14592 	hsflctl |= HSFCTL_GO;
   14593 	if (sc->sc_type >= WM_T_PCH_SPT)
   14594 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14595 		    (uint32_t)hsflctl << 16);
   14596 	else
   14597 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14598 
   14599 	/* Wait till FDONE bit is set to 1 */
   14600 	do {
   14601 		if (sc->sc_type >= WM_T_PCH_SPT)
   14602 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14603 			    & 0xffffUL;
   14604 		else
   14605 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14606 		if (hsfsts & HSFSTS_DONE)
   14607 			break;
   14608 		delay(1);
   14609 		i++;
   14610 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14612 		error = 0;
   14613 
   14614 	return error;
   14615 }
   14616 
   14617 /******************************************************************************
   14618  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14619  *
   14620  * sc - The pointer to the hw structure
   14621  * index - The index of the byte or word to read.
   14622  * size - Size of data to read, 1=byte 2=word, 4=dword
   14623  * data - Pointer to the word to store the value read.
   14624  *****************************************************************************/
   14625 static int32_t
   14626 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14627     uint32_t size, uint32_t *data)
   14628 {
   14629 	uint16_t hsfsts;
   14630 	uint16_t hsflctl;
   14631 	uint32_t flash_linear_address;
   14632 	uint32_t flash_data = 0;
   14633 	int32_t error = 1;
   14634 	int32_t count = 0;
   14635 
	if (size < 1 || size > 4 || data == NULL ||
   14637 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14638 		return error;
   14639 
   14640 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14641 	    sc->sc_ich8_flash_base;
   14642 
   14643 	do {
   14644 		delay(1);
   14645 		/* Steps */
   14646 		error = wm_ich8_cycle_init(sc);
   14647 		if (error)
   14648 			break;
   14649 
   14650 		if (sc->sc_type >= WM_T_PCH_SPT)
   14651 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14652 			    >> 16;
   14653 		else
   14654 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0 = 1 byte ... 3 = 4 bytes). */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14657 		    & HSFCTL_BCOUNT_MASK;
   14658 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14659 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14660 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash, so only 32-bit access is supported.
   14663 			 */
   14664 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14665 			    (uint32_t)hsflctl << 16);
   14666 		} else
   14667 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14668 
   14669 		/*
   14670 		 * Write the last 24 bits of index into Flash Linear address
   14671 		 * field in Flash Address
   14672 		 */
   14673 		/* TODO: TBD maybe check the index against the size of flash */
   14674 
   14675 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14676 
   14677 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14678 
   14679 		/*
		 * If FCERR is set, clear it and retry the whole sequence up
		 * to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the
		 * result from Flash Data0, least significant byte first.
   14684 		 */
   14685 		if (error == 0) {
   14686 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14687 			if (size == 1)
   14688 				*data = (uint8_t)(flash_data & 0x000000FF);
   14689 			else if (size == 2)
   14690 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14691 			else if (size == 4)
   14692 				*data = (uint32_t)flash_data;
   14693 			break;
   14694 		} else {
   14695 			/*
   14696 			 * If we've gotten here, then things are probably
   14697 			 * completely hosed, but if the error condition is
   14698 			 * detected, it won't hurt to give it another try...
   14699 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14700 			 */
   14701 			if (sc->sc_type >= WM_T_PCH_SPT)
   14702 				hsfsts = ICH8_FLASH_READ32(sc,
   14703 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14704 			else
   14705 				hsfsts = ICH8_FLASH_READ16(sc,
   14706 				    ICH_FLASH_HSFSTS);
   14707 
   14708 			if (hsfsts & HSFSTS_ERR) {
   14709 				/* Repeat for some time before giving up. */
   14710 				continue;
   14711 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14712 				break;
   14713 		}
   14714 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14715 
   14716 	return error;
   14717 }
   14718 
   14719 /******************************************************************************
   14720  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14721  *
   14722  * sc - pointer to wm_hw structure
   14723  * index - The index of the byte to read.
   14724  * data - Pointer to a byte to store the value read.
   14725  *****************************************************************************/
   14726 static int32_t
   14727 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14728 {
   14729 	int32_t status;
   14730 	uint32_t word = 0;
   14731 
   14732 	status = wm_read_ich8_data(sc, index, 1, &word);
   14733 	if (status == 0)
   14734 		*data = (uint8_t)word;
   14735 	else
   14736 		*data = 0;
   14737 
   14738 	return status;
   14739 }
   14740 
   14741 /******************************************************************************
   14742  * Reads a word from the NVM using the ICH8 flash access registers.
   14743  *
   14744  * sc - pointer to wm_hw structure
   14745  * index - The starting byte index of the word to read.
   14746  * data - Pointer to a word to store the value read.
   14747  *****************************************************************************/
   14748 static int32_t
   14749 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14750 {
   14751 	int32_t status;
   14752 	uint32_t word = 0;
   14753 
   14754 	status = wm_read_ich8_data(sc, index, 2, &word);
   14755 	if (status == 0)
   14756 		*data = (uint16_t)word;
   14757 	else
   14758 		*data = 0;
   14759 
   14760 	return status;
   14761 }
   14762 
   14763 /******************************************************************************
   14764  * Reads a dword from the NVM using the ICH8 flash access registers.
   14765  *
   14766  * sc - pointer to wm_hw structure
   14767  * index - The starting byte index of the word to read.
   14768  * data - Pointer to a word to store the value read.
   14769  *****************************************************************************/
   14770 static int32_t
   14771 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14772 {
   14773 	int32_t status;
   14774 
   14775 	status = wm_read_ich8_data(sc, index, 4, data);
   14776 	return status;
   14777 }
   14778 
   14779 /******************************************************************************
   14780  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14781  * register.
   14782  *
   14783  * sc - Struct containing variables accessed by shared code
   14784  * offset - offset of word in the EEPROM to read
   14785  * data - word read from the EEPROM
   14786  * words - number of words to read
   14787  *****************************************************************************/
   14788 static int
   14789 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14790 {
   14791 	int rv;
   14792 	uint32_t flash_bank = 0;
   14793 	uint32_t act_offset = 0;
   14794 	uint32_t bank_offset = 0;
   14795 	uint16_t word = 0;
   14796 	uint16_t i = 0;
   14797 
   14798 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14799 		device_xname(sc->sc_dev), __func__));
   14800 
   14801 	rv = sc->nvm.acquire(sc);
   14802 	if (rv != 0)
   14803 		return rv;
   14804 
   14805 	/*
   14806 	 * We need to know which is the valid flash bank.  In the event
   14807 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14808 	 * managing flash_bank. So it cannot be trusted and needs
   14809 	 * to be updated with each read.
   14810 	 */
   14811 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14812 	if (rv) {
   14813 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14814 			device_xname(sc->sc_dev)));
   14815 		flash_bank = 0;
   14816 	}
   14817 
   14818 	/*
   14819 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14820 	 * size
   14821 	 */
   14822 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14823 
   14824 	for (i = 0; i < words; i++) {
   14825 		/* The NVM part needs a byte offset, hence * 2 */
   14826 		act_offset = bank_offset + ((offset + i) * 2);
   14827 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14828 		if (rv) {
   14829 			aprint_error_dev(sc->sc_dev,
   14830 			    "%s: failed to read NVM\n", __func__);
   14831 			break;
   14832 		}
   14833 		data[i] = word;
   14834 	}
   14835 
   14836 	sc->nvm.release(sc);
   14837 	return rv;
   14838 }
   14839 
   14840 /******************************************************************************
   14841  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14842  * register.
   14843  *
   14844  * sc - Struct containing variables accessed by shared code
   14845  * offset - offset of word in the EEPROM to read
   14846  * data - word read from the EEPROM
   14847  * words - number of words to read
   14848  *****************************************************************************/
   14849 static int
   14850 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14851 {
   14852 	int	 rv;
   14853 	uint32_t flash_bank = 0;
   14854 	uint32_t act_offset = 0;
   14855 	uint32_t bank_offset = 0;
   14856 	uint32_t dword = 0;
   14857 	uint16_t i = 0;
   14858 
   14859 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14860 		device_xname(sc->sc_dev), __func__));
   14861 
   14862 	rv = sc->nvm.acquire(sc);
   14863 	if (rv != 0)
   14864 		return rv;
   14865 
   14866 	/*
   14867 	 * We need to know which is the valid flash bank.  In the event
   14868 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14869 	 * managing flash_bank. So it cannot be trusted and needs
   14870 	 * to be updated with each read.
   14871 	 */
   14872 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14873 	if (rv) {
   14874 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14875 			device_xname(sc->sc_dev)));
   14876 		flash_bank = 0;
   14877 	}
   14878 
   14879 	/*
   14880 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14881 	 * size
   14882 	 */
   14883 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14884 
   14885 	for (i = 0; i < words; i++) {
   14886 		/* The NVM part needs a byte offset, hence * 2 */
   14887 		act_offset = bank_offset + ((offset + i) * 2);
   14888 		/* but we must read dword aligned, so mask ... */
   14889 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14890 		if (rv) {
   14891 			aprint_error_dev(sc->sc_dev,
   14892 			    "%s: failed to read NVM\n", __func__);
   14893 			break;
   14894 		}
   14895 		/* ... and pick out low or high word */
   14896 		if ((act_offset & 0x2) == 0)
   14897 			data[i] = (uint16_t)(dword & 0xFFFF);
   14898 		else
   14899 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14900 	}
   14901 
   14902 	sc->nvm.release(sc);
   14903 	return rv;
   14904 }
   14905 
   14906 /* iNVM */
   14907 
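/*
 * The I210/I211 iNVM is a small OTP array read through the
 * WM_INVM_DATA_REG() registers. Each dword begins with a record type:
 * word-autoload records carry a word address and a 16-bit data value,
 * while CSR-autoload and RSA-key records are skipped by advancing the
 * index past their fixed sizes, as the reader below does.
 */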
   14908 static int
   14909 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14910 {
	int32_t	 rv = -1;	/* -1 = not found, so callers fall back to defaults */
   14912 	uint32_t invm_dword;
   14913 	uint16_t i;
   14914 	uint8_t record_type, word_address;
   14915 
   14916 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14917 		device_xname(sc->sc_dev), __func__));
   14918 
   14919 	for (i = 0; i < INVM_SIZE; i++) {
   14920 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14921 		/* Get record type */
   14922 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14923 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14924 			break;
   14925 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14926 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14927 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14928 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14929 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14930 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14931 			if (word_address == address) {
   14932 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14933 				rv = 0;
   14934 				break;
   14935 			}
   14936 		}
   14937 	}
   14938 
   14939 	return rv;
   14940 }
   14941 
   14942 static int
   14943 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14944 {
   14945 	int i, rv;
   14946 
   14947 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14948 		device_xname(sc->sc_dev), __func__));
   14949 
   14950 	rv = sc->nvm.acquire(sc);
   14951 	if (rv != 0)
   14952 		return rv;
   14953 
   14954 	for (i = 0; i < words; i++) {
   14955 		switch (offset + i) {
   14956 		case NVM_OFF_MACADDR:
   14957 		case NVM_OFF_MACADDR1:
   14958 		case NVM_OFF_MACADDR2:
   14959 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14960 			if (rv != 0) {
   14961 				data[i] = 0xffff;
   14962 				rv = -1;
   14963 			}
   14964 			break;
   14965 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14966 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14967 			if (rv != 0) {
   14968 				*data = INVM_DEFAULT_AL;
   14969 				rv = 0;
   14970 			}
   14971 			break;
   14972 		case NVM_OFF_CFG2:
   14973 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14974 			if (rv != 0) {
   14975 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14976 				rv = 0;
   14977 			}
   14978 			break;
   14979 		case NVM_OFF_CFG4:
   14980 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14981 			if (rv != 0) {
   14982 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14983 				rv = 0;
   14984 			}
   14985 			break;
   14986 		case NVM_OFF_LED_1_CFG:
   14987 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14988 			if (rv != 0) {
   14989 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14990 				rv = 0;
   14991 			}
   14992 			break;
   14993 		case NVM_OFF_LED_0_2_CFG:
   14994 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14995 			if (rv != 0) {
   14996 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14997 				rv = 0;
   14998 			}
   14999 			break;
   15000 		case NVM_OFF_ID_LED_SETTINGS:
   15001 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15002 			if (rv != 0) {
   15003 				*data = ID_LED_RESERVED_FFFF;
   15004 				rv = 0;
   15005 			}
   15006 			break;
   15007 		default:
   15008 			DPRINTF(sc, WM_DEBUG_NVM,
   15009 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15010 			*data = NVM_RESERVED_WORD;
   15011 			break;
   15012 		}
   15013 	}
   15014 
   15015 	sc->nvm.release(sc);
   15016 	return rv;
   15017 }
   15018 
/* Locking, NVM type detection, checksum validation, version and read */
   15020 
   15021 static int
   15022 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15023 {
   15024 	uint32_t eecd = 0;
   15025 
   15026 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15027 	    || sc->sc_type == WM_T_82583) {
   15028 		eecd = CSR_READ(sc, WMREG_EECD);
   15029 
   15030 		/* Isolate bits 15 & 16 */
   15031 		eecd = ((eecd >> 15) & 0x03);
   15032 
   15033 		/* If both bits are set, device is Flash type */
   15034 		if (eecd == 0x03)
   15035 			return 0;
   15036 	}
   15037 	return 1;
   15038 }
   15039 
   15040 static int
   15041 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15042 {
   15043 	uint32_t eec;
   15044 
   15045 	eec = CSR_READ(sc, WMREG_EEC);
   15046 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15047 		return 1;
   15048 
   15049 	return 0;
   15050 }
   15051 
   15052 /*
   15053  * wm_nvm_validate_checksum
   15054  *
 * The image is valid when the 16-bit sum of the first 64 words equals
 * NVM_CHECKSUM.
   15056  */
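/*
 * Worked example (assuming NVM_CHECKSUM is the customary Intel value
 * 0xBABA): if words 0 through 62 sum to 0x1234, the checksum word must
 * be written as 0xBABA - 0x1234 = 0xA886 so that the 16-bit sum of all
 * 64 words wraps around to exactly 0xBABA.
 */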
   15057 static int
   15058 wm_nvm_validate_checksum(struct wm_softc *sc)
   15059 {
   15060 	uint16_t checksum;
   15061 	uint16_t eeprom_data;
   15062 #ifdef WM_DEBUG
   15063 	uint16_t csum_wordaddr, valid_checksum;
   15064 #endif
   15065 	int i;
   15066 
   15067 	checksum = 0;
   15068 
   15069 	/* Don't check for I211 */
   15070 	if (sc->sc_type == WM_T_I211)
   15071 		return 0;
   15072 
   15073 #ifdef WM_DEBUG
   15074 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   15075 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   15076 		csum_wordaddr = NVM_OFF_COMPAT;
   15077 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15078 	} else {
   15079 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15080 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15081 	}
   15082 
   15083 	/* Dump EEPROM image for debug */
   15084 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15085 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15086 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15087 		/* XXX PCH_SPT? */
   15088 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15089 		if ((eeprom_data & valid_checksum) == 0)
   15090 			DPRINTF(sc, WM_DEBUG_NVM,
   15091 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15092 				device_xname(sc->sc_dev), eeprom_data,
   15093 				valid_checksum));
   15094 	}
   15095 
   15096 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15097 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15098 		for (i = 0; i < NVM_SIZE; i++) {
   15099 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15100 				printf("XXXX ");
   15101 			else
   15102 				printf("%04hx ", eeprom_data);
   15103 			if (i % 8 == 7)
   15104 				printf("\n");
   15105 		}
   15106 	}
   15107 
   15108 #endif /* WM_DEBUG */
   15109 
   15110 	for (i = 0; i < NVM_SIZE; i++) {
   15111 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15112 			return -1;
   15113 		checksum += eeprom_data;
   15114 	}
   15115 
   15116 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15117 #ifdef WM_DEBUG
   15118 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15119 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15120 #endif
   15121 	}
   15122 
   15123 	return 0;
   15124 }
   15125 
   15126 static void
   15127 wm_nvm_version_invm(struct wm_softc *sc)
   15128 {
   15129 	uint32_t dword;
   15130 
   15131 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes. Perhaps it's not perfect though...
   15135 	 *
   15136 	 * Example:
   15137 	 *
   15138 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15139 	 */
   15140 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15141 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15142 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15143 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15144 }
   15145 
   15146 static void
   15147 wm_nvm_version(struct wm_softc *sc)
   15148 {
   15149 	uint16_t major, minor, build, patch;
   15150 	uint16_t uid0, uid1;
   15151 	uint16_t nvm_data;
   15152 	uint16_t off;
   15153 	bool check_version = false;
   15154 	bool check_optionrom = false;
   15155 	bool have_build = false;
   15156 	bool have_uid = true;
   15157 
   15158 	/*
   15159 	 * Version format:
   15160 	 *
   15161 	 * XYYZ
   15162 	 * X0YZ
   15163 	 * X0YY
   15164 	 *
   15165 	 * Example:
   15166 	 *
   15167 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15168 	 *	82571	0x50a6	5.10.6?
   15169 	 *	82572	0x506a	5.6.10?
   15170 	 *	82572EI	0x5069	5.6.9?
   15171 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15172 	 *		0x2013	2.1.3?
   15173 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15174 	 * ICH8+82567	0x0040	0.4.0?
   15175 	 * ICH9+82566	0x1040	1.4.0?
   15176 	 *ICH10+82567	0x0043	0.4.3?
   15177 	 *  PCH+82577	0x00c1	0.12.1?
   15178 	 * PCH2+82579	0x00d3	0.13.3?
   15179 	 *		0x00d4	0.13.4?
   15180 	 *  LPT+I218	0x0023	0.2.3?
   15181 	 *  SPT+I219	0x0084	0.8.4?
   15182 	 *  CNP+I219	0x0054	0.5.4?
   15183 	 */
   15184 
   15185 	/*
   15186 	 * XXX
	 * The SPI EEPROM in qemu's e1000e (82574L) emulation has only 64
	 * words. I've never seen real 82574 hardware with such a small
	 * SPI ROM.
   15189 	 */
   15190 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15191 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15192 		have_uid = false;
   15193 
   15194 	switch (sc->sc_type) {
   15195 	case WM_T_82571:
   15196 	case WM_T_82572:
   15197 	case WM_T_82574:
   15198 	case WM_T_82583:
   15199 		check_version = true;
   15200 		check_optionrom = true;
   15201 		have_build = true;
   15202 		break;
   15203 	case WM_T_ICH8:
   15204 	case WM_T_ICH9:
   15205 	case WM_T_ICH10:
   15206 	case WM_T_PCH:
   15207 	case WM_T_PCH2:
   15208 	case WM_T_PCH_LPT:
   15209 	case WM_T_PCH_SPT:
   15210 	case WM_T_PCH_CNP:
   15211 		check_version = true;
   15212 		have_build = true;
   15213 		have_uid = false;
   15214 		break;
   15215 	case WM_T_82575:
   15216 	case WM_T_82576:
   15217 	case WM_T_82580:
   15218 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15219 			check_version = true;
   15220 		break;
   15221 	case WM_T_I211:
   15222 		wm_nvm_version_invm(sc);
   15223 		have_uid = false;
   15224 		goto printver;
   15225 	case WM_T_I210:
   15226 		if (!wm_nvm_flash_presence_i210(sc)) {
   15227 			wm_nvm_version_invm(sc);
   15228 			have_uid = false;
   15229 			goto printver;
   15230 		}
   15231 		/* FALLTHROUGH */
   15232 	case WM_T_I350:
   15233 	case WM_T_I354:
   15234 		check_version = true;
   15235 		check_optionrom = true;
   15236 		break;
   15237 	default:
   15238 		return;
   15239 	}
   15240 	if (check_version
   15241 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15242 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15243 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15244 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15245 			build = nvm_data & NVM_BUILD_MASK;
   15246 			have_build = true;
   15247 		} else
   15248 			minor = nvm_data & 0x00ff;
   15249 
		/* Convert the BCD-coded minor number to decimal */
   15251 		minor = (minor / 16) * 10 + (minor % 16);
   15252 		sc->sc_nvm_ver_major = major;
   15253 		sc->sc_nvm_ver_minor = minor;
   15254 
   15255 printver:
   15256 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15257 		    sc->sc_nvm_ver_minor);
   15258 		if (have_build) {
   15259 			sc->sc_nvm_ver_build = build;
   15260 			aprint_verbose(".%d", build);
   15261 		}
   15262 	}
   15263 
	/* Assume the Option ROM area is above NVM_SIZE */
   15265 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15266 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15267 		/* Option ROM Version */
   15268 		if ((off != 0x0000) && (off != 0xffff)) {
   15269 			int rv;
   15270 
   15271 			off += NVM_COMBO_VER_OFF;
   15272 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15273 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15274 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15275 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15276 				/* 16bits */
   15277 				major = uid0 >> 8;
   15278 				build = (uid0 << 8) | (uid1 >> 8);
   15279 				patch = uid1 & 0x00ff;
   15280 				aprint_verbose(", option ROM Version %d.%d.%d",
   15281 				    major, build, patch);
   15282 			}
   15283 		}
   15284 	}
   15285 
   15286 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15287 		aprint_verbose(", Image Unique ID %08x",
   15288 		    ((uint32_t)uid1 << 16) | uid0);
   15289 }
   15290 
   15291 /*
   15292  * wm_nvm_read:
   15293  *
   15294  *	Read data from the serial EEPROM.
   15295  */
   15296 static int
   15297 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15298 {
   15299 	int rv;
   15300 
   15301 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15302 		device_xname(sc->sc_dev), __func__));
   15303 
   15304 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15305 		return -1;
   15306 
   15307 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15308 
   15309 	return rv;
   15310 }
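
/*
 * Illustrative use (a sketch only): fetch the three Ethernet address
 * words through whichever per-chip read method is installed in sc->nvm:
 *
 *	uint16_t myea[3];
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0)
 *		(each word holds two bytes of the MAC address)
 */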
   15311 
   15312 /*
   15313  * Hardware semaphores.
 * Very complex...
   15315  */
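
/*
 * A rough map of the mechanisms below (a summary, not new behavior):
 * EECD EE_REQ/EE_GNT gates raw EEPROM pin access, SWSM SMBI+SWESMBI is
 * the two-stage hardware semaphore, SW_FW_SYNC arbitrates individual
 * resources between software and firmware by mask, the EXTCNFCTR MDIO
 * ownership bit covers PHY and NVM access on ICH/PCH parts, and the
 * sc_ich_phymtx and sc_ich_nvmmtx mutexes serialize contexts within
 * the driver itself.
 */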
   15316 
   15317 static int
   15318 wm_get_null(struct wm_softc *sc)
   15319 {
   15320 
   15321 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15322 		device_xname(sc->sc_dev), __func__));
   15323 	return 0;
   15324 }
   15325 
   15326 static void
   15327 wm_put_null(struct wm_softc *sc)
   15328 {
   15329 
   15330 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15331 		device_xname(sc->sc_dev), __func__));
   15332 	return;
   15333 }
   15334 
   15335 static int
   15336 wm_get_eecd(struct wm_softc *sc)
   15337 {
   15338 	uint32_t reg;
   15339 	int x;
   15340 
   15341 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15342 		device_xname(sc->sc_dev), __func__));
   15343 
   15344 	reg = CSR_READ(sc, WMREG_EECD);
   15345 
   15346 	/* Request EEPROM access. */
   15347 	reg |= EECD_EE_REQ;
   15348 	CSR_WRITE(sc, WMREG_EECD, reg);
   15349 
	/* ... and wait for it to be granted. */
   15351 	for (x = 0; x < 1000; x++) {
   15352 		reg = CSR_READ(sc, WMREG_EECD);
   15353 		if (reg & EECD_EE_GNT)
   15354 			break;
   15355 		delay(5);
   15356 	}
   15357 	if ((reg & EECD_EE_GNT) == 0) {
   15358 		aprint_error_dev(sc->sc_dev,
   15359 		    "could not acquire EEPROM GNT\n");
   15360 		reg &= ~EECD_EE_REQ;
   15361 		CSR_WRITE(sc, WMREG_EECD, reg);
   15362 		return -1;
   15363 	}
   15364 
   15365 	return 0;
   15366 }
   15367 
   15368 static void
   15369 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15370 {
   15371 
   15372 	*eecd |= EECD_SK;
   15373 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15374 	CSR_WRITE_FLUSH(sc);
   15375 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15376 		delay(1);
   15377 	else
   15378 		delay(50);
   15379 }
   15380 
   15381 static void
   15382 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15383 {
   15384 
   15385 	*eecd &= ~EECD_SK;
   15386 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15387 	CSR_WRITE_FLUSH(sc);
   15388 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15389 		delay(1);
   15390 	else
   15391 		delay(50);
   15392 }
   15393 
   15394 static void
   15395 wm_put_eecd(struct wm_softc *sc)
   15396 {
   15397 	uint32_t reg;
   15398 
   15399 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15400 		device_xname(sc->sc_dev), __func__));
   15401 
   15402 	/* Stop nvm */
   15403 	reg = CSR_READ(sc, WMREG_EECD);
   15404 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15405 		/* Pull CS high */
   15406 		reg |= EECD_CS;
   15407 		wm_nvm_eec_clock_lower(sc, &reg);
   15408 	} else {
   15409 		/* CS on Microwire is active-high */
   15410 		reg &= ~(EECD_CS | EECD_DI);
   15411 		CSR_WRITE(sc, WMREG_EECD, reg);
   15412 		wm_nvm_eec_clock_raise(sc, &reg);
   15413 		wm_nvm_eec_clock_lower(sc, &reg);
   15414 	}
   15415 
   15416 	reg = CSR_READ(sc, WMREG_EECD);
   15417 	reg &= ~EECD_EE_REQ;
   15418 	CSR_WRITE(sc, WMREG_EECD, reg);
   15419 
   15420 	return;
   15421 }
   15422 
   15423 /*
   15424  * Get hardware semaphore.
   15425  * Same as e1000_get_hw_semaphore_generic()
   15426  */
   15427 static int
   15428 wm_get_swsm_semaphore(struct wm_softc *sc)
   15429 {
   15430 	int32_t timeout;
   15431 	uint32_t swsm;
   15432 
   15433 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15434 		device_xname(sc->sc_dev), __func__));
   15435 	KASSERT(sc->sc_nvm_wordsize > 0);
   15436 
   15437 retry:
   15438 	/* Get the SW semaphore. */
   15439 	timeout = sc->sc_nvm_wordsize + 1;
   15440 	while (timeout) {
   15441 		swsm = CSR_READ(sc, WMREG_SWSM);
   15442 
   15443 		if ((swsm & SWSM_SMBI) == 0)
   15444 			break;
   15445 
   15446 		delay(50);
   15447 		timeout--;
   15448 	}
   15449 
   15450 	if (timeout == 0) {
   15451 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15452 			/*
   15453 			 * In rare circumstances, the SW semaphore may already
   15454 			 * be held unintentionally. Clear the semaphore once
   15455 			 * before giving up.
   15456 			 */
   15457 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15458 			wm_put_swsm_semaphore(sc);
   15459 			goto retry;
   15460 		}
   15461 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15462 		return -1;
   15463 	}
   15464 
   15465 	/* Get the FW semaphore. */
   15466 	timeout = sc->sc_nvm_wordsize + 1;
   15467 	while (timeout) {
   15468 		swsm = CSR_READ(sc, WMREG_SWSM);
   15469 		swsm |= SWSM_SWESMBI;
   15470 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15471 		/* If we managed to set the bit we got the semaphore. */
   15472 		swsm = CSR_READ(sc, WMREG_SWSM);
   15473 		if (swsm & SWSM_SWESMBI)
   15474 			break;
   15475 
   15476 		delay(50);
   15477 		timeout--;
   15478 	}
   15479 
   15480 	if (timeout == 0) {
   15481 		aprint_error_dev(sc->sc_dev,
   15482 		    "could not acquire SWSM SWESMBI\n");
   15483 		/* Release semaphores */
   15484 		wm_put_swsm_semaphore(sc);
   15485 		return -1;
   15486 	}
   15487 	return 0;
   15488 }
   15489 
   15490 /*
   15491  * Put hardware semaphore.
   15492  * Same as e1000_put_hw_semaphore_generic()
   15493  */
   15494 static void
   15495 wm_put_swsm_semaphore(struct wm_softc *sc)
   15496 {
   15497 	uint32_t swsm;
   15498 
   15499 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15500 		device_xname(sc->sc_dev), __func__));
   15501 
   15502 	swsm = CSR_READ(sc, WMREG_SWSM);
   15503 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15504 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15505 }
   15506 
   15507 /*
   15508  * Get SW/FW semaphore.
   15509  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15510  */
   15511 static int
   15512 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15513 {
   15514 	uint32_t swfw_sync;
   15515 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15516 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15517 	int timeout;
   15518 
   15519 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15520 		device_xname(sc->sc_dev), __func__));
   15521 
   15522 	if (sc->sc_type == WM_T_80003)
   15523 		timeout = 50;
   15524 	else
   15525 		timeout = 200;
   15526 
   15527 	while (timeout) {
   15528 		if (wm_get_swsm_semaphore(sc)) {
   15529 			aprint_error_dev(sc->sc_dev,
   15530 			    "%s: failed to get semaphore\n",
   15531 			    __func__);
   15532 			return -1;
   15533 		}
   15534 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15535 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15536 			swfw_sync |= swmask;
   15537 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15538 			wm_put_swsm_semaphore(sc);
   15539 			return 0;
   15540 		}
   15541 		wm_put_swsm_semaphore(sc);
   15542 		delay(5000);
   15543 		timeout--;
   15544 	}
   15545 	device_printf(sc->sc_dev,
   15546 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15547 	    mask, swfw_sync);
   15548 	return -1;
   15549 }
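
/*
 * Example (sketch): the PHY helpers below pass a per-function mask such
 * as swfwphysem[sc->sc_funcid]; software claims the resource by setting
 * mask << SWFW_SOFT_SHIFT in SW_FW_SYNC, and only when neither that bit
 * nor the firmware's bit (mask << SWFW_FIRM_SHIFT) is already held.
 */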
   15550 
   15551 static void
   15552 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15553 {
   15554 	uint32_t swfw_sync;
   15555 
   15556 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15557 		device_xname(sc->sc_dev), __func__));
   15558 
   15559 	while (wm_get_swsm_semaphore(sc) != 0)
   15560 		continue;
   15561 
   15562 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15563 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15564 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15565 
   15566 	wm_put_swsm_semaphore(sc);
   15567 }
   15568 
   15569 static int
   15570 wm_get_nvm_80003(struct wm_softc *sc)
   15571 {
   15572 	int rv;
   15573 
   15574 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15575 		device_xname(sc->sc_dev), __func__));
   15576 
   15577 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15578 		aprint_error_dev(sc->sc_dev,
   15579 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15580 		return rv;
   15581 	}
   15582 
   15583 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15584 	    && (rv = wm_get_eecd(sc)) != 0) {
   15585 		aprint_error_dev(sc->sc_dev,
   15586 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15587 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15588 		return rv;
   15589 	}
   15590 
   15591 	return 0;
   15592 }
   15593 
   15594 static void
   15595 wm_put_nvm_80003(struct wm_softc *sc)
   15596 {
   15597 
   15598 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15599 		device_xname(sc->sc_dev), __func__));
   15600 
   15601 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15602 		wm_put_eecd(sc);
   15603 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15604 }
   15605 
   15606 static int
   15607 wm_get_nvm_82571(struct wm_softc *sc)
   15608 {
   15609 	int rv;
   15610 
   15611 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15612 		device_xname(sc->sc_dev), __func__));
   15613 
   15614 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15615 		return rv;
   15616 
   15617 	switch (sc->sc_type) {
   15618 	case WM_T_82573:
   15619 		break;
   15620 	default:
   15621 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15622 			rv = wm_get_eecd(sc);
   15623 		break;
   15624 	}
   15625 
   15626 	if (rv != 0) {
   15627 		aprint_error_dev(sc->sc_dev,
   15628 		    "%s: failed to get semaphore\n",
   15629 		    __func__);
   15630 		wm_put_swsm_semaphore(sc);
   15631 	}
   15632 
   15633 	return rv;
   15634 }
   15635 
   15636 static void
   15637 wm_put_nvm_82571(struct wm_softc *sc)
   15638 {
   15639 
   15640 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15641 		device_xname(sc->sc_dev), __func__));
   15642 
   15643 	switch (sc->sc_type) {
   15644 	case WM_T_82573:
   15645 		break;
   15646 	default:
   15647 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15648 			wm_put_eecd(sc);
   15649 		break;
   15650 	}
   15651 
   15652 	wm_put_swsm_semaphore(sc);
   15653 }
   15654 
   15655 static int
   15656 wm_get_phy_82575(struct wm_softc *sc)
   15657 {
   15658 
   15659 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15660 		device_xname(sc->sc_dev), __func__));
   15661 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15662 }
   15663 
   15664 static void
   15665 wm_put_phy_82575(struct wm_softc *sc)
   15666 {
   15667 
   15668 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15669 		device_xname(sc->sc_dev), __func__));
   15670 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15671 }
   15672 
   15673 static int
   15674 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15675 {
   15676 	uint32_t ext_ctrl;
	int timeout;
   15678 
   15679 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15680 		device_xname(sc->sc_dev), __func__));
   15681 
   15682 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15683 	for (timeout = 0; timeout < 200; timeout++) {
   15684 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15685 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15686 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15687 
   15688 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15689 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15690 			return 0;
   15691 		delay(5000);
   15692 	}
   15693 	device_printf(sc->sc_dev,
   15694 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15695 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15696 	return -1;
   15697 }
   15698 
   15699 static void
   15700 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15701 {
   15702 	uint32_t ext_ctrl;
   15703 
   15704 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15705 		device_xname(sc->sc_dev), __func__));
   15706 
   15707 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15708 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15709 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15710 
   15711 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15712 }
   15713 
   15714 static int
   15715 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15716 {
   15717 	uint32_t ext_ctrl;
   15718 	int timeout;
   15719 
   15720 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15721 		device_xname(sc->sc_dev), __func__));
   15722 	mutex_enter(sc->sc_ich_phymtx);
   15723 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15724 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15725 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15726 			break;
   15727 		delay(1000);
   15728 	}
   15729 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15730 		device_printf(sc->sc_dev,
   15731 		    "SW has already locked the resource\n");
   15732 		goto out;
   15733 	}
   15734 
   15735 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15736 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15737 	for (timeout = 0; timeout < 1000; timeout++) {
   15738 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15739 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15740 			break;
   15741 		delay(1000);
   15742 	}
   15743 	if (timeout >= 1000) {
   15744 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15745 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15746 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15747 		goto out;
   15748 	}
   15749 	return 0;
   15750 
   15751 out:
   15752 	mutex_exit(sc->sc_ich_phymtx);
   15753 	return -1;
   15754 }
   15755 
   15756 static void
   15757 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15758 {
   15759 	uint32_t ext_ctrl;
   15760 
   15761 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15762 		device_xname(sc->sc_dev), __func__));
   15763 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15764 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15765 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15766 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15767 	} else
   15768 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15769 
   15770 	mutex_exit(sc->sc_ich_phymtx);
   15771 }
   15772 
   15773 static int
   15774 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15775 {
   15776 
   15777 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15778 		device_xname(sc->sc_dev), __func__));
   15779 	mutex_enter(sc->sc_ich_nvmmtx);
   15780 
   15781 	return 0;
   15782 }
   15783 
   15784 static void
   15785 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15786 {
   15787 
   15788 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15789 		device_xname(sc->sc_dev), __func__));
   15790 	mutex_exit(sc->sc_ich_nvmmtx);
   15791 }
   15792 
   15793 static int
   15794 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15795 {
   15796 	int i = 0;
   15797 	uint32_t reg;
   15798 
   15799 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15800 		device_xname(sc->sc_dev), __func__));
   15801 
   15802 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15803 	do {
   15804 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15805 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15806 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15807 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15808 			break;
   15809 		delay(2*1000);
   15810 		i++;
   15811 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15812 
   15813 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15814 		wm_put_hw_semaphore_82573(sc);
   15815 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15816 		    device_xname(sc->sc_dev));
   15817 		return -1;
   15818 	}
   15819 
   15820 	return 0;
   15821 }
   15822 
   15823 static void
   15824 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15825 {
   15826 	uint32_t reg;
   15827 
   15828 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15829 		device_xname(sc->sc_dev), __func__));
   15830 
   15831 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15832 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15833 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15834 }
   15835 
   15836 /*
   15837  * Management mode and power management related subroutines.
   15838  * BMC, AMT, suspend/resume and EEE.
   15839  */
   15840 
   15841 #ifdef WM_WOL
   15842 static int
   15843 wm_check_mng_mode(struct wm_softc *sc)
   15844 {
   15845 	int rv;
   15846 
   15847 	switch (sc->sc_type) {
   15848 	case WM_T_ICH8:
   15849 	case WM_T_ICH9:
   15850 	case WM_T_ICH10:
   15851 	case WM_T_PCH:
   15852 	case WM_T_PCH2:
   15853 	case WM_T_PCH_LPT:
   15854 	case WM_T_PCH_SPT:
   15855 	case WM_T_PCH_CNP:
   15856 		rv = wm_check_mng_mode_ich8lan(sc);
   15857 		break;
   15858 	case WM_T_82574:
   15859 	case WM_T_82583:
   15860 		rv = wm_check_mng_mode_82574(sc);
   15861 		break;
   15862 	case WM_T_82571:
   15863 	case WM_T_82572:
   15864 	case WM_T_82573:
   15865 	case WM_T_80003:
   15866 		rv = wm_check_mng_mode_generic(sc);
   15867 		break;
   15868 	default:
    15869 		/* Nothing to do */
   15870 		rv = 0;
   15871 		break;
   15872 	}
   15873 
   15874 	return rv;
   15875 }
   15876 
   15877 static int
   15878 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15879 {
   15880 	uint32_t fwsm;
   15881 
   15882 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15883 
   15884 	if (((fwsm & FWSM_FW_VALID) != 0)
   15885 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15886 		return 1;
   15887 
   15888 	return 0;
   15889 }
   15890 
   15891 static int
   15892 wm_check_mng_mode_82574(struct wm_softc *sc)
   15893 {
   15894 	uint16_t data;
   15895 
   15896 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15897 
   15898 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15899 		return 1;
   15900 
   15901 	return 0;
   15902 }
   15903 
   15904 static int
   15905 wm_check_mng_mode_generic(struct wm_softc *sc)
   15906 {
   15907 	uint32_t fwsm;
   15908 
   15909 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15910 
   15911 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15912 		return 1;
   15913 
   15914 	return 0;
   15915 }
   15916 #endif /* WM_WOL */
   15917 
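/*
 * Return 1 if management firmware (BMC/AMT) may need packets passed
 * through to the host while the driver is running, otherwise 0.  The
 * result is used by wm_get_wakeup() to set WM_F_HAS_MANAGE.
 */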
   15918 static int
   15919 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15920 {
   15921 	uint32_t manc, fwsm, factps;
   15922 
   15923 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15924 		return 0;
   15925 
   15926 	manc = CSR_READ(sc, WMREG_MANC);
   15927 
   15928 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15929 		device_xname(sc->sc_dev), manc));
   15930 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15931 		return 0;
   15932 
   15933 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15934 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15935 		factps = CSR_READ(sc, WMREG_FACTPS);
   15936 		if (((factps & FACTPS_MNGCG) == 0)
   15937 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15938 			return 1;
   15939 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15940 		uint16_t data;
   15941 
   15942 		factps = CSR_READ(sc, WMREG_FACTPS);
   15943 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15944 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15945 			device_xname(sc->sc_dev), factps, data));
   15946 		if (((factps & FACTPS_MNGCG) == 0)
   15947 		    && ((data & NVM_CFG2_MNGM_MASK)
   15948 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15949 			return 1;
   15950 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15951 	    && ((manc & MANC_ASF_EN) == 0))
   15952 		return 1;
   15953 
   15954 	return 0;
   15955 }
   15956 
   15957 static bool
   15958 wm_phy_resetisblocked(struct wm_softc *sc)
   15959 {
   15960 	bool blocked = false;
   15961 	uint32_t reg;
   15962 	int i = 0;
   15963 
   15964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15965 		device_xname(sc->sc_dev), __func__));
   15966 
   15967 	switch (sc->sc_type) {
   15968 	case WM_T_ICH8:
   15969 	case WM_T_ICH9:
   15970 	case WM_T_ICH10:
   15971 	case WM_T_PCH:
   15972 	case WM_T_PCH2:
   15973 	case WM_T_PCH_LPT:
   15974 	case WM_T_PCH_SPT:
   15975 	case WM_T_PCH_CNP:
   15976 		do {
   15977 			reg = CSR_READ(sc, WMREG_FWSM);
   15978 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15979 				blocked = true;
   15980 				delay(10*1000);
   15981 				continue;
   15982 			}
   15983 			blocked = false;
   15984 		} while (blocked && (i++ < 30));
   15985 		return blocked;
   15987 	case WM_T_82571:
   15988 	case WM_T_82572:
   15989 	case WM_T_82573:
   15990 	case WM_T_82574:
   15991 	case WM_T_82583:
   15992 	case WM_T_80003:
   15993 		reg = CSR_READ(sc, WMREG_MANC);
   15994 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15995 			return true;
   15996 		else
   15997 			return false;
   15999 	default:
   16000 		/* No problem */
   16001 		break;
   16002 	}
   16003 
   16004 	return false;
   16005 }
   16006 
   16007 static void
   16008 wm_get_hw_control(struct wm_softc *sc)
   16009 {
   16010 	uint32_t reg;
   16011 
   16012 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16013 		device_xname(sc->sc_dev), __func__));
   16014 
   16015 	if (sc->sc_type == WM_T_82573) {
   16016 		reg = CSR_READ(sc, WMREG_SWSM);
   16017 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16018 	} else if (sc->sc_type >= WM_T_82571) {
   16019 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16020 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16021 	}
   16022 }
   16023 
   16024 static void
   16025 wm_release_hw_control(struct wm_softc *sc)
   16026 {
   16027 	uint32_t reg;
   16028 
   16029 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16030 		device_xname(sc->sc_dev), __func__));
   16031 
   16032 	if (sc->sc_type == WM_T_82573) {
   16033 		reg = CSR_READ(sc, WMREG_SWSM);
   16034 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16035 	} else if (sc->sc_type >= WM_T_82571) {
   16036 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16037 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16038 	}
   16039 }
   16040 
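/*
 * Gate (or ungate) automatic PHY configuration by hardware; PCH2 and
 * newer only.  Gating keeps the hardware from reconfiguring the PHY
 * behind the driver's back while a reset or workaround is in progress.
 */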
   16041 static void
   16042 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16043 {
   16044 	uint32_t reg;
   16045 
   16046 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16047 		device_xname(sc->sc_dev), __func__));
   16048 
   16049 	if (sc->sc_type < WM_T_PCH2)
   16050 		return;
   16051 
   16052 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16053 
   16054 	if (gate)
   16055 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16056 	else
   16057 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16058 
   16059 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16060 }
   16061 
   16062 static int
   16063 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16064 {
   16065 	uint32_t fwsm, reg;
   16066 	int rv;
   16067 
   16068 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16069 		device_xname(sc->sc_dev), __func__));
   16070 
   16071 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16072 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16073 
   16074 	/* Disable ULP */
   16075 	wm_ulp_disable(sc);
   16076 
   16077 	/* Acquire PHY semaphore */
   16078 	rv = sc->phy.acquire(sc);
   16079 	if (rv != 0) {
   16080 		DPRINTF(sc, WM_DEBUG_INIT,
   16081 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16082 		return rv;
   16083 	}
   16084 
   16085 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16086 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16087 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16088 	 */
   16089 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16090 	switch (sc->sc_type) {
   16091 	case WM_T_PCH_LPT:
   16092 	case WM_T_PCH_SPT:
   16093 	case WM_T_PCH_CNP:
   16094 		if (wm_phy_is_accessible_pchlan(sc))
   16095 			break;
   16096 
   16097 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16098 		 * forcing MAC to SMBus mode first.
   16099 		 */
   16100 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16101 		reg |= CTRL_EXT_FORCE_SMBUS;
   16102 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16103 #if 0
   16104 		/* XXX Isn't this required??? */
   16105 		CSR_WRITE_FLUSH(sc);
   16106 #endif
   16107 		/* Wait 50 milliseconds for MAC to finish any retries
   16108 		 * that it might be trying to perform from previous
   16109 		 * attempts to acknowledge any phy read requests.
   16110 		 */
   16111 		delay(50 * 1000);
   16112 		/* FALLTHROUGH */
   16113 	case WM_T_PCH2:
   16114 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16115 			break;
   16116 		/* FALLTHROUGH */
   16117 	case WM_T_PCH:
   16118 		if (sc->sc_type == WM_T_PCH)
   16119 			if ((fwsm & FWSM_FW_VALID) != 0)
   16120 				break;
   16121 
   16122 		if (wm_phy_resetisblocked(sc) == true) {
   16123 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16124 			break;
   16125 		}
   16126 
   16127 		/* Toggle LANPHYPC Value bit */
   16128 		wm_toggle_lanphypc_pch_lpt(sc);
   16129 
   16130 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16131 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16132 				break;
   16133 
    16134 			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
   16135 			 * so ensure that the MAC is also out of SMBus mode
   16136 			 */
   16137 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16138 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16139 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16140 
   16141 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16142 				break;
   16143 			rv = -1;
   16144 		}
   16145 		break;
   16146 	default:
   16147 		break;
   16148 	}
   16149 
   16150 	/* Release semaphore */
   16151 	sc->phy.release(sc);
   16152 
   16153 	if (rv == 0) {
   16154 		/* Check to see if able to reset PHY.  Print error if not */
   16155 		if (wm_phy_resetisblocked(sc)) {
   16156 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16157 			goto out;
   16158 		}
   16159 
    16160 		/* Reset the PHY before any access to it.  Doing so ensures
   16161 		 * that the PHY is in a known good state before we read/write
   16162 		 * PHY registers.  The generic reset is sufficient here,
   16163 		 * because we haven't determined the PHY type yet.
   16164 		 */
   16165 		if (wm_reset_phy(sc) != 0)
   16166 			goto out;
   16167 
   16168 		/* On a successful reset, possibly need to wait for the PHY
   16169 		 * to quiesce to an accessible state before returning control
   16170 		 * to the calling function.  If the PHY does not quiesce, then
   16171 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   16172 		 *  the PHY is in.
   16173 		 */
   16174 		if (wm_phy_resetisblocked(sc))
   16175 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16176 	}
   16177 
   16178 out:
   16179 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16180 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16181 		delay(10*1000);
   16182 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16183 	}
   16184 
   16185 	return 0;
   16186 }
   16187 
   16188 static void
   16189 wm_init_manageability(struct wm_softc *sc)
   16190 {
   16191 
   16192 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16193 		device_xname(sc->sc_dev), __func__));
   16194 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16195 
   16196 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16197 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16198 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16199 
   16200 		/* Disable hardware interception of ARP */
   16201 		manc &= ~MANC_ARP_EN;
   16202 
   16203 		/* Enable receiving management packets to the host */
   16204 		if (sc->sc_type >= WM_T_82571) {
   16205 			manc |= MANC_EN_MNG2HOST;
   16206 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16207 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16208 		}
   16209 
   16210 		CSR_WRITE(sc, WMREG_MANC, manc);
   16211 	}
   16212 }
   16213 
   16214 static void
   16215 wm_release_manageability(struct wm_softc *sc)
   16216 {
   16217 
   16218 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16219 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16220 
   16221 		manc |= MANC_ARP_EN;
   16222 		if (sc->sc_type >= WM_T_82571)
   16223 			manc &= ~MANC_EN_MNG2HOST;
   16224 
   16225 		CSR_WRITE(sc, WMREG_MANC, manc);
   16226 	}
   16227 }
   16228 
   16229 static void
   16230 wm_get_wakeup(struct wm_softc *sc)
   16231 {
   16232 
   16233 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16234 	switch (sc->sc_type) {
   16235 	case WM_T_82573:
   16236 	case WM_T_82583:
   16237 		sc->sc_flags |= WM_F_HAS_AMT;
   16238 		/* FALLTHROUGH */
   16239 	case WM_T_80003:
   16240 	case WM_T_82575:
   16241 	case WM_T_82576:
   16242 	case WM_T_82580:
   16243 	case WM_T_I350:
   16244 	case WM_T_I354:
   16245 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16246 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16247 		/* FALLTHROUGH */
   16248 	case WM_T_82541:
   16249 	case WM_T_82541_2:
   16250 	case WM_T_82547:
   16251 	case WM_T_82547_2:
   16252 	case WM_T_82571:
   16253 	case WM_T_82572:
   16254 	case WM_T_82574:
   16255 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16256 		break;
   16257 	case WM_T_ICH8:
   16258 	case WM_T_ICH9:
   16259 	case WM_T_ICH10:
   16260 	case WM_T_PCH:
   16261 	case WM_T_PCH2:
   16262 	case WM_T_PCH_LPT:
   16263 	case WM_T_PCH_SPT:
   16264 	case WM_T_PCH_CNP:
   16265 		sc->sc_flags |= WM_F_HAS_AMT;
   16266 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16267 		break;
   16268 	default:
   16269 		break;
   16270 	}
   16271 
   16272 	/* 1: HAS_MANAGE */
   16273 	if (wm_enable_mng_pass_thru(sc) != 0)
   16274 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16275 
   16276 	/*
    16277 	 * Note that the WOL flag is set after the EEPROM reset code
    16278 	 * has run.
   16279 	 */
   16280 }
   16281 
   16282 /*
   16283  * Unconfigure Ultra Low Power mode.
   16284  * Only for I217 and newer (see below).
   16285  */
   16286 static int
   16287 wm_ulp_disable(struct wm_softc *sc)
   16288 {
   16289 	uint32_t reg;
   16290 	uint16_t phyreg;
   16291 	int i = 0, rv;
   16292 
   16293 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16294 		device_xname(sc->sc_dev), __func__));
   16295 	/* Exclude old devices */
   16296 	if ((sc->sc_type < WM_T_PCH_LPT)
   16297 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16298 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16299 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16300 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16301 		return 0;
   16302 
   16303 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16304 		/* Request ME un-configure ULP mode in the PHY */
   16305 		reg = CSR_READ(sc, WMREG_H2ME);
   16306 		reg &= ~H2ME_ULP;
   16307 		reg |= H2ME_ENFORCE_SETTINGS;
   16308 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16309 
   16310 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16311 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16312 			if (i++ == 30) {
   16313 				device_printf(sc->sc_dev, "%s timed out\n",
   16314 				    __func__);
   16315 				return -1;
   16316 			}
   16317 			delay(10 * 1000);
   16318 		}
   16319 		reg = CSR_READ(sc, WMREG_H2ME);
   16320 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16321 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16322 
   16323 		return 0;
   16324 	}
   16325 
   16326 	/* Acquire semaphore */
   16327 	rv = sc->phy.acquire(sc);
   16328 	if (rv != 0) {
   16329 		DPRINTF(sc, WM_DEBUG_INIT,
   16330 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16331 		return rv;
   16332 	}
   16333 
   16334 	/* Toggle LANPHYPC */
   16335 	wm_toggle_lanphypc_pch_lpt(sc);
   16336 
   16337 	/* Unforce SMBus mode in PHY */
   16338 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16339 	if (rv != 0) {
   16340 		uint32_t reg2;
   16341 
   16342 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16343 		    __func__);
   16344 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16345 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16346 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16347 		delay(50 * 1000);
   16348 
   16349 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16350 		    &phyreg);
   16351 		if (rv != 0)
   16352 			goto release;
   16353 	}
   16354 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16355 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16356 
   16357 	/* Unforce SMBus mode in MAC */
   16358 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16359 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16360 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16361 
   16362 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16363 	if (rv != 0)
   16364 		goto release;
   16365 	phyreg |= HV_PM_CTRL_K1_ENA;
   16366 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16367 
   16368 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16369 	    &phyreg);
   16370 	if (rv != 0)
   16371 		goto release;
   16372 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16373 	    | I218_ULP_CONFIG1_STICKY_ULP
   16374 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16375 	    | I218_ULP_CONFIG1_WOL_HOST
   16376 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16377 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16378 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16379 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16380 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16381 	phyreg |= I218_ULP_CONFIG1_START;
   16382 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16383 
   16384 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16385 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16386 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16387 
   16388 release:
   16389 	/* Release semaphore */
   16390 	sc->phy.release(sc);
   16391 	wm_gmii_reset(sc);
   16392 	delay(50 * 1000);
   16393 
   16394 	return rv;
   16395 }
   16396 
   16397 /* WOL in the newer chipset interfaces (pchlan) */
   16398 static int
   16399 wm_enable_phy_wakeup(struct wm_softc *sc)
   16400 {
   16401 	device_t dev = sc->sc_dev;
   16402 	uint32_t mreg, moff;
   16403 	uint16_t wuce, wuc, wufc, preg;
   16404 	int i, rv;
   16405 
   16406 	KASSERT(sc->sc_type >= WM_T_PCH);
   16407 
   16408 	/* Copy MAC RARs to PHY RARs */
   16409 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16410 
   16411 	/* Activate PHY wakeup */
   16412 	rv = sc->phy.acquire(sc);
   16413 	if (rv != 0) {
   16414 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16415 		    __func__);
   16416 		return rv;
   16417 	}
   16418 
   16419 	/*
   16420 	 * Enable access to PHY wakeup registers.
   16421 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16422 	 */
   16423 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16424 	if (rv != 0) {
   16425 		device_printf(dev,
   16426 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16427 		goto release;
   16428 	}
   16429 
   16430 	/* Copy MAC MTA to PHY MTA */
   16431 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16432 		uint16_t lo, hi;
   16433 
   16434 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16435 		lo = (uint16_t)(mreg & 0xffff);
   16436 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16437 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16438 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16439 	}
   16440 
   16441 	/* Configure PHY Rx Control register */
   16442 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16443 	mreg = CSR_READ(sc, WMREG_RCTL);
   16444 	if (mreg & RCTL_UPE)
   16445 		preg |= BM_RCTL_UPE;
   16446 	if (mreg & RCTL_MPE)
   16447 		preg |= BM_RCTL_MPE;
   16448 	preg &= ~(BM_RCTL_MO_MASK);
   16449 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16450 	if (moff != 0)
   16451 		preg |= moff << BM_RCTL_MO_SHIFT;
   16452 	if (mreg & RCTL_BAM)
   16453 		preg |= BM_RCTL_BAM;
   16454 	if (mreg & RCTL_PMCF)
   16455 		preg |= BM_RCTL_PMCF;
   16456 	mreg = CSR_READ(sc, WMREG_CTRL);
   16457 	if (mreg & CTRL_RFCE)
   16458 		preg |= BM_RCTL_RFCE;
   16459 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16460 
   16461 	wuc = WUC_APME | WUC_PME_EN;
   16462 	wufc = WUFC_MAG;
   16463 	/* Enable PHY wakeup in MAC register */
   16464 	CSR_WRITE(sc, WMREG_WUC,
   16465 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16466 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16467 
   16468 	/* Configure and enable PHY wakeup in PHY registers */
   16469 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16470 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16471 
   16472 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16473 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16474 
   16475 release:
   16476 	sc->phy.release(sc);
   16477 
    16478 	return rv;
   16479 }
   16480 
   16481 /* Power down workaround on D3 */
   16482 static void
   16483 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16484 {
   16485 	uint32_t reg;
   16486 	uint16_t phyreg;
   16487 	int i;
   16488 
   16489 	for (i = 0; i < 2; i++) {
   16490 		/* Disable link */
   16491 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16492 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16493 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16494 
   16495 		/*
   16496 		 * Call gig speed drop workaround on Gig disable before
   16497 		 * accessing any PHY registers
   16498 		 */
   16499 		if (sc->sc_type == WM_T_ICH8)
   16500 			wm_gig_downshift_workaround_ich8lan(sc);
   16501 
   16502 		/* Write VR power-down enable */
   16503 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16504 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16505 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16506 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16507 
   16508 		/* Read it back and test */
   16509 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16510 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16511 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16512 			break;
   16513 
   16514 		/* Issue PHY reset and repeat at most one more time */
   16515 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16516 	}
   16517 }
   16518 
   16519 /*
   16520  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16521  *  @sc: pointer to the HW structure
   16522  *
   16523  *  During S0 to Sx transition, it is possible the link remains at gig
   16524  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16525  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16526  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16527  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16528  *  needs to be written.
    16529  *  Parts that support (and are linked to a partner that supports) EEE
    16530  *  at 100Mbps should disable LPLU, since 100Mbps w/ EEE requires less
    16531  *  power than 10Mbps w/o EEE.
   16532  */
   16533 static void
   16534 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16535 {
   16536 	device_t dev = sc->sc_dev;
   16537 	struct ethercom *ec = &sc->sc_ethercom;
   16538 	uint32_t phy_ctrl;
   16539 	int rv;
   16540 
   16541 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16542 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16543 
   16544 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16545 
   16546 	if (sc->sc_phytype == WMPHY_I217) {
   16547 		uint16_t devid = sc->sc_pcidevid;
   16548 
   16549 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16550 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16551 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16552 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16553 		    (sc->sc_type >= WM_T_PCH_SPT))
   16554 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16555 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16556 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16557 
   16558 		if (sc->phy.acquire(sc) != 0)
   16559 			goto out;
   16560 
   16561 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16562 			uint16_t eee_advert;
   16563 
   16564 			rv = wm_read_emi_reg_locked(dev,
   16565 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16566 			if (rv)
   16567 				goto release;
   16568 
   16569 			/*
   16570 			 * Disable LPLU if both link partners support 100BaseT
   16571 			 * EEE and 100Full is advertised on both ends of the
   16572 			 * link, and enable Auto Enable LPI since there will
   16573 			 * be no driver to enable LPI while in Sx.
   16574 			 */
   16575 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16576 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16577 				uint16_t anar, phy_reg;
   16578 
   16579 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16580 				    &anar);
   16581 				if (anar & ANAR_TX_FD) {
   16582 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16583 					    PHY_CTRL_NOND0A_LPLU);
   16584 
   16585 					/* Set Auto Enable LPI after link up */
   16586 					sc->phy.readreg_locked(dev, 2,
   16587 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16588 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16589 					sc->phy.writereg_locked(dev, 2,
   16590 					    I217_LPI_GPIO_CTRL, phy_reg);
   16591 				}
   16592 			}
   16593 		}
   16594 
   16595 		/*
   16596 		 * For i217 Intel Rapid Start Technology support,
   16597 		 * when the system is going into Sx and no manageability engine
   16598 		 * is present, the driver must configure proxy to reset only on
   16599 		 * power good.	LPI (Low Power Idle) state must also reset only
   16600 		 * on power good, as well as the MTA (Multicast table array).
   16601 		 * The SMBus release must also be disabled on LCD reset.
   16602 		 */
   16603 
   16604 		/*
   16605 		 * Enable MTA to reset for Intel Rapid Start Technology
   16606 		 * Support
   16607 		 */
   16608 
   16609 release:
   16610 		sc->phy.release(sc);
   16611 	}
   16612 out:
   16613 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16614 
   16615 	if (sc->sc_type == WM_T_ICH8)
   16616 		wm_gig_downshift_workaround_ich8lan(sc);
   16617 
   16618 	if (sc->sc_type >= WM_T_PCH) {
   16619 		wm_oem_bits_config_ich8lan(sc, false);
   16620 
   16621 		/* Reset PHY to activate OEM bits on 82577/8 */
   16622 		if (sc->sc_type == WM_T_PCH)
   16623 			wm_reset_phy(sc);
   16624 
   16625 		if (sc->phy.acquire(sc) != 0)
   16626 			return;
   16627 		wm_write_smbus_addr(sc);
   16628 		sc->phy.release(sc);
   16629 	}
   16630 }
   16631 
   16632 /*
   16633  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16634  *  @sc: pointer to the HW structure
   16635  *
   16636  *  During Sx to S0 transitions on non-managed devices or managed devices
   16637  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16638  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16639  *  the PHY.
   16640  *  On i217, setup Intel Rapid Start Technology.
   16641  */
   16642 static int
   16643 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16644 {
   16645 	device_t dev = sc->sc_dev;
   16646 	int rv;
   16647 
   16648 	if (sc->sc_type < WM_T_PCH2)
   16649 		return 0;
   16650 
   16651 	rv = wm_init_phy_workarounds_pchlan(sc);
   16652 	if (rv != 0)
   16653 		return rv;
   16654 
   16655 	/* For i217 Intel Rapid Start Technology support when the system
    16656 	 * is transitioning from Sx and no manageability engine is present,
   16657 	 * configure SMBus to restore on reset, disable proxy, and enable
   16658 	 * the reset on MTA (Multicast table array).
   16659 	 */
   16660 	if (sc->sc_phytype == WMPHY_I217) {
   16661 		uint16_t phy_reg;
   16662 
   16663 		rv = sc->phy.acquire(sc);
   16664 		if (rv != 0)
   16665 			return rv;
   16666 
   16667 		/* Clear Auto Enable LPI after link up */
   16668 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16669 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16670 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16671 
   16672 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16673 			/* Restore clear on SMB if no manageability engine
   16674 			 * is present
   16675 			 */
   16676 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16677 			    &phy_reg);
   16678 			if (rv != 0)
   16679 				goto release;
   16680 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16681 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16682 
   16683 			/* Disable Proxy */
   16684 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16685 		}
   16686 		/* Enable reset on MTA */
    16687 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16688 		if (rv != 0)
   16689 			goto release;
   16690 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16691 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16692 
   16693 release:
   16694 		sc->phy.release(sc);
   16695 		return rv;
   16696 	}
   16697 
   16698 	return 0;
   16699 }
   16700 
   16701 static void
   16702 wm_enable_wakeup(struct wm_softc *sc)
   16703 {
   16704 	uint32_t reg, pmreg;
   16705 	pcireg_t pmode;
   16706 	int rv = 0;
   16707 
   16708 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16709 		device_xname(sc->sc_dev), __func__));
   16710 
   16711 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16712 	    &pmreg, NULL) == 0)
   16713 		return;
   16714 
   16715 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16716 		goto pme;
   16717 
   16718 	/* Advertise the wakeup capability */
   16719 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16720 	    | CTRL_SWDPIN(3));
   16721 
   16722 	/* Keep the laser running on fiber adapters */
   16723 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16724 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16725 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16726 		reg |= CTRL_EXT_SWDPIN(3);
   16727 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16728 	}
   16729 
   16730 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16731 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16732 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16733 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16734 		wm_suspend_workarounds_ich8lan(sc);
   16735 
   16736 #if 0	/* For the multicast packet */
   16737 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16738 	reg |= WUFC_MC;
   16739 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16740 #endif
   16741 
   16742 	if (sc->sc_type >= WM_T_PCH) {
   16743 		rv = wm_enable_phy_wakeup(sc);
   16744 		if (rv != 0)
   16745 			goto pme;
   16746 	} else {
   16747 		/* Enable wakeup by the MAC */
   16748 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16749 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16750 	}
   16751 
   16752 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16753 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16754 		|| (sc->sc_type == WM_T_PCH2))
   16755 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16756 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16757 
   16758 pme:
   16759 	/* Request PME */
   16760 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16761 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16762 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16763 		/* For WOL */
   16764 		pmode |= PCI_PMCSR_PME_EN;
   16765 	} else {
   16766 		/* Disable WOL */
   16767 		pmode &= ~PCI_PMCSR_PME_EN;
   16768 	}
   16769 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16770 }
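
/*
 * Illustrative sketch only, not part of the driver: a resume-side
 * counterpart would undo the PMCSR programming above by clearing
 * PME_EN and acknowledging any pending PME status (PCI_PMCSR_PME_STS
 * is write-1-to-clear, as noted in wm_enable_wakeup()).
 */
#if 0
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
	pmode &= ~PCI_PMCSR_PME_EN;	/* stop requesting PME */
	pmode |= PCI_PMCSR_PME_STS;	/* W1C: ack any pending PME */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
#endif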
   16771 
   16772 /* Disable ASPM L0s and/or L1 for workaround */
   16773 static void
   16774 wm_disable_aspm(struct wm_softc *sc)
   16775 {
   16776 	pcireg_t reg, mask = 0;
    16777 	const char *str = "";
   16778 
   16779 	/*
    16780 	 * Only for PCIe devices which have the PCIe capability structure
    16781 	 * in the PCI config space.
   16782 	 */
   16783 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16784 		return;
   16785 
   16786 	switch (sc->sc_type) {
   16787 	case WM_T_82571:
   16788 	case WM_T_82572:
   16789 		/*
   16790 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16791 		 * State Power management L1 State (ASPM L1).
   16792 		 */
   16793 		mask = PCIE_LCSR_ASPM_L1;
   16794 		str = "L1 is";
   16795 		break;
   16796 	case WM_T_82573:
   16797 	case WM_T_82574:
   16798 	case WM_T_82583:
   16799 		/*
   16800 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16801 		 *
    16802 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16803 		 * some chipsets.  The documents for the 82574 and 82583 say
    16804 		 * that disabling L0s only with those specific chipsets is
    16805 		 * sufficient, but we follow what the Intel em driver does.
   16806 		 *
   16807 		 * References:
   16808 		 * Errata 8 of the Specification Update of i82573.
   16809 		 * Errata 20 of the Specification Update of i82574.
   16810 		 * Errata 9 of the Specification Update of i82583.
   16811 		 */
   16812 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16813 		str = "L0s and L1 are";
   16814 		break;
   16815 	default:
   16816 		return;
   16817 	}
   16818 
   16819 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16820 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16821 	reg &= ~mask;
   16822 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16823 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16824 
   16825 	/* Print only in wm_attach() */
   16826 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16827 		aprint_verbose_dev(sc->sc_dev,
   16828 		    "ASPM %s disabled to workaround the errata.\n", str);
   16829 }
   16830 
   16831 /* LPLU */
   16832 
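/*
 * LPLU (Low Power Link Up) makes the PHY bring the link up at the
 * lowest possible speed to save power.  D0 LPLU is disabled below so
 * that full speed is available while the interface is running; the
 * Sx/D3 behaviour is configured via PHY_CTRL/OEM bits in the suspend
 * path.
 */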
   16833 static void
   16834 wm_lplu_d0_disable(struct wm_softc *sc)
   16835 {
   16836 	struct mii_data *mii = &sc->sc_mii;
   16837 	uint32_t reg;
   16838 	uint16_t phyval;
   16839 
   16840 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16841 		device_xname(sc->sc_dev), __func__));
   16842 
   16843 	if (sc->sc_phytype == WMPHY_IFE)
   16844 		return;
   16845 
   16846 	switch (sc->sc_type) {
   16847 	case WM_T_82571:
   16848 	case WM_T_82572:
   16849 	case WM_T_82573:
   16850 	case WM_T_82575:
   16851 	case WM_T_82576:
   16852 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16853 		phyval &= ~PMR_D0_LPLU;
   16854 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16855 		break;
   16856 	case WM_T_82580:
   16857 	case WM_T_I350:
   16858 	case WM_T_I210:
   16859 	case WM_T_I211:
   16860 		reg = CSR_READ(sc, WMREG_PHPM);
   16861 		reg &= ~PHPM_D0A_LPLU;
   16862 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16863 		break;
   16864 	case WM_T_82574:
   16865 	case WM_T_82583:
   16866 	case WM_T_ICH8:
   16867 	case WM_T_ICH9:
   16868 	case WM_T_ICH10:
   16869 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16870 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16871 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16872 		CSR_WRITE_FLUSH(sc);
   16873 		break;
   16874 	case WM_T_PCH:
   16875 	case WM_T_PCH2:
   16876 	case WM_T_PCH_LPT:
   16877 	case WM_T_PCH_SPT:
   16878 	case WM_T_PCH_CNP:
   16879 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16880 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16881 		if (wm_phy_resetisblocked(sc) == false)
   16882 			phyval |= HV_OEM_BITS_ANEGNOW;
   16883 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16884 		break;
   16885 	default:
   16886 		break;
   16887 	}
   16888 }
   16889 
   16890 /* EEE */
   16891 
   16892 static int
   16893 wm_set_eee_i350(struct wm_softc *sc)
   16894 {
   16895 	struct ethercom *ec = &sc->sc_ethercom;
   16896 	uint32_t ipcnfg, eeer;
   16897 	uint32_t ipcnfg_mask
   16898 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16899 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16900 
   16901 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16902 
   16903 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16904 	eeer = CSR_READ(sc, WMREG_EEER);
   16905 
   16906 	/* Enable or disable per user setting */
   16907 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16908 		ipcnfg |= ipcnfg_mask;
   16909 		eeer |= eeer_mask;
   16910 	} else {
   16911 		ipcnfg &= ~ipcnfg_mask;
   16912 		eeer &= ~eeer_mask;
   16913 	}
   16914 
   16915 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16916 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16917 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16918 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16919 
   16920 	return 0;
   16921 }
   16922 
   16923 static int
   16924 wm_set_eee_pchlan(struct wm_softc *sc)
   16925 {
   16926 	device_t dev = sc->sc_dev;
   16927 	struct ethercom *ec = &sc->sc_ethercom;
   16928 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16929 	int rv;
   16930 
   16931 	switch (sc->sc_phytype) {
   16932 	case WMPHY_82579:
   16933 		lpa = I82579_EEE_LP_ABILITY;
   16934 		pcs_status = I82579_EEE_PCS_STATUS;
   16935 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16936 		break;
   16937 	case WMPHY_I217:
   16938 		lpa = I217_EEE_LP_ABILITY;
   16939 		pcs_status = I217_EEE_PCS_STATUS;
   16940 		adv_addr = I217_EEE_ADVERTISEMENT;
   16941 		break;
   16942 	default:
   16943 		return 0;
   16944 	}
   16945 
   16946 	rv = sc->phy.acquire(sc);
   16947 	if (rv != 0) {
   16948 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16949 		return rv;
   16950 	}
   16951 
   16952 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16953 	if (rv != 0)
   16954 		goto release;
   16955 
   16956 	/* Clear bits that enable EEE in various speeds */
   16957 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16958 
   16959 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16960 		/* Save off link partner's EEE ability */
   16961 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16962 		if (rv != 0)
   16963 			goto release;
   16964 
   16965 		/* Read EEE advertisement */
   16966 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16967 			goto release;
   16968 
   16969 		/*
   16970 		 * Enable EEE only for speeds in which the link partner is
   16971 		 * EEE capable and for which we advertise EEE.
   16972 		 */
   16973 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16974 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16975 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16976 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16977 			if ((data & ANLPAR_TX_FD) != 0)
   16978 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16979 			else {
   16980 				/*
   16981 				 * EEE is not supported in 100Half, so ignore
   16982 				 * partner's EEE in 100 ability if full-duplex
   16983 				 * is not advertised.
   16984 				 */
   16985 				sc->eee_lp_ability
   16986 				    &= ~AN_EEEADVERT_100_TX;
   16987 			}
   16988 		}
   16989 	}
   16990 
   16991 	if (sc->sc_phytype == WMPHY_82579) {
   16992 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16993 		if (rv != 0)
   16994 			goto release;
   16995 
   16996 		data &= ~I82579_LPI_PLL_SHUT_100;
   16997 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16998 	}
   16999 
   17000 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   17001 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   17002 		goto release;
   17003 
   17004 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17005 release:
   17006 	sc->phy.release(sc);
   17007 
   17008 	return rv;
   17009 }
   17010 
   17011 static int
   17012 wm_set_eee(struct wm_softc *sc)
   17013 {
   17014 	struct ethercom *ec = &sc->sc_ethercom;
   17015 
   17016 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17017 		return 0;
   17018 
   17019 	if (sc->sc_type == WM_T_I354) {
   17020 		/* I354 uses an external PHY */
   17021 		return 0; /* not yet */
   17022 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17023 		return wm_set_eee_i350(sc);
   17024 	else if (sc->sc_type >= WM_T_PCH2)
   17025 		return wm_set_eee_pchlan(sc);
   17026 
   17027 	return 0;
   17028 }
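
/*
 * Illustrative sketch only, a hypothetical call site: wm_set_eee() is
 * intended to be called after ec_capenable changes so that the EEE
 * configuration follows the user's setting, e.g.:
 */
#if 0
	if (wm_set_eee(sc) != 0)
		device_printf(sc->sc_dev, "failed to update EEE setting\n");
#endif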
   17029 
   17030 /*
   17031  * Workarounds (mainly PHY related).
   17032  * Basically, PHY's workarounds are in the PHY drivers.
   17033  */
   17034 
   17035 /* Workaround for 82566 Kumeran PCS lock loss */
   17036 static int
   17037 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17038 {
   17039 	struct mii_data *mii = &sc->sc_mii;
   17040 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17041 	int i, reg, rv;
   17042 	uint16_t phyreg;
   17043 
   17044 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17045 		device_xname(sc->sc_dev), __func__));
   17046 
   17047 	/* If the link is not up, do nothing */
   17048 	if ((status & STATUS_LU) == 0)
   17049 		return 0;
   17050 
   17051 	/* Nothing to do if the link is other than 1Gbps */
   17052 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17053 		return 0;
   17054 
   17055 	for (i = 0; i < 10; i++) {
   17056 		/* read twice */
   17057 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17058 		if (rv != 0)
   17059 			return rv;
   17060 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17061 		if (rv != 0)
   17062 			return rv;
   17063 
   17064 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17065 			goto out;	/* GOOD! */
   17066 
   17067 		/* Reset the PHY */
   17068 		wm_reset_phy(sc);
   17069 		delay(5*1000);
   17070 	}
   17071 
   17072 	/* Disable GigE link negotiation */
   17073 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17074 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17075 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17076 
   17077 	/*
   17078 	 * Call gig speed drop workaround on Gig disable before accessing
   17079 	 * any PHY registers.
   17080 	 */
   17081 	wm_gig_downshift_workaround_ich8lan(sc);
   17082 
   17083 out:
   17084 	return 0;
   17085 }
   17086 
   17087 /*
   17088  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17089  *  @sc: pointer to the HW structure
   17090  *
    17091  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   17092  *  LPLU, Gig disable, MDIC PHY reset):
   17093  *    1) Set Kumeran Near-end loopback
   17094  *    2) Clear Kumeran Near-end loopback
   17095  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17096  */
   17097 static void
   17098 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17099 {
   17100 	uint16_t kmreg;
   17101 
   17102 	/* Only for igp3 */
   17103 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17104 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17105 			return;
   17106 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17107 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17108 			return;
   17109 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17110 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17111 	}
   17112 }
   17113 
   17114 /*
   17115  * Workaround for pch's PHYs
   17116  * XXX should be moved to new PHY driver?
   17117  */
   17118 static int
   17119 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17120 {
   17121 	device_t dev = sc->sc_dev;
   17122 	struct mii_data *mii = &sc->sc_mii;
   17123 	struct mii_softc *child;
   17124 	uint16_t phy_data, phyrev = 0;
   17125 	int phytype = sc->sc_phytype;
   17126 	int rv;
   17127 
   17128 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17129 		device_xname(dev), __func__));
   17130 	KASSERT(sc->sc_type == WM_T_PCH);
   17131 
   17132 	/* Set MDIO slow mode before any other MDIO access */
   17133 	if (phytype == WMPHY_82577)
   17134 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17135 			return rv;
   17136 
   17137 	child = LIST_FIRST(&mii->mii_phys);
   17138 	if (child != NULL)
   17139 		phyrev = child->mii_mpd_rev;
   17140 
    17141 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17142 	if ((child != NULL) &&
   17143 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17144 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17145 		/* Disable generation of early preamble (0x4431) */
   17146 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17147 		    &phy_data);
   17148 		if (rv != 0)
   17149 			return rv;
   17150 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17151 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17152 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17153 		    phy_data);
   17154 		if (rv != 0)
   17155 			return rv;
   17156 
   17157 		/* Preamble tuning for SSC */
   17158 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17159 		if (rv != 0)
   17160 			return rv;
   17161 	}
   17162 
   17163 	/* 82578 */
   17164 	if (phytype == WMPHY_82578) {
   17165 		/*
   17166 		 * Return registers to default by doing a soft reset then
   17167 		 * writing 0x3140 to the control register
   17168 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17169 		 */
   17170 		if ((child != NULL) && (phyrev < 2)) {
   17171 			PHY_RESET(child);
   17172 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17173 			if (rv != 0)
   17174 				return rv;
   17175 		}
   17176 	}
   17177 
   17178 	/* Select page 0 */
   17179 	if ((rv = sc->phy.acquire(sc)) != 0)
   17180 		return rv;
   17181 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17182 	sc->phy.release(sc);
   17183 	if (rv != 0)
   17184 		return rv;
   17185 
   17186 	/*
    17187 	 * Configure the K1 Si workaround during phy reset, assuming there is
    17188 	 * link, so that it disables K1 if the link is at 1Gbps.
   17189 	 */
   17190 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17191 		return rv;
   17192 
   17193 	/* Workaround for link disconnects on a busy hub in half duplex */
   17194 	rv = sc->phy.acquire(sc);
   17195 	if (rv)
   17196 		return rv;
   17197 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17198 	if (rv)
   17199 		goto release;
   17200 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17201 	    phy_data & 0x00ff);
   17202 	if (rv)
   17203 		goto release;
   17204 
   17205 	/* Set MSE higher to enable link to stay up when noise is high */
   17206 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17207 release:
   17208 	sc->phy.release(sc);
   17209 
   17210 	return rv;
   17211 }
   17212 
   17213 /*
   17214  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17215  *  @sc:   pointer to the HW structure
   17216  */
   17217 static void
   17218 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17219 {
   17220 
   17221 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17222 		device_xname(sc->sc_dev), __func__));
   17223 
   17224 	if (sc->phy.acquire(sc) != 0)
   17225 		return;
   17226 
   17227 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17228 
   17229 	sc->phy.release(sc);
   17230 }
   17231 
   17232 static void
   17233 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17234 {
   17235 	device_t dev = sc->sc_dev;
   17236 	uint32_t mac_reg;
   17237 	uint16_t i, wuce;
   17238 	int count;
   17239 
   17240 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17241 		device_xname(dev), __func__));
   17242 
   17243 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17244 		return;
   17245 
   17246 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17247 	count = wm_rar_count(sc);
   17248 	for (i = 0; i < count; i++) {
   17249 		uint16_t lo, hi;
   17250 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17251 		lo = (uint16_t)(mac_reg & 0xffff);
   17252 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17253 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17254 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17255 
   17256 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17257 		lo = (uint16_t)(mac_reg & 0xffff);
   17258 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17259 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17260 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17261 	}
   17262 
   17263 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17264 }
   17265 
   17266 /*
   17267  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17268  *  with 82579 PHY
   17269  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17270  */
   17271 static int
   17272 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17273 {
   17274 	device_t dev = sc->sc_dev;
   17275 	int rar_count;
   17276 	int rv;
   17277 	uint32_t mac_reg;
   17278 	uint16_t dft_ctrl, data;
   17279 	uint16_t i;
   17280 
   17281 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17282 		device_xname(dev), __func__));
   17283 
   17284 	if (sc->sc_type < WM_T_PCH2)
   17285 		return 0;
   17286 
   17287 	/* Acquire PHY semaphore */
   17288 	rv = sc->phy.acquire(sc);
   17289 	if (rv != 0)
   17290 		return rv;
   17291 
   17292 	/* Disable Rx path while enabling/disabling workaround */
   17293 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17294 	if (rv != 0)
   17295 		goto out;
   17296 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17297 	    dft_ctrl | (1 << 14));
   17298 	if (rv != 0)
   17299 		goto out;
   17300 
   17301 	if (enable) {
   17302 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17303 		 * SHRAL/H) and initial CRC values to the MAC
   17304 		 */
   17305 		rar_count = wm_rar_count(sc);
   17306 		for (i = 0; i < rar_count; i++) {
   17307 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17308 			uint32_t addr_high, addr_low;
   17309 
   17310 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17311 			if (!(addr_high & RAL_AV))
   17312 				continue;
   17313 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17314 			mac_addr[0] = (addr_low & 0xFF);
   17315 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17316 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17317 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17318 			mac_addr[4] = (addr_high & 0xFF);
   17319 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17320 
   17321 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17322 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17323 		}
   17324 
   17325 		/* Write Rx addresses to the PHY */
   17326 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17327 	}
   17328 
   17329 	/*
   17330 	 * If enable ==
   17331 	 *	true: Enable jumbo frame workaround in the MAC.
   17332 	 *	false: Write MAC register values back to h/w defaults.
   17333 	 */
   17334 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17335 	if (enable) {
   17336 		mac_reg &= ~(1 << 14);
   17337 		mac_reg |= (7 << 15);
   17338 	} else
   17339 		mac_reg &= ~(0xf << 14);
   17340 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17341 
   17342 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17343 	if (enable) {
   17344 		mac_reg |= RCTL_SECRC;
   17345 		sc->sc_rctl |= RCTL_SECRC;
   17346 		sc->sc_flags |= WM_F_CRC_STRIP;
   17347 	} else {
   17348 		mac_reg &= ~RCTL_SECRC;
   17349 		sc->sc_rctl &= ~RCTL_SECRC;
   17350 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17351 	}
   17352 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17353 
   17354 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17355 	if (rv != 0)
   17356 		goto out;
   17357 	if (enable)
   17358 		data |= 1 << 0;
   17359 	else
   17360 		data &= ~(1 << 0);
   17361 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17362 	if (rv != 0)
   17363 		goto out;
   17364 
   17365 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17366 	if (rv != 0)
   17367 		goto out;
   17368 	/*
    17369 	 * XXX FreeBSD and Linux do the same thing: they set the same value
    17370 	 * in both the enable and the disable case. Is that correct?
   17371 	 */
   17372 	data &= ~(0xf << 8);
   17373 	data |= (0xb << 8);
   17374 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17375 	if (rv != 0)
   17376 		goto out;
   17377 
   17378 	/*
   17379 	 * If enable ==
   17380 	 *	true: Enable jumbo frame workaround in the PHY.
   17381 	 *	false: Write PHY register values back to h/w defaults.
   17382 	 */
   17383 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17384 	if (rv != 0)
   17385 		goto out;
   17386 	data &= ~(0x7F << 5);
   17387 	if (enable)
   17388 		data |= (0x37 << 5);
   17389 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17390 	if (rv != 0)
   17391 		goto out;
   17392 
   17393 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17394 	if (rv != 0)
   17395 		goto out;
   17396 	if (enable)
   17397 		data &= ~(1 << 13);
   17398 	else
   17399 		data |= (1 << 13);
   17400 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17401 	if (rv != 0)
   17402 		goto out;
   17403 
   17404 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17405 	if (rv != 0)
   17406 		goto out;
   17407 	data &= ~(0x3FF << 2);
   17408 	if (enable)
   17409 		data |= (I82579_TX_PTR_GAP << 2);
   17410 	else
   17411 		data |= (0x8 << 2);
   17412 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17413 	if (rv != 0)
   17414 		goto out;
   17415 
   17416 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17417 	    enable ? 0xf100 : 0x7e00);
   17418 	if (rv != 0)
   17419 		goto out;
   17420 
   17421 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17422 	if (rv != 0)
   17423 		goto out;
   17424 	if (enable)
   17425 		data |= 1 << 10;
   17426 	else
   17427 		data &= ~(1 << 10);
   17428 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17429 	if (rv != 0)
   17430 		goto out;
   17431 
   17432 	/* Re-enable Rx path after enabling/disabling workaround */
   17433 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17434 	    dft_ctrl & ~(1 << 14));
   17435 
   17436 out:
   17437 	sc->phy.release(sc);
   17438 
   17439 	return rv;
   17440 }
   17441 
   17442 /*
   17443  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   17444  *  done after every PHY reset.
   17445  */
   17446 static int
   17447 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17448 {
   17449 	device_t dev = sc->sc_dev;
   17450 	int rv;
   17451 
   17452 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17453 		device_xname(dev), __func__));
   17454 	KASSERT(sc->sc_type == WM_T_PCH2);
   17455 
   17456 	/* Set MDIO slow mode before any other MDIO access */
   17457 	rv = wm_set_mdio_slow_mode_hv(sc);
   17458 	if (rv != 0)
   17459 		return rv;
   17460 
   17461 	rv = sc->phy.acquire(sc);
   17462 	if (rv != 0)
   17463 		return rv;
   17464 	/* Set MSE higher to enable link to stay up when noise is high */
   17465 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17466 	if (rv != 0)
   17467 		goto release;
   17468 	/* Drop link after 5 times MSE threshold was reached */
   17469 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17470 release:
   17471 	sc->phy.release(sc);
   17472 
   17473 	return rv;
   17474 }
   17475 
   17476 /**
   17477  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17478  *  @link: link up bool flag
   17479  *
   17480  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   17481  *  preventing further DMA write requests.  Workaround the issue by disabling
    17482  *  preventing further DMA write requests.  Work around the issue by disabling
    17483  *  the de-assertion of the clock request when in 1Gbps mode.
   17484  *  speeds in order to avoid Tx hangs.
   17485  **/
   17486 static int
   17487 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17488 {
   17489 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17490 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17491 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17492 	uint16_t phyreg;
   17493 
   17494 	if (link && (speed == STATUS_SPEED_1000)) {
   17495 		int rv;
   17496 
   17497 		rv = sc->phy.acquire(sc);
   17498 		if (rv != 0)
   17499 			return rv;
   17500 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17501 		    &phyreg);
   17502 		if (rv != 0)
   17503 			goto release;
   17504 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17505 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17506 		if (rv != 0)
   17507 			goto release;
   17508 		delay(20);
   17509 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17510 
   17511 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17512 		    &phyreg);
   17513 release:
   17514 		sc->phy.release(sc);
   17515 		return rv;
   17516 	}
   17517 
   17518 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17519 
   17520 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17521 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17522 	    || !link
   17523 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17524 		goto update_fextnvm6;
   17525 
   17526 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17527 
   17528 	/* Clear link status transmit timeout */
   17529 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17530 	if (speed == STATUS_SPEED_100) {
   17531 		/* Set inband Tx timeout to 5x10us for 100Half */
   17532 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17533 
   17534 		/* Do not extend the K1 entry latency for 100Half */
   17535 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17536 	} else {
   17537 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17538 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17539 
   17540 		/* Extend the K1 entry latency for 10 Mbps */
   17541 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17542 	}
   17543 
   17544 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17545 
   17546 update_fextnvm6:
   17547 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17548 	return 0;
   17549 }
   17550 
   17551 /*
   17552  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17553  *  @sc:   pointer to the HW structure
   17554  *  @link: link up bool flag
   17555  *
   17556  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17557  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17558  *  If link is down, the function will restore the default K1 setting located
   17559  *  in the NVM.
   17560  */
   17561 static int
   17562 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17563 {
   17564 	int k1_enable = sc->sc_nvm_k1_enabled;
   17565 	int rv;
   17566 
   17567 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17568 		device_xname(sc->sc_dev), __func__));
   17569 
   17570 	rv = sc->phy.acquire(sc);
   17571 	if (rv != 0)
   17572 		return rv;
   17573 
   17574 	if (link) {
   17575 		k1_enable = 0;
   17576 
   17577 		/* Link stall fix for link up */
   17578 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17579 		    0x0100);
   17580 	} else {
   17581 		/* Link stall fix for link down */
   17582 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17583 		    0x4100);
   17584 	}
   17585 
   17586 	wm_configure_k1_ich8lan(sc, k1_enable);
   17587 	sc->phy.release(sc);
   17588 
   17589 	return 0;
   17590 }
   17591 
   17592 /*
   17593  *  wm_k1_workaround_lv - K1 Si workaround
   17594  *  @sc:   pointer to the HW structure
   17595  *
    17596  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps mode.
    17597  *  Disable K1 for 1000Mbps and 100Mbps speeds.
   17598  */
   17599 static int
   17600 wm_k1_workaround_lv(struct wm_softc *sc)
   17601 {
   17602 	uint32_t reg;
   17603 	uint16_t phyreg;
   17604 	int rv;
   17605 
   17606 	if (sc->sc_type != WM_T_PCH2)
   17607 		return 0;
   17608 
   17609 	/* Set K1 beacon duration based on 10Mbps speed */
   17610 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17611 	if (rv != 0)
   17612 		return rv;
   17613 
   17614 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17615 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17616 		if (phyreg &
   17617 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17618 			/* LV 1G/100 packet drop issue workaround */
   17619 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17620 			    &phyreg);
   17621 			if (rv != 0)
   17622 				return rv;
   17623 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17624 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17625 			    phyreg);
   17626 			if (rv != 0)
   17627 				return rv;
   17628 		} else {
   17629 			/* For 10Mbps */
   17630 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17631 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17632 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17633 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17634 		}
   17635 	}
   17636 
   17637 	return 0;
   17638 }
   17639 
   17640 /*
   17641  *  wm_link_stall_workaround_hv - Si workaround
   17642  *  @sc: pointer to the HW structure
   17643  *
   17644  *  This function works around a Si bug where the link partner can get
   17645  *  a link up indication before the PHY does. If small packets are sent
   17646  *  by the link partner they can be placed in the packet buffer without
   17647  *  being properly accounted for by the PHY and will stall preventing
   17648  *  further packets from being received.  The workaround is to clear the
   17649  *  packet buffer after the PHY detects link up.
   17650  */
   17651 static int
   17652 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17653 {
   17654 	uint16_t phyreg;
   17655 
   17656 	if (sc->sc_phytype != WMPHY_82578)
   17657 		return 0;
   17658 
    17659 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17660 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17661 	if ((phyreg & BMCR_LOOP) != 0)
   17662 		return 0;
   17663 
   17664 	/* Check if link is up and at 1Gbps */
   17665 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17666 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17667 	    | BM_CS_STATUS_SPEED_MASK;
   17668 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17669 		| BM_CS_STATUS_SPEED_1000))
   17670 		return 0;
   17671 
   17672 	delay(200 * 1000);	/* XXX too big */
   17673 
    17674 	/* Flush the packets in the FIFO by forcing, then unforcing, the speed */
   17675 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17676 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17677 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17678 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17679 
   17680 	return 0;
   17681 }
   17682 
   17683 static int
   17684 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17685 {
   17686 	int rv;
   17687 
   17688 	rv = sc->phy.acquire(sc);
   17689 	if (rv != 0) {
   17690 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17691 		    __func__);
   17692 		return rv;
   17693 	}
   17694 
   17695 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17696 
   17697 	sc->phy.release(sc);
   17698 
   17699 	return rv;
   17700 }
   17701 
   17702 static int
   17703 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17704 {
   17705 	int rv;
   17706 	uint16_t reg;
   17707 
   17708 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17709 	if (rv != 0)
   17710 		return rv;
   17711 
   17712 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17713 	    reg | HV_KMRN_MDIO_SLOW);
   17714 }
   17715 
   17716 /*
   17717  *  wm_configure_k1_ich8lan - Configure K1 power state
   17718  *  @sc: pointer to the HW structure
   17719  *  @enable: K1 state to configure
   17720  *
   17721  *  Configure the K1 power state based on the provided parameter.
   17722  *  Assumes semaphore already acquired.
   17723  */
   17724 static void
   17725 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17726 {
   17727 	uint32_t ctrl, ctrl_ext, tmp;
   17728 	uint16_t kmreg;
   17729 	int rv;
   17730 
   17731 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17732 
   17733 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17734 	if (rv != 0)
   17735 		return;
   17736 
   17737 	if (k1_enable)
   17738 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17739 	else
   17740 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17741 
   17742 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17743 	if (rv != 0)
   17744 		return;
   17745 
   17746 	delay(20);
   17747 
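          	/*
          	 * Briefly force the MAC speed with the speed-bypass bit set
          	 * (mirroring Intel's reference code) so that the new K1
          	 * setting takes effect, then restore CTRL and CTRL_EXT.
          	 */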
   17748 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17749 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17750 
   17751 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17752 	tmp |= CTRL_FRCSPD;
   17753 
   17754 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17755 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17756 	CSR_WRITE_FLUSH(sc);
   17757 	delay(20);
   17758 
   17759 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17760 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17761 	CSR_WRITE_FLUSH(sc);
   17762 	delay(20);
   17763 
   17764 	return;
   17765 }
   17766 
    17767 /* Special case - the 82575 needs manual init ... */
   17768 static void
   17769 wm_reset_init_script_82575(struct wm_softc *sc)
   17770 {
   17771 	/*
    17772 	 * Remark: this is untested code - we have no board without EEPROM.
    17773 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17774 	 */
   17775 
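          	/*
          	 * Each wm_82575_write_8bit_ctlr_reg() call below pokes an
          	 * 8-bit (offset, value) pair into the controller block named
          	 * by the register argument.
          	 */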
   17776 	/* SerDes configuration via SERDESCTRL */
   17777 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17778 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17779 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17780 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17781 
   17782 	/* CCM configuration via CCMCTL register */
   17783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17785 
   17786 	/* PCIe lanes configuration */
   17787 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17788 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17789 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17790 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17791 
   17792 	/* PCIe PLL Configuration */
   17793 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17794 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17795 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17796 }
   17797 
   17798 static void
   17799 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17800 {
   17801 	uint32_t reg;
   17802 	uint16_t nvmword;
   17803 	int rv;
   17804 
   17805 	if (sc->sc_type != WM_T_82580)
   17806 		return;
   17807 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17808 		return;
   17809 
   17810 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17811 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17812 	if (rv != 0) {
   17813 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17814 		    __func__);
   17815 		return;
   17816 	}
   17817 
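          	/*
          	 * Restore the MDIO routing from the NVM word: MDICNFG_DEST
          	 * selects the external MDIO interface and MDICNFG_COM_MDIO
          	 * selects the MDIO interface shared by all ports.
          	 */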
   17818 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17819 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17820 		reg |= MDICNFG_DEST;
   17821 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17822 		reg |= MDICNFG_COM_MDIO;
   17823 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17824 }
   17825 
   17826 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17827 
   17828 static bool
   17829 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17830 {
   17831 	uint32_t reg;
   17832 	uint16_t id1, id2;
   17833 	int i, rv;
   17834 
   17835 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17836 		device_xname(sc->sc_dev), __func__));
   17837 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17838 
   17839 	id1 = id2 = 0xffff;
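          	/*
          	 * Try up to twice: the first MDIO read after the PHY comes
          	 * out of reset may return an invalid ID.
          	 */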
   17840 	for (i = 0; i < 2; i++) {
   17841 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17842 		    &id1);
   17843 		if ((rv != 0) || MII_INVALIDID(id1))
   17844 			continue;
   17845 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17846 		    &id2);
   17847 		if ((rv != 0) || MII_INVALIDID(id2))
   17848 			continue;
   17849 		break;
   17850 	}
   17851 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17852 		goto out;
   17853 
   17854 	/*
    17855 	 * In case the PHY needs to be in MDIO slow mode,
    17856 	 * set slow mode and try to get the PHY ID again.
   17857 	 */
   17858 	rv = 0;
   17859 	if (sc->sc_type < WM_T_PCH_LPT) {
   17860 		wm_set_mdio_slow_mode_hv_locked(sc);
   17861 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17862 		    &id1);
   17863 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17864 		    &id2);
   17865 	}
   17866 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17867 		device_printf(sc->sc_dev, "XXX return with false\n");
   17868 		return false;
   17869 	}
   17870 out:
   17871 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17872 		/* Only unforce SMBus if ME is not active */
   17873 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17874 			uint16_t phyreg;
   17875 
   17876 			/* Unforce SMBus mode in PHY */
   17877 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17878 			    CV_SMB_CTRL, &phyreg);
   17879 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17880 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17881 			    CV_SMB_CTRL, phyreg);
   17882 
   17883 			/* Unforce SMBus mode in MAC */
   17884 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17885 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17886 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17887 		}
   17888 	}
   17889 	return true;
   17890 }
   17891 
   17892 static void
   17893 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17894 {
   17895 	uint32_t reg;
   17896 	int i;
   17897 
   17898 	/* Set PHY Config Counter to 50msec */
   17899 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17900 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17901 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17902 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17903 
    17904 	/* Toggle LANPHYPC (forces the PHY interconnect to PCIe mode) */
   17905 	reg = CSR_READ(sc, WMREG_CTRL);
   17906 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17907 	reg &= ~CTRL_LANPHYPC_VALUE;
   17908 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17909 	CSR_WRITE_FLUSH(sc);
   17910 	delay(1000);
   17911 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17912 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17913 	CSR_WRITE_FLUSH(sc);
   17914 
   17915 	if (sc->sc_type < WM_T_PCH_LPT)
   17916 		delay(50 * 1000);
   17917 	else {
   17918 		i = 20;
   17919 
   17920 		do {
   17921 			delay(5 * 1000);
   17922 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17923 		    && i--);
   17924 
   17925 		delay(30 * 1000);
   17926 	}
   17927 }
   17928 
   17929 static int
   17930 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17931 {
   17932 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17933 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17934 	uint32_t rxa;
   17935 	uint16_t scale = 0, lat_enc = 0;
   17936 	int32_t obff_hwm = 0;
   17937 	int64_t lat_ns, value;
   17938 
   17939 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17940 		device_xname(sc->sc_dev), __func__));
   17941 
   17942 	if (link) {
   17943 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17944 		uint32_t status;
   17945 		uint16_t speed;
   17946 		pcireg_t preg;
   17947 
   17948 		status = CSR_READ(sc, WMREG_STATUS);
   17949 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17950 		case STATUS_SPEED_10:
   17951 			speed = 10;
   17952 			break;
   17953 		case STATUS_SPEED_100:
   17954 			speed = 100;
   17955 			break;
   17956 		case STATUS_SPEED_1000:
   17957 			speed = 1000;
   17958 			break;
   17959 		default:
   17960 			device_printf(sc->sc_dev, "Unknown speed "
   17961 			    "(status = %08x)\n", status);
   17962 			return -1;
   17963 		}
   17964 
   17965 		/* Rx Packet Buffer Allocation size (KB) */
   17966 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17967 
   17968 		/*
   17969 		 * Determine the maximum latency tolerated by the device.
   17970 		 *
   17971 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17972 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17973 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17974 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17975 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17976 		 */
   17977 		lat_ns = ((int64_t)rxa * 1024 -
   17978 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17979 			+ ETHER_HDR_LEN))) * 8 * 1000;
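          		/*
          		 * That is, the free Rx buffer space (less two maximum-
          		 * sized frames) in bits, times 1000; divided by the
          		 * speed in Mb/s below, this yields the fill time in ns.
          		 */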
   17980 		if (lat_ns < 0)
   17981 			lat_ns = 0;
   17982 		else
   17983 			lat_ns /= speed;
   17984 		value = lat_ns;
   17985 
   17986 		while (value > LTRV_VALUE) {
    17987 			scale++;
   17988 			value = howmany(value, __BIT(5));
   17989 		}
   17990 		if (scale > LTRV_SCALE_MAX) {
   17991 			device_printf(sc->sc_dev,
   17992 			    "Invalid LTR latency scale %d\n", scale);
   17993 			return -1;
   17994 		}
   17995 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
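          		/*
          		 * Worked example: lat_ns = 50000 leaves scale = 2
          		 * (units of 2^10 ns) and value = 49 after two rounds of
          		 * the ceiling division, i.e. a tolerance of 49 * 1024 ns
          		 * (about 50.2 us).
          		 */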
   17996 
   17997 		/* Determine the maximum latency tolerated by the platform */
   17998 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17999 		    WM_PCI_LTR_CAP_LPT);
   18000 		max_snoop = preg & 0xffff;
   18001 		max_nosnoop = preg >> 16;
   18002 
   18003 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18004 
   18005 		if (lat_enc > max_ltr_enc) {
   18006 			lat_enc = max_ltr_enc;
   18007 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18008 			    * PCI_LTR_SCALETONS(
   18009 				    __SHIFTOUT(lat_enc,
   18010 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18011 		}
   18012 
   18013 		if (lat_ns) {
   18014 			lat_ns *= speed * 1000;
   18015 			lat_ns /= 8;
   18016 			lat_ns /= 1000000000;
   18017 			obff_hwm = (int32_t)(rxa - lat_ns);
   18018 		}
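          		/*
          		 * lat_ns is converted back into the amount of data (in
          		 * KB) arriving at line rate during that latency, so
          		 * obff_hwm is roughly the Rx buffer space still free
          		 * when the platform responds.
          		 */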
   18019 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    18020 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   18021 			    "(rxa = %d, lat_ns = %d)\n",
   18022 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18023 			return -1;
   18024 		}
   18025 	}
    18026 	/* Snoop and No-Snoop latencies are the same */
   18027 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18028 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18029 
   18030 	/* Set OBFF high water mark */
   18031 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18032 	reg |= obff_hwm;
   18033 	CSR_WRITE(sc, WMREG_SVT, reg);
   18034 
   18035 	/* Enable OBFF */
   18036 	reg = CSR_READ(sc, WMREG_SVCR);
   18037 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18038 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18039 
   18040 	return 0;
   18041 }
   18042 
   18043 /*
   18044  * I210 Errata 25 and I211 Errata 10
   18045  * Slow System Clock.
   18046  *
    18047  * Note that on NetBSD this is called in both the FLASH and iNVM cases.
   18048  */
   18049 static int
   18050 wm_pll_workaround_i210(struct wm_softc *sc)
   18051 {
   18052 	uint32_t mdicnfg, wuc;
   18053 	uint32_t reg;
   18054 	pcireg_t pcireg;
   18055 	uint32_t pmreg;
   18056 	uint16_t nvmword, tmp_nvmword;
   18057 	uint16_t phyval;
   18058 	bool wa_done = false;
   18059 	int i, rv = 0;
   18060 
   18061 	/* Get Power Management cap offset */
   18062 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18063 	    &pmreg, NULL) == 0)
   18064 		return -1;
   18065 
   18066 	/* Save WUC and MDICNFG registers */
   18067 	wuc = CSR_READ(sc, WMREG_WUC);
   18068 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18069 
   18070 	reg = mdicnfg & ~MDICNFG_DEST;
   18071 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18072 
   18073 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18074 		/*
   18075 		 * The default value of the Initialization Control Word 1
   18076 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18077 		 */
   18078 		nvmword = INVM_DEFAULT_AL;
   18079 	}
   18080 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18081 
   18082 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18083 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18084 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18085 
   18086 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18087 			rv = 0;
   18088 			break; /* OK */
   18089 		} else
   18090 			rv = -1;
   18091 
   18092 		wa_done = true;
   18093 		/* Directly reset the internal PHY */
   18094 		reg = CSR_READ(sc, WMREG_CTRL);
   18095 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18096 
   18097 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18098 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18099 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18100 
   18101 		CSR_WRITE(sc, WMREG_WUC, 0);
   18102 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18103 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18104 
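          		/*
          		 * Bounce the device through D3hot and back to D0 so the
          		 * hardware re-runs the NVM autoload with the override
          		 * word programmed above, giving the PLL another chance
          		 * to come up correctly.
          		 */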
   18105 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18106 		    pmreg + PCI_PMCSR);
   18107 		pcireg |= PCI_PMCSR_STATE_D3;
   18108 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18109 		    pmreg + PCI_PMCSR, pcireg);
   18110 		delay(1000);
   18111 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18112 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18113 		    pmreg + PCI_PMCSR, pcireg);
   18114 
   18115 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18116 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18117 
   18118 		/* Restore WUC register */
   18119 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18120 	}
   18121 
   18122 	/* Restore MDICNFG setting */
   18123 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18124 	if (wa_done)
   18125 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18126 	return rv;
   18127 }
   18128 
   18129 static void
   18130 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18131 {
   18132 	uint32_t reg;
   18133 
   18134 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18135 		device_xname(sc->sc_dev), __func__));
   18136 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18137 	    || (sc->sc_type == WM_T_PCH_CNP));
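          	/*
          	 * Quirk for legacy (INTx) interrupts on SPT/CNP, as in Intel's
          	 * reference driver: ungate the side clock and disable IOSF
          	 * side-band clock gating/requests.
          	 */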
   18138 
   18139 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18140 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18141 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18142 
   18143 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18144 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18145 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18146 }
   18147 
   18148 /* Sysctl functions */
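          /* Report the hardware's current Tx descriptor head (TDH) for a queue. */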
   18149 static int
   18150 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18151 {
   18152 	struct sysctlnode node = *rnode;
   18153 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18154 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18155 	struct wm_softc *sc = txq->txq_sc;
   18156 	uint32_t reg;
   18157 
   18158 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18159 	node.sysctl_data = &reg;
   18160 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18161 }
   18162 
   18163 static int
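          /* Report the hardware's current Tx descriptor tail (TDT) for a queue. */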
   18164 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18165 {
   18166 	struct sysctlnode node = *rnode;
   18167 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18168 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18169 	struct wm_softc *sc = txq->txq_sc;
   18170 	uint32_t reg;
   18171 
   18172 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18173 	node.sysctl_data = &reg;
   18174 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18175 }
   18176 
   18177 #ifdef WM_DEBUG
   18178 static int
   18179 wm_sysctl_debug(SYSCTLFN_ARGS)
   18180 {
   18181 	struct sysctlnode node = *rnode;
   18182 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18183 	uint32_t dflags;
   18184 	int error;
   18185 
   18186 	dflags = sc->sc_debug;
   18187 	node.sysctl_data = &dflags;
   18188 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18189 
   18190 	if (error || newp == NULL)
   18191 		return error;
   18192 
   18193 	sc->sc_debug = dflags;
   18194 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18195 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18196 
   18197 	return 0;
   18198 }
   18199 #endif
   18200