/*	$NetBSD: if_wm.c,v 1.783 2023/08/25 08:14:14 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.783 2023/08/25 08:14:14 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
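
/*
 * Illustrative usage (not part of the driver): the whole printf(9)
 * argument list is passed as the single parenthesized parameter "y",
 * so DPRINTF() calls use double parentheses, e.g.:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 */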

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

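/*
 * Example of the ring arithmetic above (illustrative only): because
 * WM_NTXDESC(txq) is required to be a power of two, wrapping an index
 * is a cheap AND with the mask rather than a modulo.  With 4096
 * descriptors, WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0.
 */
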
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

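/*
 * Worked example for the sizing comment above (assuming a 9018-byte
 * jumbo frame: 9000 MTU + 14 header + 4 FCS): howmany(9018, MCLBYTES)
 * = howmany(9018, 2048) = 5 buffers per packet, so the 256 descriptors
 * hold roughly 256 / 5 = 51, i.e. "room for 50" jumbo packets.
 */
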
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
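
/*
 * Illustrative expansion of the macros above (not compiled): within
 * struct wm_txqueue below,
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw);
 *
 * token-pastes to
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is deliberately not expanded; it merely sizes the
 * name buffer), and WM_Q_EVCNT_ATTACH() then formats a name such as
 * "txq00txdw" before handing it to evcnt_attach_dynamic(9).
 */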

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This pcq intermediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
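
/*
 * Sketch of the intended call pattern for these ops vectors (a minimal,
 * non-compiled example; wm_example_phy_read() is hypothetical): since
 * acquire() is marked warn_unused_result, callers must check it before
 * touching the PHY/NVM and pair every successful acquire with release().
 */
#if 0
static int
wm_example_phy_read(struct wm_softc *sc, int phy, int reg, uint16_t *val)
{
	int rv;

	/* Take the hardware semaphore first; fail the I/O if we can't. */
	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;
	rv = sc->phy.readreg_locked(sc->sc_dev, phy, reg, val);
	sc->phy.release(sc);

	return rv;
}
#endif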

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared with both "Intr. cause" and
	 * non "Intr. cause" register.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
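
/*
 * Illustrative use of the chain macros above: rxq_tailp always points
 * at the pointer cell to fill next (the head pointer while the chain is
 * empty, otherwise the previous mbuf's m_next), so appending a buffer
 * of a multi-buffer packet is O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head = NULL, rxq_len = 0
 *	WM_RXCHAIN_LINK(rxq, m);	append m and advance rxq_tailp
 */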

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_STORE(ev, val)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_STORE(qname, evname, val)	/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
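
/*
 * Sketch of how the address helpers above are used (illustrative;
 * "tdbal"/"tdbah" stand in for the real descriptor base registers):
 * the descriptor ring's DMA address is programmed as two 32-bit halves,
 * and the _HI half is a constant 0 when bus_addr_t is 32 bits wide.
 */
#if 0
	CSR_WRITE(sc, tdbal, WM_CDTXADDR_LO(txq, 0));	/* low 32 bits */
	CSR_WRITE(sc, tdbah, WM_CDTXADDR_HI(txq, 0));	/* high 32 bits */
#endif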

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* For use with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1756 	  "I219 V Ethernet Connection",
   1757 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1759 	  "I219 V (2) Ethernet Connection",
   1760 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1762 	  "I219 V (4) Ethernet Connection",
   1763 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1765 	  "I219 V (5) Ethernet Connection",
   1766 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1768 	  "I219 V (6) Ethernet Connection",
   1769 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1771 	  "I219 V (7) Ethernet Connection",
   1772 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1774 	  "I219 V (8) Ethernet Connection",
   1775 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1777 	  "I219 V (9) Ethernet Connection",
   1778 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1780 	  "I219 V (10) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1783 	  "I219 V (11) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1786 	  "I219 V (12) Ethernet Connection",
   1787 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1789 	  "I219 V (13) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1792 	  "I219 V (14) Ethernet Connection",
   1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1795 	  "I219 V (15) Ethernet Connection",
   1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1798 	  "I219 V (16) Ethernet Connection",
   1799 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1801 	  "I219 V (17) Ethernet Connection",
   1802 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1804 	  "I219 V (18) Ethernet Connection",
   1805 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1807 	  "I219 V (19) Ethernet Connection",
   1808 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1809 	{ 0,			0,
   1810 	  NULL,
   1811 	  0,			0 },
   1812 };
   1813 
   1814 /*
    1815  * Register read/write functions
    1816  * (other than CSR_{READ|WRITE}()).
   1817  */
   1818 
   1819 #if 0 /* Not currently used */
   1820 static inline uint32_t
   1821 wm_io_read(struct wm_softc *sc, int reg)
   1822 {
   1823 
   1824 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1825 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1826 }
   1827 #endif
   1828 
   1829 static inline void
   1830 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1831 {
   1832 
   1833 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1834 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1835 }
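
         /*
          * wm_io_read()/wm_io_write() above implement the indirect I/O
          * access pair: the register offset is first written to the
          * address register at offset 0 in I/O space, and the data is
          * then transferred through the data register at offset 4.  A
          * minimal sketch of a read-modify-write with these helpers
          * (illustrative only; wm_io_read() is compiled out above):
          *
          *	uint32_t v = wm_io_read(sc, WMREG_CTRL);
          *	wm_io_write(sc, WMREG_CTRL, v | CTRL_SLU);
          */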
   1836 
   1837 static inline void
   1838 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1839     uint32_t data)
   1840 {
   1841 	uint32_t regval;
   1842 	int i;
   1843 
   1844 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1845 
   1846 	CSR_WRITE(sc, reg, regval);
   1847 
   1848 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1849 		delay(5);
   1850 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1851 			break;
   1852 	}
   1853 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1854 		aprint_error("%s: WARNING:"
   1855 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1856 		    device_xname(sc->sc_dev), reg);
   1857 	}
   1858 }
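
         /*
          * Note that the helper above polls in 5us steps for
          * SCTL_CTL_READY, so the worst-case stall before the warning
          * fires is SCTL_CTL_POLL_TIMEOUT * 5us.
          */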
   1859 
   1860 static inline void
   1861 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1862 {
   1863 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1864 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1865 }
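
         /*
          * Illustrative sketch only: assuming the usual mask-and-shift
          * semantics for BUS_ADDR_LO32()/BUS_ADDR_HI32(), the above is
          * equivalent to
          *
          *	wa->wa_low  = htole32((uint32_t)(v & 0xffffffffULL));
          *	wa->wa_high = htole32((uint32_t)((uint64_t)v >> 32));
          *
          * i.e. a 64-bit bus address is split across the two little-endian
          * 32-bit words of the descriptor address field.
          */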
   1866 
   1867 /*
   1868  * Descriptor sync/init functions.
   1869  */
   1870 static inline void
   1871 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1872 {
   1873 	struct wm_softc *sc = txq->txq_sc;
   1874 
   1875 	/* If it will wrap around, sync to the end of the ring. */
   1876 	if ((start + num) > WM_NTXDESC(txq)) {
   1877 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1878 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1879 		    (WM_NTXDESC(txq) - start), ops);
   1880 		num -= (WM_NTXDESC(txq) - start);
   1881 		start = 0;
   1882 	}
   1883 
   1884 	/* Now sync whatever is left. */
   1885 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1886 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1887 }
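
         /*
          * Worked example (illustrative): with WM_NTXDESC(txq) == 256,
          * start == 250 and num == 10, the first bus_dmamap_sync() above
          * covers descriptors 250..255 and the second covers 0..3.
          */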
   1888 
   1889 static inline void
   1890 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1891 {
   1892 	struct wm_softc *sc = rxq->rxq_sc;
   1893 
   1894 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1895 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1896 }
   1897 
   1898 static inline void
   1899 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1900 {
   1901 	struct wm_softc *sc = rxq->rxq_sc;
   1902 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1903 	struct mbuf *m = rxs->rxs_mbuf;
   1904 
   1905 	/*
   1906 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1907 	 * so that the payload after the Ethernet header is aligned
   1908 	 * to a 4-byte boundary.
    1909 	 *
   1910 	 * XXX BRAINDAMAGE ALERT!
   1911 	 * The stupid chip uses the same size for every buffer, which
   1912 	 * is set in the Receive Control register.  We are using the 2K
   1913 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1914 	 * reason, we can't "scoot" packets longer than the standard
   1915 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1916 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1917 	 * the upper layer copy the headers.
   1918 	 */
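         	/*
         	 * Illustrative arithmetic for the usual 2-byte tweak: the
         	 * 14-byte Ethernet header then occupies buffer offsets 2..15,
         	 * so the IP header that follows starts at offset 16, which is
         	 * 4-byte aligned.
         	 */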
   1919 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1920 
   1921 	if (sc->sc_type == WM_T_82574) {
   1922 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1923 		rxd->erx_data.erxd_addr =
   1924 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1925 		rxd->erx_data.erxd_dd = 0;
   1926 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1927 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1928 
   1929 		rxd->nqrx_data.nrxd_paddr =
   1930 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1931 		/* Currently, split header is not supported. */
   1932 		rxd->nqrx_data.nrxd_haddr = 0;
   1933 	} else {
   1934 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1935 
   1936 		wm_set_dma_addr(&rxd->wrx_addr,
   1937 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1938 		rxd->wrx_len = 0;
   1939 		rxd->wrx_cksum = 0;
   1940 		rxd->wrx_status = 0;
   1941 		rxd->wrx_errors = 0;
   1942 		rxd->wrx_special = 0;
   1943 	}
   1944 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1945 
   1946 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1947 }
   1948 
   1949 /*
   1950  * Device driver interface functions and commonly used functions.
   1951  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1952  */
   1953 
    1954 /* Look up a device in the table of supported devices. */
   1955 static const struct wm_product *
   1956 wm_lookup(const struct pci_attach_args *pa)
   1957 {
   1958 	const struct wm_product *wmp;
   1959 
   1960 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1961 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1962 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1963 			return wmp;
   1964 	}
   1965 	return NULL;
   1966 }
   1967 
   1968 /* The match function (ca_match) */
   1969 static int
   1970 wm_match(device_t parent, cfdata_t cf, void *aux)
   1971 {
   1972 	struct pci_attach_args *pa = aux;
   1973 
   1974 	if (wm_lookup(pa) != NULL)
   1975 		return 1;
   1976 
   1977 	return 0;
   1978 }
   1979 
   1980 /* The attach function (ca_attach) */
   1981 static void
   1982 wm_attach(device_t parent, device_t self, void *aux)
   1983 {
   1984 	struct wm_softc *sc = device_private(self);
   1985 	struct pci_attach_args *pa = aux;
   1986 	prop_dictionary_t dict;
   1987 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1988 	pci_chipset_tag_t pc = pa->pa_pc;
   1989 	int counts[PCI_INTR_TYPE_SIZE];
   1990 	pci_intr_type_t max_type;
   1991 	const char *eetype, *xname;
   1992 	bus_space_tag_t memt;
   1993 	bus_space_handle_t memh;
   1994 	bus_size_t memsize;
   1995 	int memh_valid;
   1996 	int i, error;
   1997 	const struct wm_product *wmp;
   1998 	prop_data_t ea;
   1999 	prop_number_t pn;
   2000 	uint8_t enaddr[ETHER_ADDR_LEN];
   2001 	char buf[256];
   2002 	char wqname[MAXCOMLEN];
   2003 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2004 	pcireg_t preg, memtype;
   2005 	uint16_t eeprom_data, apme_mask;
   2006 	bool force_clear_smbi;
   2007 	uint32_t link_mode;
   2008 	uint32_t reg;
   2009 
   2010 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2011 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2012 #endif
   2013 	sc->sc_dev = self;
   2014 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2015 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2016 	sc->sc_core_stopping = false;
   2017 
   2018 	wmp = wm_lookup(pa);
   2019 #ifdef DIAGNOSTIC
   2020 	if (wmp == NULL) {
   2021 		printf("\n");
   2022 		panic("wm_attach: impossible");
   2023 	}
   2024 #endif
   2025 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2026 
   2027 	sc->sc_pc = pa->pa_pc;
   2028 	sc->sc_pcitag = pa->pa_tag;
   2029 
   2030 	if (pci_dma64_available(pa)) {
   2031 		aprint_verbose(", 64-bit DMA");
   2032 		sc->sc_dmat = pa->pa_dmat64;
   2033 	} else {
   2034 		aprint_verbose(", 32-bit DMA");
   2035 		sc->sc_dmat = pa->pa_dmat;
   2036 	}
   2037 
   2038 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2039 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2040 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2041 
   2042 	sc->sc_type = wmp->wmp_type;
   2043 
   2044 	/* Set default function pointers */
   2045 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2046 	sc->phy.release = sc->nvm.release = wm_put_null;
   2047 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2048 
   2049 	if (sc->sc_type < WM_T_82543) {
   2050 		if (sc->sc_rev < 2) {
   2051 			aprint_error_dev(sc->sc_dev,
   2052 			    "i82542 must be at least rev. 2\n");
   2053 			return;
   2054 		}
   2055 		if (sc->sc_rev < 3)
   2056 			sc->sc_type = WM_T_82542_2_0;
   2057 	}
   2058 
   2059 	/*
    2060 	 * Disable MSI due to the following errata:
   2061 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2062 	 *
   2063 	 *  82544: Errata 25
   2064 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2065 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2066 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2067 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2068 	 *
   2069 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2070 	 *
   2071 	 *  82571 & 82572: Errata 63
   2072 	 */
   2073 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2074 	    || (sc->sc_type == WM_T_82572))
   2075 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2076 
   2077 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2078 	    || (sc->sc_type == WM_T_82580)
   2079 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2080 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2081 		sc->sc_flags |= WM_F_NEWQUEUE;
   2082 
   2083 	/* Set device properties (mactype) */
   2084 	dict = device_properties(sc->sc_dev);
   2085 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2086 
   2087 	/*
    2088 	 * Map the device.  All devices support memory-mapped access,
   2089 	 * and it is really required for normal operation.
   2090 	 */
   2091 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2092 	switch (memtype) {
   2093 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2094 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2095 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2096 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2097 		break;
   2098 	default:
   2099 		memh_valid = 0;
   2100 		break;
   2101 	}
   2102 
   2103 	if (memh_valid) {
   2104 		sc->sc_st = memt;
   2105 		sc->sc_sh = memh;
   2106 		sc->sc_ss = memsize;
   2107 	} else {
   2108 		aprint_error_dev(sc->sc_dev,
   2109 		    "unable to map device registers\n");
   2110 		return;
   2111 	}
   2112 
   2113 	/*
   2114 	 * In addition, i82544 and later support I/O mapped indirect
   2115 	 * register access.  It is not desirable (nor supported in
   2116 	 * this driver) to use it for normal operation, though it is
   2117 	 * required to work around bugs in some chip versions.
   2118 	 */
   2119 	switch (sc->sc_type) {
   2120 	case WM_T_82544:
   2121 	case WM_T_82541:
   2122 	case WM_T_82541_2:
   2123 	case WM_T_82547:
   2124 	case WM_T_82547_2:
   2125 		/* First we have to find the I/O BAR. */
   2126 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2127 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2128 			if (memtype == PCI_MAPREG_TYPE_IO)
   2129 				break;
   2130 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2131 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2132 				i += 4;	/* skip high bits, too */
   2133 		}
   2134 		if (i < PCI_MAPREG_END) {
   2135 			/*
    2136 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2137 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2138 			 * That's not a problem, because those newer chips
    2139 			 * don't have this bug.
    2140 			 *
    2141 			 * The i8254x apparently doesn't respond when the
    2142 			 * I/O BAR is 0, which makes it look as if it hasn't
    2143 			 * been configured.
   2144 			 */
   2145 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2146 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2147 				aprint_error_dev(sc->sc_dev,
   2148 				    "WARNING: I/O BAR at zero.\n");
   2149 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2150 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2151 			    == 0) {
   2152 				sc->sc_flags |= WM_F_IOH_VALID;
   2153 			} else
   2154 				aprint_error_dev(sc->sc_dev,
   2155 				    "WARNING: unable to map I/O space\n");
   2156 		}
   2157 		break;
   2158 	default:
   2159 		break;
   2160 	}
   2161 
   2162 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2163 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2164 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2165 	if (sc->sc_type < WM_T_82542_2_1)
   2166 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2167 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2168 
   2169 	/* Power up chip */
   2170 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2171 	    && error != EOPNOTSUPP) {
   2172 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2173 		return;
   2174 	}
   2175 
   2176 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2177 	/*
    2178 	 * Don't use MSI-X if we can use only one queue; this saves
    2179 	 * interrupt resources.
   2180 	 */
   2181 	if (sc->sc_nqueues > 1) {
   2182 		max_type = PCI_INTR_TYPE_MSIX;
   2183 		/*
    2184 		 * The 82583 has an MSI-X capability in the PCI configuration
    2185 		 * space, but it doesn't actually support it; at least the
    2186 		 * documentation says nothing about MSI-X.
   2187 		 */
   2188 		counts[PCI_INTR_TYPE_MSIX]
   2189 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2190 	} else {
   2191 		max_type = PCI_INTR_TYPE_MSI;
   2192 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2193 	}
   2194 
   2195 	/* Allocation settings */
   2196 	counts[PCI_INTR_TYPE_MSI] = 1;
   2197 	counts[PCI_INTR_TYPE_INTX] = 1;
    2198 	/* These may be overridden by the disable flags. */
   2199 	if (wm_disable_msi != 0) {
   2200 		counts[PCI_INTR_TYPE_MSI] = 0;
   2201 		if (wm_disable_msix != 0) {
   2202 			max_type = PCI_INTR_TYPE_INTX;
   2203 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2204 		}
   2205 	} else if (wm_disable_msix != 0) {
   2206 		max_type = PCI_INTR_TYPE_MSI;
   2207 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2208 	}
   2209 
   2210 alloc_retry:
   2211 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2212 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2213 		return;
   2214 	}
   2215 
   2216 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2217 		error = wm_setup_msix(sc);
   2218 		if (error) {
   2219 			pci_intr_release(pc, sc->sc_intrs,
   2220 			    counts[PCI_INTR_TYPE_MSIX]);
   2221 
   2222 			/* Setup for MSI: Disable MSI-X */
   2223 			max_type = PCI_INTR_TYPE_MSI;
   2224 			counts[PCI_INTR_TYPE_MSI] = 1;
   2225 			counts[PCI_INTR_TYPE_INTX] = 1;
   2226 			goto alloc_retry;
   2227 		}
   2228 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2229 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2230 		error = wm_setup_legacy(sc);
   2231 		if (error) {
   2232 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2233 			    counts[PCI_INTR_TYPE_MSI]);
   2234 
   2235 			/* The next try is for INTx: Disable MSI */
   2236 			max_type = PCI_INTR_TYPE_INTX;
   2237 			counts[PCI_INTR_TYPE_INTX] = 1;
   2238 			goto alloc_retry;
   2239 		}
   2240 	} else {
   2241 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2242 		error = wm_setup_legacy(sc);
   2243 		if (error) {
   2244 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2245 			    counts[PCI_INTR_TYPE_INTX]);
   2246 			return;
   2247 		}
   2248 	}
   2249 
   2250 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2251 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2252 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2253 	    WQ_PERCPU | WQ_MPSAFE);
   2254 	if (error) {
   2255 		aprint_error_dev(sc->sc_dev,
   2256 		    "unable to create TxRx workqueue\n");
   2257 		goto out;
   2258 	}
   2259 
   2260 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2261 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2262 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2263 	    WQ_MPSAFE);
   2264 	if (error) {
   2265 		workqueue_destroy(sc->sc_queue_wq);
   2266 		aprint_error_dev(sc->sc_dev,
   2267 		    "unable to create reset workqueue\n");
   2268 		goto out;
   2269 	}
   2270 
   2271 	/*
   2272 	 * Check the function ID (unit number of the chip).
   2273 	 */
   2274 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2275 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2276 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2277 	    || (sc->sc_type == WM_T_82580)
   2278 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2279 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2280 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2281 	else
   2282 		sc->sc_funcid = 0;
   2283 
   2284 	/*
   2285 	 * Determine a few things about the bus we're connected to.
   2286 	 */
   2287 	if (sc->sc_type < WM_T_82543) {
   2288 		/* We don't really know the bus characteristics here. */
   2289 		sc->sc_bus_speed = 33;
   2290 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2291 		/*
    2292 		 * CSA (Communication Streaming Architecture) is about as fast
    2293 		 * as a 32-bit 66MHz PCI bus.
   2294 		 */
   2295 		sc->sc_flags |= WM_F_CSA;
   2296 		sc->sc_bus_speed = 66;
   2297 		aprint_verbose_dev(sc->sc_dev,
   2298 		    "Communication Streaming Architecture\n");
   2299 		if (sc->sc_type == WM_T_82547) {
   2300 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2301 			callout_setfunc(&sc->sc_txfifo_ch,
   2302 			    wm_82547_txfifo_stall, sc);
   2303 			aprint_verbose_dev(sc->sc_dev,
   2304 			    "using 82547 Tx FIFO stall work-around\n");
   2305 		}
   2306 	} else if (sc->sc_type >= WM_T_82571) {
   2307 		sc->sc_flags |= WM_F_PCIE;
   2308 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2309 		    && (sc->sc_type != WM_T_ICH10)
   2310 		    && (sc->sc_type != WM_T_PCH)
   2311 		    && (sc->sc_type != WM_T_PCH2)
   2312 		    && (sc->sc_type != WM_T_PCH_LPT)
   2313 		    && (sc->sc_type != WM_T_PCH_SPT)
   2314 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2315 			/* ICH* and PCH* have no PCIe capability registers */
   2316 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2317 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2318 				NULL) == 0)
   2319 				aprint_error_dev(sc->sc_dev,
   2320 				    "unable to find PCIe capability\n");
   2321 		}
   2322 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2323 	} else {
   2324 		reg = CSR_READ(sc, WMREG_STATUS);
   2325 		if (reg & STATUS_BUS64)
   2326 			sc->sc_flags |= WM_F_BUS64;
   2327 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2328 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2329 
   2330 			sc->sc_flags |= WM_F_PCIX;
   2331 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2332 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2333 				aprint_error_dev(sc->sc_dev,
   2334 				    "unable to find PCIX capability\n");
   2335 			else if (sc->sc_type != WM_T_82545_3 &&
   2336 			    sc->sc_type != WM_T_82546_3) {
   2337 				/*
   2338 				 * Work around a problem caused by the BIOS
   2339 				 * setting the max memory read byte count
   2340 				 * incorrectly.
   2341 				 */
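         				/*
         				 * Illustrative encoding: both fields
         				 * are (512 << n) bytes, so e.g.
         				 * bytecnt == 3 means 4096-byte reads
         				 * while maxb == 2 caps the bus at
         				 * 2048, and the clamp below would
         				 * rewrite MMRBC from 4096 to 2048.
         				 */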
   2342 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2343 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2344 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2345 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2346 
   2347 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2348 				    PCIX_CMD_BYTECNT_SHIFT;
   2349 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2350 				    PCIX_STATUS_MAXB_SHIFT;
   2351 				if (bytecnt > maxb) {
   2352 					aprint_verbose_dev(sc->sc_dev,
   2353 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2354 					    512 << bytecnt, 512 << maxb);
   2355 					pcix_cmd = (pcix_cmd &
   2356 					    ~PCIX_CMD_BYTECNT_MASK) |
   2357 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2358 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2359 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2360 					    pcix_cmd);
   2361 				}
   2362 			}
   2363 		}
   2364 		/*
   2365 		 * The quad port adapter is special; it has a PCIX-PCIX
   2366 		 * bridge on the board, and can run the secondary bus at
   2367 		 * a higher speed.
   2368 		 */
   2369 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2370 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2371 								      : 66;
   2372 		} else if (sc->sc_flags & WM_F_PCIX) {
   2373 			switch (reg & STATUS_PCIXSPD_MASK) {
   2374 			case STATUS_PCIXSPD_50_66:
   2375 				sc->sc_bus_speed = 66;
   2376 				break;
   2377 			case STATUS_PCIXSPD_66_100:
   2378 				sc->sc_bus_speed = 100;
   2379 				break;
   2380 			case STATUS_PCIXSPD_100_133:
   2381 				sc->sc_bus_speed = 133;
   2382 				break;
   2383 			default:
   2384 				aprint_error_dev(sc->sc_dev,
   2385 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2386 				    reg & STATUS_PCIXSPD_MASK);
   2387 				sc->sc_bus_speed = 66;
   2388 				break;
   2389 			}
   2390 		} else
   2391 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2392 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2393 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2394 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2395 	}
   2396 
    2397 	/* Clear the interesting statistics counters. */
   2398 	CSR_READ(sc, WMREG_COLC);
   2399 	CSR_READ(sc, WMREG_RXERRC);
   2400 
   2401 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2402 	    || (sc->sc_type >= WM_T_ICH8))
   2403 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2404 	if (sc->sc_type >= WM_T_ICH8)
   2405 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2406 
    2407 	/* Set up the PHY- and NVM-related locking and access functions. */
   2408 	switch (sc->sc_type) {
   2409 	case WM_T_82542_2_0:
   2410 	case WM_T_82542_2_1:
   2411 	case WM_T_82543:
   2412 	case WM_T_82544:
   2413 		/* Microwire */
   2414 		sc->nvm.read = wm_nvm_read_uwire;
   2415 		sc->sc_nvm_wordsize = 64;
   2416 		sc->sc_nvm_addrbits = 6;
   2417 		break;
   2418 	case WM_T_82540:
   2419 	case WM_T_82545:
   2420 	case WM_T_82545_3:
   2421 	case WM_T_82546:
   2422 	case WM_T_82546_3:
   2423 		/* Microwire */
   2424 		sc->nvm.read = wm_nvm_read_uwire;
   2425 		reg = CSR_READ(sc, WMREG_EECD);
   2426 		if (reg & EECD_EE_SIZE) {
   2427 			sc->sc_nvm_wordsize = 256;
   2428 			sc->sc_nvm_addrbits = 8;
   2429 		} else {
   2430 			sc->sc_nvm_wordsize = 64;
   2431 			sc->sc_nvm_addrbits = 6;
   2432 		}
   2433 		sc->sc_flags |= WM_F_LOCK_EECD;
   2434 		sc->nvm.acquire = wm_get_eecd;
   2435 		sc->nvm.release = wm_put_eecd;
   2436 		break;
   2437 	case WM_T_82541:
   2438 	case WM_T_82541_2:
   2439 	case WM_T_82547:
   2440 	case WM_T_82547_2:
   2441 		reg = CSR_READ(sc, WMREG_EECD);
   2442 		/*
    2443 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2444 		 * the 8254[17], so set the flags and functions before calling it.
   2445 		 */
   2446 		sc->sc_flags |= WM_F_LOCK_EECD;
   2447 		sc->nvm.acquire = wm_get_eecd;
   2448 		sc->nvm.release = wm_put_eecd;
   2449 		if (reg & EECD_EE_TYPE) {
   2450 			/* SPI */
   2451 			sc->nvm.read = wm_nvm_read_spi;
   2452 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2453 			wm_nvm_set_addrbits_size_eecd(sc);
   2454 		} else {
   2455 			/* Microwire */
   2456 			sc->nvm.read = wm_nvm_read_uwire;
   2457 			if ((reg & EECD_EE_ABITS) != 0) {
   2458 				sc->sc_nvm_wordsize = 256;
   2459 				sc->sc_nvm_addrbits = 8;
   2460 			} else {
   2461 				sc->sc_nvm_wordsize = 64;
   2462 				sc->sc_nvm_addrbits = 6;
   2463 			}
   2464 		}
   2465 		break;
   2466 	case WM_T_82571:
   2467 	case WM_T_82572:
   2468 		/* SPI */
   2469 		sc->nvm.read = wm_nvm_read_eerd;
    2470 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2471 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2472 		wm_nvm_set_addrbits_size_eecd(sc);
   2473 		sc->phy.acquire = wm_get_swsm_semaphore;
   2474 		sc->phy.release = wm_put_swsm_semaphore;
   2475 		sc->nvm.acquire = wm_get_nvm_82571;
   2476 		sc->nvm.release = wm_put_nvm_82571;
   2477 		break;
   2478 	case WM_T_82573:
   2479 	case WM_T_82574:
   2480 	case WM_T_82583:
   2481 		sc->nvm.read = wm_nvm_read_eerd;
    2482 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2483 		if (sc->sc_type == WM_T_82573) {
   2484 			sc->phy.acquire = wm_get_swsm_semaphore;
   2485 			sc->phy.release = wm_put_swsm_semaphore;
   2486 			sc->nvm.acquire = wm_get_nvm_82571;
   2487 			sc->nvm.release = wm_put_nvm_82571;
   2488 		} else {
   2489 			/* Both PHY and NVM use the same semaphore. */
   2490 			sc->phy.acquire = sc->nvm.acquire
   2491 			    = wm_get_swfwhw_semaphore;
   2492 			sc->phy.release = sc->nvm.release
   2493 			    = wm_put_swfwhw_semaphore;
   2494 		}
   2495 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2496 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2497 			sc->sc_nvm_wordsize = 2048;
   2498 		} else {
   2499 			/* SPI */
   2500 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2501 			wm_nvm_set_addrbits_size_eecd(sc);
   2502 		}
   2503 		break;
   2504 	case WM_T_82575:
   2505 	case WM_T_82576:
   2506 	case WM_T_82580:
   2507 	case WM_T_I350:
   2508 	case WM_T_I354:
   2509 	case WM_T_80003:
   2510 		/* SPI */
   2511 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2512 		wm_nvm_set_addrbits_size_eecd(sc);
   2513 		if ((sc->sc_type == WM_T_80003)
   2514 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2515 			sc->nvm.read = wm_nvm_read_eerd;
   2516 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2517 		} else {
   2518 			sc->nvm.read = wm_nvm_read_spi;
   2519 			sc->sc_flags |= WM_F_LOCK_EECD;
   2520 		}
   2521 		sc->phy.acquire = wm_get_phy_82575;
   2522 		sc->phy.release = wm_put_phy_82575;
   2523 		sc->nvm.acquire = wm_get_nvm_80003;
   2524 		sc->nvm.release = wm_put_nvm_80003;
   2525 		break;
   2526 	case WM_T_ICH8:
   2527 	case WM_T_ICH9:
   2528 	case WM_T_ICH10:
   2529 	case WM_T_PCH:
   2530 	case WM_T_PCH2:
   2531 	case WM_T_PCH_LPT:
   2532 		sc->nvm.read = wm_nvm_read_ich8;
   2533 		/* FLASH */
   2534 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2535 		sc->sc_nvm_wordsize = 2048;
   2536 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2537 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2538 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2539 			aprint_error_dev(sc->sc_dev,
   2540 			    "can't map FLASH registers\n");
   2541 			goto out;
   2542 		}
   2543 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2544 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2545 		    ICH_FLASH_SECTOR_SIZE;
   2546 		sc->sc_ich8_flash_bank_size =
   2547 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2548 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2549 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2550 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
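         		/*
         		 * Worked example (illustrative, assuming 4KB flash
         		 * sectors): GFPREG == 0x00100001 gives base sector 1
         		 * and last sector 0x10, a 0x10-sector (64KB) region,
         		 * so each of the two banks holds
         		 * 64KB / 2 / sizeof(uint16_t) = 16384 NVM words.
         		 */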
   2551 		sc->sc_flashreg_offset = 0;
   2552 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2553 		sc->phy.release = wm_put_swflag_ich8lan;
   2554 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2555 		sc->nvm.release = wm_put_nvm_ich8lan;
   2556 		break;
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
   2559 		sc->nvm.read = wm_nvm_read_spt;
   2560 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2561 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2562 		sc->sc_flasht = sc->sc_st;
   2563 		sc->sc_flashh = sc->sc_sh;
   2564 		sc->sc_ich8_flash_base = 0;
   2565 		sc->sc_nvm_wordsize =
   2566 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2567 		    * NVM_SIZE_MULTIPLIER;
    2568 		/* It is the size in bytes; we want words. */
   2569 		sc->sc_nvm_wordsize /= 2;
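         		/*
         		 * E.g. (illustrative, assuming a 4KB multiplier): a
         		 * STRAP field value of 7 yields (7 + 1) * 4096 = 32768
         		 * bytes, i.e. 16384 NVM words.
         		 */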
   2570 		/* Assume 2 banks */
   2571 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2572 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2573 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2574 		sc->phy.release = wm_put_swflag_ich8lan;
   2575 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2576 		sc->nvm.release = wm_put_nvm_ich8lan;
   2577 		break;
   2578 	case WM_T_I210:
   2579 	case WM_T_I211:
    2580 		/* Allow a single clear of the SW semaphore on I210 and newer. */
   2581 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2582 		if (wm_nvm_flash_presence_i210(sc)) {
   2583 			sc->nvm.read = wm_nvm_read_eerd;
   2584 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2585 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2586 			wm_nvm_set_addrbits_size_eecd(sc);
   2587 		} else {
   2588 			sc->nvm.read = wm_nvm_read_invm;
   2589 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2590 			sc->sc_nvm_wordsize = INVM_SIZE;
   2591 		}
   2592 		sc->phy.acquire = wm_get_phy_82575;
   2593 		sc->phy.release = wm_put_phy_82575;
   2594 		sc->nvm.acquire = wm_get_nvm_80003;
   2595 		sc->nvm.release = wm_put_nvm_80003;
   2596 		break;
   2597 	default:
   2598 		break;
   2599 	}
   2600 
   2601 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2602 	switch (sc->sc_type) {
   2603 	case WM_T_82571:
   2604 	case WM_T_82572:
   2605 		reg = CSR_READ(sc, WMREG_SWSM2);
   2606 		if ((reg & SWSM2_LOCK) == 0) {
   2607 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2608 			force_clear_smbi = true;
   2609 		} else
   2610 			force_clear_smbi = false;
   2611 		break;
   2612 	case WM_T_82573:
   2613 	case WM_T_82574:
   2614 	case WM_T_82583:
   2615 		force_clear_smbi = true;
   2616 		break;
   2617 	default:
   2618 		force_clear_smbi = false;
   2619 		break;
   2620 	}
   2621 	if (force_clear_smbi) {
   2622 		reg = CSR_READ(sc, WMREG_SWSM);
   2623 		if ((reg & SWSM_SMBI) != 0)
   2624 			aprint_error_dev(sc->sc_dev,
   2625 			    "Please update the Bootagent\n");
   2626 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2627 	}
   2628 
   2629 	/*
    2630 	 * Defer printing the EEPROM type until after verifying the checksum.
   2631 	 * This allows the EEPROM type to be printed correctly in the case
   2632 	 * that no EEPROM is attached.
   2633 	 */
   2634 	/*
   2635 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2636 	 * this for later, so we can fail future reads from the EEPROM.
   2637 	 */
   2638 	if (wm_nvm_validate_checksum(sc)) {
   2639 		/*
    2640 		 * Validate once more, because some PCIe parts fail the
    2641 		 * first check due to the link being in a sleep state.
   2642 		 */
   2643 		if (wm_nvm_validate_checksum(sc))
   2644 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2645 	}
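         	/*
         	 * Roughly what the validation computes (a sketch; the details
         	 * are in wm_nvm_validate_checksum()): the 16-bit sum of the
         	 * NVM words in the checksummed region, including the checksum
         	 * word itself, must equal a fixed constant (0xbaba on these
         	 * parts); the image chooses the checksum word so that the sum
         	 * comes out right.
         	 */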
   2646 
   2647 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2648 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2649 	else {
   2650 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2651 		    sc->sc_nvm_wordsize);
   2652 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2653 			aprint_verbose("iNVM");
   2654 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2655 			aprint_verbose("FLASH(HW)");
   2656 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2657 			aprint_verbose("FLASH");
   2658 		else {
   2659 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2660 				eetype = "SPI";
   2661 			else
   2662 				eetype = "MicroWire";
   2663 			aprint_verbose("(%d address bits) %s EEPROM",
   2664 			    sc->sc_nvm_addrbits, eetype);
   2665 		}
   2666 	}
   2667 	wm_nvm_version(sc);
   2668 	aprint_verbose("\n");
   2669 
   2670 	/*
    2671 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2672 	 * incorrect.
   2673 	 */
   2674 	wm_gmii_setup_phytype(sc, 0, 0);
   2675 
   2676 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2677 	switch (sc->sc_type) {
   2678 	case WM_T_ICH8:
   2679 	case WM_T_ICH9:
   2680 	case WM_T_ICH10:
   2681 	case WM_T_PCH:
   2682 	case WM_T_PCH2:
   2683 	case WM_T_PCH_LPT:
   2684 	case WM_T_PCH_SPT:
   2685 	case WM_T_PCH_CNP:
   2686 		apme_mask = WUC_APME;
   2687 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2688 		if ((eeprom_data & apme_mask) != 0)
   2689 			sc->sc_flags |= WM_F_WOL;
   2690 		break;
   2691 	default:
   2692 		break;
   2693 	}
   2694 
   2695 	/* Reset the chip to a known state. */
   2696 	wm_reset(sc);
   2697 
   2698 	/*
   2699 	 * Check for I21[01] PLL workaround.
   2700 	 *
   2701 	 * Three cases:
   2702 	 * a) Chip is I211.
   2703 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2704 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2705 	 */
   2706 	if (sc->sc_type == WM_T_I211)
   2707 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2708 	if (sc->sc_type == WM_T_I210) {
   2709 		if (!wm_nvm_flash_presence_i210(sc))
   2710 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2711 		else if ((sc->sc_nvm_ver_major < 3)
   2712 		    || ((sc->sc_nvm_ver_major == 3)
   2713 			&& (sc->sc_nvm_ver_minor < 25))) {
   2714 			aprint_verbose_dev(sc->sc_dev,
   2715 			    "ROM image version %d.%d is older than 3.25\n",
   2716 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2717 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2718 		}
   2719 	}
   2720 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2721 		wm_pll_workaround_i210(sc);
   2722 
   2723 	wm_get_wakeup(sc);
   2724 
   2725 	/* Non-AMT based hardware can now take control from firmware */
   2726 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2727 		wm_get_hw_control(sc);
   2728 
   2729 	/*
    2730 	 * Read the Ethernet address from the EEPROM if it was not found
    2731 	 * in the device properties first.
   2732 	 */
   2733 	ea = prop_dictionary_get(dict, "mac-address");
   2734 	if (ea != NULL) {
   2735 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2736 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2737 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2738 	} else {
   2739 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2740 			aprint_error_dev(sc->sc_dev,
   2741 			    "unable to read Ethernet address\n");
   2742 			goto out;
   2743 		}
   2744 	}
   2745 
   2746 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2747 	    ether_sprintf(enaddr));
   2748 
   2749 	/*
   2750 	 * Read the config info from the EEPROM, and set up various
   2751 	 * bits in the control registers based on their contents.
   2752 	 */
   2753 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2754 	if (pn != NULL) {
   2755 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2756 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2757 	} else {
   2758 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2759 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2760 			goto out;
   2761 		}
   2762 	}
   2763 
   2764 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2765 	if (pn != NULL) {
   2766 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2767 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2768 	} else {
   2769 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2770 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2771 			goto out;
   2772 		}
   2773 	}
   2774 
   2775 	/* check for WM_F_WOL */
   2776 	switch (sc->sc_type) {
   2777 	case WM_T_82542_2_0:
   2778 	case WM_T_82542_2_1:
   2779 	case WM_T_82543:
   2780 		/* dummy? */
   2781 		eeprom_data = 0;
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		break;
   2784 	case WM_T_82544:
   2785 		apme_mask = NVM_CFG2_82544_APM_EN;
   2786 		eeprom_data = cfg2;
   2787 		break;
   2788 	case WM_T_82546:
   2789 	case WM_T_82546_3:
   2790 	case WM_T_82571:
   2791 	case WM_T_82572:
   2792 	case WM_T_82573:
   2793 	case WM_T_82574:
   2794 	case WM_T_82583:
   2795 	case WM_T_80003:
   2796 	case WM_T_82575:
   2797 	case WM_T_82576:
   2798 		apme_mask = NVM_CFG3_APME;
   2799 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2800 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2801 		break;
   2802 	case WM_T_82580:
   2803 	case WM_T_I350:
   2804 	case WM_T_I354:
   2805 	case WM_T_I210:
   2806 	case WM_T_I211:
   2807 		apme_mask = NVM_CFG3_APME;
   2808 		wm_nvm_read(sc,
   2809 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2810 		    1, &eeprom_data);
   2811 		break;
   2812 	case WM_T_ICH8:
   2813 	case WM_T_ICH9:
   2814 	case WM_T_ICH10:
   2815 	case WM_T_PCH:
   2816 	case WM_T_PCH2:
   2817 	case WM_T_PCH_LPT:
   2818 	case WM_T_PCH_SPT:
   2819 	case WM_T_PCH_CNP:
    2820 		/* Already checked before wm_reset() */
   2821 		apme_mask = eeprom_data = 0;
   2822 		break;
   2823 	default: /* XXX 82540 */
   2824 		apme_mask = NVM_CFG3_APME;
   2825 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2826 		break;
   2827 	}
   2828 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2829 	if ((eeprom_data & apme_mask) != 0)
   2830 		sc->sc_flags |= WM_F_WOL;
   2831 
   2832 	/*
    2833 	 * We have the EEPROM settings; now apply the special cases
    2834 	 * where the EEPROM may be wrong or the board won't support
    2835 	 * wake on LAN on a particular port.
   2836 	 */
   2837 	switch (sc->sc_pcidevid) {
   2838 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2839 		sc->sc_flags &= ~WM_F_WOL;
   2840 		break;
   2841 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2842 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2843 		/* Wake events only supported on port A for dual fiber
   2844 		 * regardless of eeprom setting */
   2845 		if (sc->sc_funcid == 1)
   2846 			sc->sc_flags &= ~WM_F_WOL;
   2847 		break;
   2848 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2849 		/* If quad port adapter, disable WoL on all but port A */
   2850 		if (sc->sc_funcid != 0)
   2851 			sc->sc_flags &= ~WM_F_WOL;
   2852 		break;
   2853 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2854 		/* Wake events only supported on port A for dual fiber
   2855 		 * regardless of eeprom setting */
   2856 		if (sc->sc_funcid == 1)
   2857 			sc->sc_flags &= ~WM_F_WOL;
   2858 		break;
   2859 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2860 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2861 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2862 		/* If quad port adapter, disable WoL on all but port A */
   2863 		if (sc->sc_funcid != 0)
   2864 			sc->sc_flags &= ~WM_F_WOL;
   2865 		break;
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_82575) {
   2869 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2870 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2871 			    nvmword);
   2872 			if ((sc->sc_type == WM_T_82575) ||
   2873 			    (sc->sc_type == WM_T_82576)) {
   2874 				/* Check NVM for autonegotiation */
   2875 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2876 				    != 0)
   2877 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2878 			}
   2879 			if ((sc->sc_type == WM_T_82575) ||
   2880 			    (sc->sc_type == WM_T_I350)) {
   2881 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2882 					sc->sc_flags |= WM_F_MAS;
   2883 			}
   2884 		}
   2885 	}
   2886 
   2887 	/*
    2888 	 * XXX need special handling for some multiple-port cards
    2889 	 * to disable a particular port.
   2890 	 */
   2891 
   2892 	if (sc->sc_type >= WM_T_82544) {
   2893 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2894 		if (pn != NULL) {
   2895 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2896 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2897 		} else {
   2898 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2899 				aprint_error_dev(sc->sc_dev,
   2900 				    "unable to read SWDPIN\n");
   2901 				goto out;
   2902 			}
   2903 		}
   2904 	}
   2905 
   2906 	if (cfg1 & NVM_CFG1_ILOS)
   2907 		sc->sc_ctrl |= CTRL_ILOS;
   2908 
   2909 	/*
   2910 	 * XXX
    2911 	 * This code isn't correct because pins 2 and 3 are located
    2912 	 * at different positions on newer chips. Check all the datasheets.
    2913 	 *
    2914 	 * Until this problem is resolved, restrict this to chips <= 82580.
   2915 	 */
   2916 	if (sc->sc_type <= WM_T_82580) {
   2917 		if (sc->sc_type >= WM_T_82544) {
   2918 			sc->sc_ctrl |=
   2919 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2920 			    CTRL_SWDPIO_SHIFT;
   2921 			sc->sc_ctrl |=
   2922 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2923 			    CTRL_SWDPINS_SHIFT;
   2924 		} else {
   2925 			sc->sc_ctrl |=
   2926 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2927 			    CTRL_SWDPIO_SHIFT;
   2928 		}
   2929 	}
   2930 
   2931 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2932 		wm_nvm_read(sc,
   2933 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2934 		    1, &nvmword);
   2935 		if (nvmword & NVM_CFG3_ILOS)
   2936 			sc->sc_ctrl |= CTRL_ILOS;
   2937 	}
   2938 
   2939 #if 0
   2940 	if (sc->sc_type >= WM_T_82544) {
   2941 		if (cfg1 & NVM_CFG1_IPS0)
   2942 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2943 		if (cfg1 & NVM_CFG1_IPS1)
   2944 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2945 		sc->sc_ctrl_ext |=
   2946 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2947 		    CTRL_EXT_SWDPIO_SHIFT;
   2948 		sc->sc_ctrl_ext |=
   2949 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2950 		    CTRL_EXT_SWDPINS_SHIFT;
   2951 	} else {
   2952 		sc->sc_ctrl_ext |=
   2953 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2954 		    CTRL_EXT_SWDPIO_SHIFT;
   2955 	}
   2956 #endif
   2957 
   2958 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2959 #if 0
   2960 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2961 #endif
   2962 
   2963 	if (sc->sc_type == WM_T_PCH) {
   2964 		uint16_t val;
   2965 
   2966 		/* Save the NVM K1 bit setting */
   2967 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2968 
   2969 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2970 			sc->sc_nvm_k1_enabled = 1;
   2971 		else
   2972 			sc->sc_nvm_k1_enabled = 0;
   2973 	}
   2974 
   2975 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2976 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2977 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2978 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2979 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2980 	    || sc->sc_type == WM_T_82573
   2981 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2982 		/* Copper only */
   2983 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2984 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2985 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2986 	    || (sc->sc_type == WM_T_I211)) {
   2987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2988 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2989 		switch (link_mode) {
   2990 		case CTRL_EXT_LINK_MODE_1000KX:
   2991 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2992 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2993 			break;
   2994 		case CTRL_EXT_LINK_MODE_SGMII:
   2995 			if (wm_sgmii_uses_mdio(sc)) {
   2996 				aprint_normal_dev(sc->sc_dev,
   2997 				    "SGMII(MDIO)\n");
   2998 				sc->sc_flags |= WM_F_SGMII;
   2999 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3000 				break;
   3001 			}
   3002 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3003 			/*FALLTHROUGH*/
   3004 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3005 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3006 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3007 				if (link_mode
   3008 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3009 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3010 					sc->sc_flags |= WM_F_SGMII;
   3011 					aprint_verbose_dev(sc->sc_dev,
   3012 					    "SGMII\n");
   3013 				} else {
   3014 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3015 					aprint_verbose_dev(sc->sc_dev,
   3016 					    "SERDES\n");
   3017 				}
   3018 				break;
   3019 			}
   3020 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3021 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3022 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3023 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3024 				sc->sc_flags |= WM_F_SGMII;
   3025 			}
   3026 			/* Do not change link mode for 100BaseFX */
   3027 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3028 				break;
   3029 
   3030 			/* Change current link mode setting */
   3031 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3032 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3033 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3034 			else
   3035 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3037 			break;
   3038 		case CTRL_EXT_LINK_MODE_GMII:
   3039 		default:
   3040 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3041 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3042 			break;
   3043 		}
   3044 
   3046 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   3047 			reg |= CTRL_EXT_I2C_ENA;
   3048 		else
   3049 			reg &= ~CTRL_EXT_I2C_ENA;
   3050 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3051 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3052 			if (!wm_sgmii_uses_mdio(sc))
   3053 				wm_gmii_setup_phytype(sc, 0, 0);
   3054 			wm_reset_mdicnfg_82580(sc);
   3055 		}
   3056 	} else if (sc->sc_type < WM_T_82543 ||
   3057 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3058 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3059 			aprint_error_dev(sc->sc_dev,
   3060 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3061 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3062 		}
   3063 	} else {
   3064 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3065 			aprint_error_dev(sc->sc_dev,
   3066 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3067 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3068 		}
   3069 	}
   3070 
   3071 	if (sc->sc_type >= WM_T_PCH2)
   3072 		sc->sc_flags |= WM_F_EEE;
   3073 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3074 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3075 		/* XXX: Need special handling for I354. (not yet) */
   3076 		if (sc->sc_type != WM_T_I354)
   3077 			sc->sc_flags |= WM_F_EEE;
   3078 	}
   3079 
   3080 	/*
   3081 	 * The I350 has a bug where it always strips the CRC whether
    3082 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   3083 	 */
   3084 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3085 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3086 		sc->sc_flags |= WM_F_CRC_STRIP;
   3087 
   3088 	/*
   3089 	 * Workaround for some chips to delay sending LINK_STATE_UP.
    3090 	 * Some systems can't send packets soon after link-up. See also
   3091 	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
   3092 	 */
   3093 	switch (sc->sc_type) {
   3094 	case WM_T_I350:
   3095 	case WM_T_I354:
   3096 	case WM_T_I210:
   3097 	case WM_T_I211:
   3098 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3099 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3100 		break;
   3101 	default:
   3102 		break;
   3103 	}
   3104 
   3105 	/* Set device properties (macflags) */
   3106 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3107 
   3108 	if (sc->sc_flags != 0) {
   3109 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3110 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3111 	}
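         	/*
         	 * snprintb(9) expands the flag word against the WM_FLAGS
         	 * format string, so the line printed above looks like, e.g.,
         	 * "0x4000<WOL>" (the bit name and value in this example are
         	 * illustrative; the exact set comes from WM_FLAGS).
         	 */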
   3112 
   3113 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3114 
   3115 	/* Initialize the media structures accordingly. */
   3116 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3117 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3118 	else
   3119 		wm_tbi_mediainit(sc); /* All others */
   3120 
   3121 	ifp = &sc->sc_ethercom.ec_if;
   3122 	xname = device_xname(sc->sc_dev);
   3123 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3124 	ifp->if_softc = sc;
   3125 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3126 	ifp->if_extflags = IFEF_MPSAFE;
   3127 	ifp->if_ioctl = wm_ioctl;
   3128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3129 		ifp->if_start = wm_nq_start;
   3130 		/*
   3131 		 * When the number of CPUs is one and the controller can use
    3132 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3133 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3134 		 * the other for link status changes.
   3135 		 * In this situation, wm_nq_transmit() is disadvantageous
   3136 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3137 		 */
   3138 		if (wm_is_using_multiqueue(sc))
   3139 			ifp->if_transmit = wm_nq_transmit;
   3140 	} else {
   3141 		ifp->if_start = wm_start;
   3142 		/*
   3143 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3144 		 * described above.
   3145 		 */
   3146 		if (wm_is_using_multiqueue(sc))
   3147 			ifp->if_transmit = wm_transmit;
   3148 	}
    3149 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   3150 	ifp->if_init = wm_init;
   3151 	ifp->if_stop = wm_stop;
   3152 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3153 	IFQ_SET_READY(&ifp->if_snd);
   3154 
   3155 	/* Check for jumbo frame */
   3156 	switch (sc->sc_type) {
   3157 	case WM_T_82573:
   3158 		/* XXX limited to 9234 if ASPM is disabled */
   3159 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3160 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3161 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3162 		break;
   3163 	case WM_T_82571:
   3164 	case WM_T_82572:
   3165 	case WM_T_82574:
   3166 	case WM_T_82583:
   3167 	case WM_T_82575:
   3168 	case WM_T_82576:
   3169 	case WM_T_82580:
   3170 	case WM_T_I350:
   3171 	case WM_T_I354:
   3172 	case WM_T_I210:
   3173 	case WM_T_I211:
   3174 	case WM_T_80003:
   3175 	case WM_T_ICH9:
   3176 	case WM_T_ICH10:
   3177 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3178 	case WM_T_PCH_LPT:
   3179 	case WM_T_PCH_SPT:
   3180 	case WM_T_PCH_CNP:
   3181 		/* XXX limited to 9234 */
   3182 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3183 		break;
   3184 	case WM_T_PCH:
   3185 		/* XXX limited to 4096 */
   3186 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3187 		break;
   3188 	case WM_T_82542_2_0:
   3189 	case WM_T_82542_2_1:
   3190 	case WM_T_ICH8:
   3191 		/* No support for jumbo frame */
   3192 		break;
   3193 	default:
   3194 		/* ETHER_MAX_LEN_JUMBO */
   3195 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3196 		break;
   3197 	}
   3198 
    3199 	/* If we're an i82543 or greater, we can support VLANs. */
   3200 	if (sc->sc_type >= WM_T_82543) {
   3201 		sc->sc_ethercom.ec_capabilities |=
   3202 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3203 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3204 	}
   3205 
   3206 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3207 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3208 
   3209 	/*
    3210 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
   3211 	 * on i82543 and later.
   3212 	 */
   3213 	if (sc->sc_type >= WM_T_82543) {
   3214 		ifp->if_capabilities |=
   3215 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3216 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3217 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3218 		    IFCAP_CSUM_TCPv6_Tx |
   3219 		    IFCAP_CSUM_UDPv6_Tx;
   3220 	}
   3221 
   3222 	/*
    3223 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3224 	 *
   3225 	 *	82541GI (8086:1076) ... no
   3226 	 *	82572EI (8086:10b9) ... yes
   3227 	 */
   3228 	if (sc->sc_type >= WM_T_82571) {
   3229 		ifp->if_capabilities |=
   3230 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3231 	}
   3232 
   3233 	/*
    3234 	 * If we're an i82544 or greater (except i82547), we can do
   3235 	 * TCP segmentation offload.
   3236 	 */
   3237 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3238 		ifp->if_capabilities |= IFCAP_TSOv4;
   3239 
   3240 	if (sc->sc_type >= WM_T_82571)
   3241 		ifp->if_capabilities |= IFCAP_TSOv6;
   3242 
   3243 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3244 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3245 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3246 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
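         	/*
         	 * These limits bound, roughly, how many Tx/Rx descriptors are
         	 * handled per hardware interrupt (the *_intr_process_limit
         	 * values) and per deferred softint pass (the *_process_limit
         	 * values). The values set here are defaults only; they can be
         	 * tuned later through the sysctl nodes created in
         	 * wm_init_sysctls().
         	 */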
   3247 
   3248 	/* Attach the interface. */
   3249 	if_initialize(ifp);
   3250 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3251 	ether_ifattach(ifp, enaddr);
   3252 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3253 	if_register(ifp);
   3254 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3255 	    RND_FLAG_DEFAULT);
   3256 
   3257 #ifdef WM_EVENT_COUNTERS
   3258 	/* Attach event counters. */
   3259 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3260 	    NULL, xname, "linkintr");
   3261 
   3262 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "CRC Error");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Symbol Error");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Missed Packets");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Collision");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Sequence Error");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Receive Length Error");
   3274 
   3275 	if (sc->sc_type >= WM_T_82543) {
   3276 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3277 		    NULL, xname, "Alignment Error");
   3278 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3279 		    NULL, xname, "Receive Error");
   3280 		/* XXX Does 82575 have HTDPMC? */
   3281 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3282 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3283 			    EVCNT_TYPE_MISC, NULL, xname,
   3284 			    "Carrier Extension Error");
   3285 		else
   3286 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3287 			    EVCNT_TYPE_MISC, NULL, xname,
   3288 			    "Host Transmit Discarded Packets by MAC");
   3289 
   3290 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3291 		    NULL, xname, "Tx with No CRS");
   3292 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3293 		    NULL, xname, "TCP Segmentation Context Tx");
   3294 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3295 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3296 			    EVCNT_TYPE_MISC, NULL, xname,
   3297 			    "TCP Segmentation Context Tx Fail");
   3298 		else {
   3299 			/* XXX Is the circuit breaker only for 82576? */
   3300 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3301 			    EVCNT_TYPE_MISC, NULL, xname,
   3302 			    "Circuit Breaker Rx Dropped Packet");
   3303 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3304 			    EVCNT_TYPE_MISC, NULL, xname,
   3305 			    "Circuit Breaker Rx Manageability Packet");
   3306 		}
   3307 	}
   3308 
   3309 	if (sc->sc_type >= WM_T_82542_2_1) {
   3310 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3311 		    NULL, xname, "tx_xoff");
   3312 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3313 		    NULL, xname, "tx_xon");
   3314 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3315 		    NULL, xname, "rx_xoff");
   3316 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3317 		    NULL, xname, "rx_xon");
   3318 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3319 		    NULL, xname, "rx_macctl");
   3320 	}
   3321 
   3322 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Single Collision");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Excessive Collisions");
   3326 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3327 	    NULL, xname, "Multiple Collision");
   3328 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3329 	    NULL, xname, "Late Collisions");
   3330 
   3331 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3332 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3334 
   3335 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Defer");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Packets Rx (64 bytes)");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3347 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3348 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3349 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3350 	    NULL, xname, "Good Packets Rx");
   3351 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3352 	    NULL, xname, "Broadcast Packets Rx");
   3353 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3354 	    NULL, xname, "Multicast Packets Rx");
   3355 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3356 	    NULL, xname, "Good Packets Tx");
   3357 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3358 	    NULL, xname, "Good Octets Rx");
   3359 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3360 	    NULL, xname, "Good Octets Tx");
   3361 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Rx No Buffers");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Rx Undersize");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Rx Fragment");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Rx Oversize");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Rx Jabber");
   3371 	if (sc->sc_type >= WM_T_82540) {
   3372 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3373 		    NULL, xname, "Management Packets RX");
   3374 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3375 		    NULL, xname, "Management Packets Dropped");
   3376 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3377 		    NULL, xname, "Management Packets TX");
   3378 	}
   3379 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Total Octets Rx");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Total Octets Tx");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Total Packets Rx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Total Packets Tx");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Packets Tx (64 bytes)");
   3389 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3390 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3391 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3392 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3393 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3394 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3395 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3396 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3397 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Multicast Packets Tx");
   3401 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3402 	    NULL, xname, "Broadcast Packets Tx");
   3403 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3404 	    NULL, xname, "Interrupt Assertion");
   3405 	if (sc->sc_type < WM_T_82575) {
   3406 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3407 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3408 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3409 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3410 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3411 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3412 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3413 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3414 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3415 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3416 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3417 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3418 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3419 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3420 
   3421 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3422 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3423 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3424 	} else if (!WM_IS_ICHPCH(sc)) {
   3425 		/*
   3426 		 * For 82575 and newer.
   3427 		 *
   3428 		 * On 80003, ICHs and PCHs, it seems all of the following
   3429 		 * registers are zero.
   3430 		 */
   3431 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3432 		    NULL, xname, "Rx Packets To Host");
   3433 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3434 		    NULL, xname, "Debug Counter 1");
   3435 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3436 		    NULL, xname, "Debug Counter 2");
   3437 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3438 		    NULL, xname, "Debug Counter 3");
   3439 
   3440 		/*
    3441 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3442 		 * Empty). I think that's wrong: the real count I observed
    3443 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets Tx).
    3444 		 * It's HGPTC (Host Good Packets Tx), which is described in
    3445 		 * the 82576 datasheet.
   3446 		 */
   3447 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3448 		    NULL, xname, "Host Good Packets TX");
   3449 
   3450 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3451 		    NULL, xname, "Debug Counter 4");
   3452 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3453 		    NULL, xname, "Rx Desc Min Thresh");
   3454 		/* XXX Is the circuit breaker only for 82576? */
   3455 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3456 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3457 
   3458 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3459 		    NULL, xname, "Host Good Octets Rx");
   3460 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3461 		    NULL, xname, "Host Good Octets Tx");
   3462 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3463 		    NULL, xname, "Length Errors");
   3464 	}
   3465 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3466 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3467 		    NULL, xname, "EEE Tx LPI");
   3468 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3469 		    NULL, xname, "EEE Rx LPI");
   3470 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3471 		    NULL, xname, "BMC2OS Packets received by host");
   3472 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3473 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3474 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3475 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3476 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3477 		    NULL, xname, "OS2BMC Packets received by BMC");
   3478 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3479 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3480 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3481 		    NULL, xname, "Header Redirection Missed Packet");
   3482 	}
   3483 #endif /* WM_EVENT_COUNTERS */
   3484 
   3485 	sc->sc_txrx_use_workqueue = false;
   3486 
   3487 	if (wm_phy_need_linkdown_discard(sc)) {
   3488 		DPRINTF(sc, WM_DEBUG_LINK,
   3489 		    ("%s: %s: Set linkdown discard flag\n",
   3490 			device_xname(sc->sc_dev), __func__));
   3491 		wm_set_linkdown_discard(sc);
   3492 	}
   3493 
   3494 	wm_init_sysctls(sc);
   3495 
   3496 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3497 		pmf_class_network_register(self, ifp);
   3498 	else
   3499 		aprint_error_dev(self, "couldn't establish power handler\n");
   3500 
   3501 	sc->sc_flags |= WM_F_ATTACHED;
   3502 out:
   3503 	return;
   3504 }
   3505 
   3506 /* The detach function (ca_detach) */
   3507 static int
   3508 wm_detach(device_t self, int flags __unused)
   3509 {
   3510 	struct wm_softc *sc = device_private(self);
   3511 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3512 	int i;
   3513 
   3514 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3515 		return 0;
   3516 
   3517 	/* Stop the interface. Callouts are stopped in it. */
   3518 	IFNET_LOCK(ifp);
   3519 	sc->sc_dying = true;
   3520 	wm_stop(ifp, 1);
   3521 	IFNET_UNLOCK(ifp);
   3522 
   3523 	pmf_device_deregister(self);
   3524 
   3525 	sysctl_teardown(&sc->sc_sysctllog);
   3526 
   3527 #ifdef WM_EVENT_COUNTERS
   3528 	evcnt_detach(&sc->sc_ev_linkintr);
   3529 
   3530 	evcnt_detach(&sc->sc_ev_crcerrs);
   3531 	evcnt_detach(&sc->sc_ev_symerrc);
   3532 	evcnt_detach(&sc->sc_ev_mpc);
   3533 	evcnt_detach(&sc->sc_ev_colc);
   3534 	evcnt_detach(&sc->sc_ev_sec);
   3535 	evcnt_detach(&sc->sc_ev_rlec);
   3536 
   3537 	if (sc->sc_type >= WM_T_82543) {
   3538 		evcnt_detach(&sc->sc_ev_algnerrc);
   3539 		evcnt_detach(&sc->sc_ev_rxerrc);
   3540 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3541 			evcnt_detach(&sc->sc_ev_cexterr);
   3542 		else
   3543 			evcnt_detach(&sc->sc_ev_htdpmc);
   3544 
   3545 		evcnt_detach(&sc->sc_ev_tncrs);
   3546 		evcnt_detach(&sc->sc_ev_tsctc);
   3547 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3548 			evcnt_detach(&sc->sc_ev_tsctfc);
   3549 		else {
   3550 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3551 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3552 		}
   3553 	}
   3554 
   3555 	if (sc->sc_type >= WM_T_82542_2_1) {
   3556 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3557 		evcnt_detach(&sc->sc_ev_tx_xon);
   3558 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3559 		evcnt_detach(&sc->sc_ev_rx_xon);
   3560 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3561 	}
   3562 
   3563 	evcnt_detach(&sc->sc_ev_scc);
   3564 	evcnt_detach(&sc->sc_ev_ecol);
   3565 	evcnt_detach(&sc->sc_ev_mcc);
   3566 	evcnt_detach(&sc->sc_ev_latecol);
   3567 
   3568 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3569 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3570 
   3571 	evcnt_detach(&sc->sc_ev_dc);
   3572 	evcnt_detach(&sc->sc_ev_prc64);
   3573 	evcnt_detach(&sc->sc_ev_prc127);
   3574 	evcnt_detach(&sc->sc_ev_prc255);
   3575 	evcnt_detach(&sc->sc_ev_prc511);
   3576 	evcnt_detach(&sc->sc_ev_prc1023);
   3577 	evcnt_detach(&sc->sc_ev_prc1522);
   3578 	evcnt_detach(&sc->sc_ev_gprc);
   3579 	evcnt_detach(&sc->sc_ev_bprc);
   3580 	evcnt_detach(&sc->sc_ev_mprc);
   3581 	evcnt_detach(&sc->sc_ev_gptc);
   3582 	evcnt_detach(&sc->sc_ev_gorc);
   3583 	evcnt_detach(&sc->sc_ev_gotc);
   3584 	evcnt_detach(&sc->sc_ev_rnbc);
   3585 	evcnt_detach(&sc->sc_ev_ruc);
   3586 	evcnt_detach(&sc->sc_ev_rfc);
   3587 	evcnt_detach(&sc->sc_ev_roc);
   3588 	evcnt_detach(&sc->sc_ev_rjc);
   3589 	if (sc->sc_type >= WM_T_82540) {
   3590 		evcnt_detach(&sc->sc_ev_mgtprc);
   3591 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3592 		evcnt_detach(&sc->sc_ev_mgtptc);
   3593 	}
   3594 	evcnt_detach(&sc->sc_ev_tor);
   3595 	evcnt_detach(&sc->sc_ev_tot);
   3596 	evcnt_detach(&sc->sc_ev_tpr);
   3597 	evcnt_detach(&sc->sc_ev_tpt);
   3598 	evcnt_detach(&sc->sc_ev_ptc64);
   3599 	evcnt_detach(&sc->sc_ev_ptc127);
   3600 	evcnt_detach(&sc->sc_ev_ptc255);
   3601 	evcnt_detach(&sc->sc_ev_ptc511);
   3602 	evcnt_detach(&sc->sc_ev_ptc1023);
   3603 	evcnt_detach(&sc->sc_ev_ptc1522);
   3604 	evcnt_detach(&sc->sc_ev_mptc);
   3605 	evcnt_detach(&sc->sc_ev_bptc);
   3606 	evcnt_detach(&sc->sc_ev_iac);
   3607 	if (sc->sc_type < WM_T_82575) {
   3608 		evcnt_detach(&sc->sc_ev_icrxptc);
   3609 		evcnt_detach(&sc->sc_ev_icrxatc);
   3610 		evcnt_detach(&sc->sc_ev_ictxptc);
   3611 		evcnt_detach(&sc->sc_ev_ictxatc);
   3612 		evcnt_detach(&sc->sc_ev_ictxqec);
   3613 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3614 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3615 		evcnt_detach(&sc->sc_ev_icrxoc);
   3616 	} else if (!WM_IS_ICHPCH(sc)) {
   3617 		evcnt_detach(&sc->sc_ev_rpthc);
   3618 		evcnt_detach(&sc->sc_ev_debug1);
   3619 		evcnt_detach(&sc->sc_ev_debug2);
   3620 		evcnt_detach(&sc->sc_ev_debug3);
   3621 		evcnt_detach(&sc->sc_ev_hgptc);
   3622 		evcnt_detach(&sc->sc_ev_debug4);
   3623 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3624 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3625 
   3626 		evcnt_detach(&sc->sc_ev_hgorc);
   3627 		evcnt_detach(&sc->sc_ev_hgotc);
   3628 		evcnt_detach(&sc->sc_ev_lenerrs);
   3629 	}
   3630 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3631 		evcnt_detach(&sc->sc_ev_tlpic);
   3632 		evcnt_detach(&sc->sc_ev_rlpic);
   3633 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3634 		evcnt_detach(&sc->sc_ev_o2bspc);
   3635 		evcnt_detach(&sc->sc_ev_b2ospc);
   3636 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3637 		evcnt_detach(&sc->sc_ev_scvpc);
   3638 		evcnt_detach(&sc->sc_ev_hrmpc);
   3639 	}
   3640 #endif /* WM_EVENT_COUNTERS */
   3641 
   3642 	rnd_detach_source(&sc->rnd_source);
   3643 
   3644 	/* Tell the firmware about the release */
   3645 	mutex_enter(sc->sc_core_lock);
   3646 	wm_release_manageability(sc);
   3647 	wm_release_hw_control(sc);
   3648 	wm_enable_wakeup(sc);
   3649 	mutex_exit(sc->sc_core_lock);
   3650 
   3651 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3652 
   3653 	ether_ifdetach(ifp);
   3654 	if_detach(ifp);
   3655 	if_percpuq_destroy(sc->sc_ipq);
   3656 
   3657 	/* Delete all remaining media. */
   3658 	ifmedia_fini(&sc->sc_mii.mii_media);
   3659 
   3660 	/* Unload RX dmamaps and free mbufs */
   3661 	for (i = 0; i < sc->sc_nqueues; i++) {
   3662 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3663 		mutex_enter(rxq->rxq_lock);
   3664 		wm_rxdrain(rxq);
   3665 		mutex_exit(rxq->rxq_lock);
   3666 	}
   3667 	/* Must unlock here */
   3668 
   3669 	/* Disestablish the interrupt handler */
   3670 	for (i = 0; i < sc->sc_nintrs; i++) {
   3671 		if (sc->sc_ihs[i] != NULL) {
   3672 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3673 			sc->sc_ihs[i] = NULL;
   3674 		}
   3675 	}
   3676 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3677 
   3678 	/* wm_stop() ensured that the workqueues are stopped. */
   3679 	workqueue_destroy(sc->sc_queue_wq);
   3680 	workqueue_destroy(sc->sc_reset_wq);
   3681 
   3682 	for (i = 0; i < sc->sc_nqueues; i++)
   3683 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3684 
   3685 	wm_free_txrx_queues(sc);
   3686 
   3687 	/* Unmap the registers */
   3688 	if (sc->sc_ss) {
   3689 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3690 		sc->sc_ss = 0;
   3691 	}
   3692 	if (sc->sc_ios) {
   3693 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3694 		sc->sc_ios = 0;
   3695 	}
   3696 	if (sc->sc_flashs) {
   3697 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3698 		sc->sc_flashs = 0;
   3699 	}
   3700 
   3701 	if (sc->sc_core_lock)
   3702 		mutex_obj_free(sc->sc_core_lock);
   3703 	if (sc->sc_ich_phymtx)
   3704 		mutex_obj_free(sc->sc_ich_phymtx);
   3705 	if (sc->sc_ich_nvmmtx)
   3706 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3707 
   3708 	return 0;
   3709 }
   3710 
   3711 static bool
   3712 wm_suspend(device_t self, const pmf_qual_t *qual)
   3713 {
   3714 	struct wm_softc *sc = device_private(self);
   3715 
   3716 	wm_release_manageability(sc);
   3717 	wm_release_hw_control(sc);
   3718 	wm_enable_wakeup(sc);
   3719 
   3720 	return true;
   3721 }
   3722 
   3723 static bool
   3724 wm_resume(device_t self, const pmf_qual_t *qual)
   3725 {
   3726 	struct wm_softc *sc = device_private(self);
   3727 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3728 	pcireg_t reg;
   3729 	char buf[256];
   3730 
   3731 	reg = CSR_READ(sc, WMREG_WUS);
   3732 	if (reg != 0) {
   3733 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3734 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3735 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3736 	}
   3737 
   3738 	if (sc->sc_type >= WM_T_PCH2)
   3739 		wm_resume_workarounds_pchlan(sc);
   3740 	IFNET_LOCK(ifp);
   3741 	if ((ifp->if_flags & IFF_UP) == 0) {
   3742 		/* >= PCH_SPT hardware workaround before reset. */
   3743 		if (sc->sc_type >= WM_T_PCH_SPT)
   3744 			wm_flush_desc_rings(sc);
   3745 
   3746 		wm_reset(sc);
   3747 		/* Non-AMT based hardware can now take control from firmware */
   3748 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3749 			wm_get_hw_control(sc);
   3750 		wm_init_manageability(sc);
   3751 	} else {
   3752 		/*
   3753 		 * We called pmf_class_network_register(), so if_init() is
    3754 		 * automatically called when IFF_UP is set. wm_reset(),
   3755 		 * wm_get_hw_control() and wm_init_manageability() are called
   3756 		 * via wm_init().
   3757 		 */
   3758 	}
   3759 	IFNET_UNLOCK(ifp);
   3760 
   3761 	return true;
   3762 }
   3763 
   3764 /*
   3765  * wm_watchdog:
   3766  *
   3767  *	Watchdog checker.
   3768  */
   3769 static bool
   3770 wm_watchdog(struct ifnet *ifp)
   3771 {
   3772 	int qid;
   3773 	struct wm_softc *sc = ifp->if_softc;
    3774 	uint16_t hang_queue = 0; /* wm(4) supports at most 16 queues (82576). */
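         	/*
         	 * Each bit in hang_queue marks one Tx queue that looks hung:
         	 * wm_watchdog_txq() below sets __BIT(wmq_id) for each stuck
         	 * queue, so e.g. queues 0 and 2 hung would leave hang_queue
         	 * == 0x5, and a single reset is scheduled for any nonzero
         	 * value.
         	 */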
   3775 
   3776 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3777 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3778 
   3779 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3780 	}
   3781 
   3782 #ifdef WM_DEBUG
   3783 	if (sc->sc_trigger_reset) {
   3784 		/* debug operation, no need for atomicity or reliability */
   3785 		sc->sc_trigger_reset = 0;
   3786 		hang_queue++;
   3787 	}
   3788 #endif
   3789 
   3790 	if (hang_queue == 0)
   3791 		return true;
   3792 
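         	/*
         	 * The atomic swap makes the reset one-shot: only the caller
         	 * that flips sc_reset_pending from 0 to 1 enqueues the work;
         	 * wm_handle_reset_work() stores 0 again when the reset is done.
         	 */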
   3793 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3794 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3795 
   3796 	return false;
   3797 }
   3798 
   3799 /*
   3800  * Perform an interface watchdog reset.
   3801  */
   3802 static void
   3803 wm_handle_reset_work(struct work *work, void *arg)
   3804 {
   3805 	struct wm_softc * const sc = arg;
   3806 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3807 
   3808 	/* Don't want ioctl operations to happen */
   3809 	IFNET_LOCK(ifp);
   3810 
    3811 	/* Reset the interface. */
   3812 	wm_init(ifp);
   3813 
   3814 	IFNET_UNLOCK(ifp);
   3815 
   3816 	/*
    3817 	 * There is still some upper-layer processing that calls
    3818 	 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
   3819 	 */
   3820 	/* Try to get more packets going. */
   3821 	ifp->if_start(ifp);
   3822 
   3823 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3824 }
   3825 
   3827 static void
   3828 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3829 {
   3830 
   3831 	mutex_enter(txq->txq_lock);
   3832 	if (txq->txq_sending &&
   3833 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3834 		wm_watchdog_txq_locked(ifp, txq, hang);
   3835 
   3836 	mutex_exit(txq->txq_lock);
   3837 }
   3838 
   3839 static void
   3840 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3841     uint16_t *hang)
   3842 {
   3843 	struct wm_softc *sc = ifp->if_softc;
   3844 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3845 
   3846 	KASSERT(mutex_owned(txq->txq_lock));
   3847 
   3848 	/*
   3849 	 * Since we're using delayed interrupts, sweep up
   3850 	 * before we report an error.
   3851 	 */
   3852 	wm_txeof(txq, UINT_MAX);
   3853 
   3854 	if (txq->txq_sending)
   3855 		*hang |= __BIT(wmq->wmq_id);
   3856 
   3857 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3858 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3859 		    device_xname(sc->sc_dev));
   3860 	} else {
   3861 #ifdef WM_DEBUG
   3862 		int i, j;
   3863 		struct wm_txsoft *txs;
   3864 #endif
   3865 		log(LOG_ERR,
   3866 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3867 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3868 		    txq->txq_next);
   3869 		if_statinc(ifp, if_oerrors);
   3870 #ifdef WM_DEBUG
   3871 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3872 		     i = WM_NEXTTXS(txq, i)) {
   3873 			txs = &txq->txq_soft[i];
   3874 			printf("txs %d tx %d -> %d\n",
   3875 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3876 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3877 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3878 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3879 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3880 					printf("\t %#08x%08x\n",
   3881 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3882 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3883 				} else {
   3884 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3885 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3886 					    txq->txq_descs[j].wtx_addr.wa_low);
   3887 					printf("\t %#04x%02x%02x%08x\n",
   3888 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3889 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3890 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3891 					    txq->txq_descs[j].wtx_cmdlen);
   3892 				}
   3893 				if (j == txs->txs_lastdesc)
   3894 					break;
   3895 			}
   3896 		}
   3897 #endif
   3898 	}
   3899 }
   3900 
   3901 /*
   3902  * wm_tick:
   3903  *
   3904  *	One second timer, used to check link status, sweep up
   3905  *	completed transmit jobs, etc.
   3906  */
   3907 static void
   3908 wm_tick(void *arg)
   3909 {
   3910 	struct wm_softc *sc = arg;
   3911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3912 
   3913 	mutex_enter(sc->sc_core_lock);
   3914 
   3915 	if (sc->sc_core_stopping) {
   3916 		mutex_exit(sc->sc_core_lock);
   3917 		return;
   3918 	}
   3919 
   3920 	wm_update_stats(sc);
   3921 
   3922 	if (sc->sc_flags & WM_F_HAS_MII) {
   3923 		bool dotick = true;
   3924 
   3925 		/*
   3926 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3927 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3928 		 */
   3929 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3930 			struct timeval now;
   3931 
   3932 			getmicrotime(&now);
   3933 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3934 				dotick = false;
   3935 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3936 				/* Simplify by checking tv_sec only. */
   3937 
   3938 				sc->sc_linkup_delay_time.tv_sec = 0;
   3939 				sc->sc_linkup_delay_time.tv_usec = 0;
   3940 			}
   3941 		}
   3942 		if (dotick)
   3943 			mii_tick(&sc->sc_mii);
   3944 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3945 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3946 		wm_serdes_tick(sc);
   3947 	else
   3948 		wm_tbi_tick(sc);
   3949 
   3950 	mutex_exit(sc->sc_core_lock);
   3951 
   3952 	if (wm_watchdog(ifp))
   3953 		callout_schedule(&sc->sc_tick_ch, hz);
   3954 }
   3955 
   3956 static int
   3957 wm_ifflags_cb(struct ethercom *ec)
   3958 {
   3959 	struct ifnet *ifp = &ec->ec_if;
   3960 	struct wm_softc *sc = ifp->if_softc;
   3961 	u_short iffchange;
   3962 	int ecchange;
   3963 	bool needreset = false;
   3964 	int rc = 0;
   3965 
   3966 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3967 		device_xname(sc->sc_dev), __func__));
   3968 
   3969 	KASSERT(IFNET_LOCKED(ifp));
   3970 
   3971 	mutex_enter(sc->sc_core_lock);
   3972 
   3973 	/*
   3974 	 * Check for if_flags.
    3975 	 * The main use is to prevent link-down when opening bpf.
   3976 	 */
   3977 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3978 	sc->sc_if_flags = ifp->if_flags;
   3979 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3980 		needreset = true;
   3981 		goto ec;
   3982 	}
   3983 
   3984 	/* iff related updates */
   3985 	if ((iffchange & IFF_PROMISC) != 0)
   3986 		wm_set_filter(sc);
   3987 
   3988 	wm_set_vlan(sc);
   3989 
   3990 ec:
   3991 	/* Check for ec_capenable. */
   3992 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3993 	sc->sc_ec_capenable = ec->ec_capenable;
   3994 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3995 		needreset = true;
   3996 		goto out;
   3997 	}
   3998 
   3999 	/* ec related updates */
   4000 	wm_set_eee(sc);
   4001 
   4002 out:
   4003 	if (needreset)
   4004 		rc = ENETRESET;
   4005 	mutex_exit(sc->sc_core_lock);
   4006 
   4007 	return rc;
   4008 }
   4009 
   4010 static bool
   4011 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4012 {
   4013 
   4014 	switch (sc->sc_phytype) {
   4015 	case WMPHY_82577: /* ihphy */
   4016 	case WMPHY_82578: /* atphy */
   4017 	case WMPHY_82579: /* ihphy */
   4018 	case WMPHY_I217: /* ihphy */
   4019 	case WMPHY_82580: /* ihphy */
   4020 	case WMPHY_I350: /* ihphy */
   4021 		return true;
   4022 	default:
   4023 		return false;
   4024 	}
   4025 }
   4026 
   4027 static void
   4028 wm_set_linkdown_discard(struct wm_softc *sc)
   4029 {
   4030 
   4031 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4032 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4033 
   4034 		mutex_enter(txq->txq_lock);
   4035 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4036 		mutex_exit(txq->txq_lock);
   4037 	}
   4038 }
   4039 
   4040 static void
   4041 wm_clear_linkdown_discard(struct wm_softc *sc)
   4042 {
   4043 
   4044 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4045 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4046 
   4047 		mutex_enter(txq->txq_lock);
   4048 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4049 		mutex_exit(txq->txq_lock);
   4050 	}
   4051 }
   4052 
   4053 /*
   4054  * wm_ioctl:		[ifnet interface function]
   4055  *
   4056  *	Handle control requests from the operator.
   4057  */
   4058 static int
   4059 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4060 {
   4061 	struct wm_softc *sc = ifp->if_softc;
   4062 	struct ifreq *ifr = (struct ifreq *)data;
   4063 	struct ifaddr *ifa = (struct ifaddr *)data;
   4064 	struct sockaddr_dl *sdl;
   4065 	int error;
   4066 
   4067 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4068 		device_xname(sc->sc_dev), __func__));
   4069 
   4070 	switch (cmd) {
   4071 	case SIOCADDMULTI:
   4072 	case SIOCDELMULTI:
   4073 		break;
   4074 	default:
   4075 		KASSERT(IFNET_LOCKED(ifp));
   4076 	}
   4077 
   4078 	if (cmd == SIOCZIFDATA) {
   4079 		/*
   4080 		 * Special handling for SIOCZIFDATA.
   4081 		 * Copying and clearing the if_data structure is done with
   4082 		 * ether_ioctl() below.
   4083 		 */
   4084 		mutex_enter(sc->sc_core_lock);
   4085 		wm_update_stats(sc);
   4086 		wm_clear_evcnt(sc);
   4087 		mutex_exit(sc->sc_core_lock);
   4088 	}
   4089 
   4090 	switch (cmd) {
   4091 	case SIOCSIFMEDIA:
   4092 		mutex_enter(sc->sc_core_lock);
   4093 		/* Flow control requires full-duplex mode. */
   4094 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4095 		    (ifr->ifr_media & IFM_FDX) == 0)
   4096 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4097 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4098 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4099 				/* We can do both TXPAUSE and RXPAUSE. */
   4100 				ifr->ifr_media |=
   4101 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4102 			}
   4103 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4104 		}
   4105 		mutex_exit(sc->sc_core_lock);
   4106 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4107 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4108 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4109 				DPRINTF(sc, WM_DEBUG_LINK,
   4110 				    ("%s: %s: Set linkdown discard flag\n",
   4111 					device_xname(sc->sc_dev), __func__));
   4112 				wm_set_linkdown_discard(sc);
   4113 			}
   4114 		}
   4115 		break;
   4116 	case SIOCINITIFADDR:
   4117 		mutex_enter(sc->sc_core_lock);
   4118 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4119 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4120 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4121 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4122 			/* Unicast address is the first multicast entry */
   4123 			wm_set_filter(sc);
   4124 			error = 0;
   4125 			mutex_exit(sc->sc_core_lock);
   4126 			break;
   4127 		}
   4128 		mutex_exit(sc->sc_core_lock);
   4129 		/*FALLTHROUGH*/
   4130 	default:
   4131 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4132 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4133 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4134 				DPRINTF(sc, WM_DEBUG_LINK,
   4135 				    ("%s: %s: Set linkdown discard flag\n",
   4136 					device_xname(sc->sc_dev), __func__));
   4137 				wm_set_linkdown_discard(sc);
   4138 			}
   4139 		}
   4140 		const int s = splnet();
   4141 		/* It may call wm_start, so unlock here */
   4142 		error = ether_ioctl(ifp, cmd, data);
   4143 		splx(s);
   4144 		if (error != ENETRESET)
   4145 			break;
   4146 
   4147 		error = 0;
   4148 
   4149 		if (cmd == SIOCSIFCAP)
   4150 			error = if_init(ifp);
   4151 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4152 			mutex_enter(sc->sc_core_lock);
   4153 			if (sc->sc_if_flags & IFF_RUNNING) {
   4154 				/*
   4155 				 * Multicast list has changed; set the
   4156 				 * hardware filter accordingly.
   4157 				 */
   4158 				wm_set_filter(sc);
   4159 			}
   4160 			mutex_exit(sc->sc_core_lock);
   4161 		}
   4162 		break;
   4163 	}
   4164 
   4165 	return error;
   4166 }
   4167 
   4168 /* MAC address related */
   4169 
   4170 /*
    4171  * Get the offset of the MAC address and return it.
    4172  * If an error occurs, offset 0 is used.
   4173  */
   4174 static uint16_t
   4175 wm_check_alt_mac_addr(struct wm_softc *sc)
   4176 {
   4177 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4178 	uint16_t offset = NVM_OFF_MACADDR;
   4179 
   4180 	/* Try to read alternative MAC address pointer */
   4181 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4182 		return 0;
   4183 
    4184 	/* Check whether the pointer is valid. */
   4185 	if ((offset == 0x0000) || (offset == 0xffff))
   4186 		return 0;
   4187 
   4188 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4189 	/*
    4190 	 * Check whether the alternative MAC address is valid or not.
    4191 	 * Some cards have a non-0xffff pointer but don't actually use
    4192 	 * an alternative MAC address.
    4193 	 *
    4194 	 * Check that the multicast bit (LSB of the first octet) is clear.
   4195 	 */
   4196 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4197 		if (((myea[0] & 0xff) & 0x01) == 0)
   4198 			return offset; /* Found */
   4199 
   4200 	/* Not found */
   4201 	return 0;
   4202 }
   4203 
   4204 static int
   4205 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4206 {
   4207 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4208 	uint16_t offset = NVM_OFF_MACADDR;
   4209 	int do_invert = 0;
   4210 
   4211 	switch (sc->sc_type) {
   4212 	case WM_T_82580:
   4213 	case WM_T_I350:
   4214 	case WM_T_I354:
   4215 		/* EEPROM Top Level Partitioning */
   4216 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4217 		break;
   4218 	case WM_T_82571:
   4219 	case WM_T_82575:
   4220 	case WM_T_82576:
   4221 	case WM_T_80003:
   4222 	case WM_T_I210:
   4223 	case WM_T_I211:
   4224 		offset = wm_check_alt_mac_addr(sc);
   4225 		if (offset == 0)
   4226 			if ((sc->sc_funcid & 0x01) == 1)
   4227 				do_invert = 1;
   4228 		break;
   4229 	default:
   4230 		if ((sc->sc_funcid & 0x01) == 1)
   4231 			do_invert = 1;
   4232 		break;
   4233 	}
   4234 
   4235 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4236 		goto bad;
   4237 
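	/*
	 * NVM words are little-endian. Illustrative example: the words
	 * 0x1100, 0x3322 and 0x5544 unpack to MAC 00:11:22:33:44:55.
	 */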
   4238 	enaddr[0] = myea[0] & 0xff;
   4239 	enaddr[1] = myea[0] >> 8;
   4240 	enaddr[2] = myea[1] & 0xff;
   4241 	enaddr[3] = myea[1] >> 8;
   4242 	enaddr[4] = myea[2] & 0xff;
   4243 	enaddr[5] = myea[2] >> 8;
   4244 
   4245 	/*
   4246 	 * Toggle the LSB of the MAC address on the second port
   4247 	 * of some dual port cards.
   4248 	 */
   4249 	if (do_invert != 0)
   4250 		enaddr[5] ^= 1;
   4251 
   4252 	return 0;
   4253 
   4254 bad:
   4255 	return -1;
   4256 }
   4257 
   4258 /*
   4259  * wm_set_ral:
   4260  *
    4261  *	Set an entry in the receive address list.
   4262  */
   4263 static void
   4264 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4265 {
   4266 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4267 	uint32_t wlock_mac;
   4268 	int rv;
   4269 
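	/*
	 * Pack the six address bytes into the RAL/RAH pair. Illustrative
	 * example: 00:11:22:33:44:55 gives ral_lo = 0x33221100 and
	 * ral_hi = 0x5544 (plus RAL_AV, the Address Valid bit).
	 */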
   4270 	if (enaddr != NULL) {
   4271 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4272 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4273 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4274 		ral_hi |= RAL_AV;
   4275 	} else {
   4276 		ral_lo = 0;
   4277 		ral_hi = 0;
   4278 	}
   4279 
   4280 	switch (sc->sc_type) {
   4281 	case WM_T_82542_2_0:
   4282 	case WM_T_82542_2_1:
   4283 	case WM_T_82543:
   4284 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4285 		CSR_WRITE_FLUSH(sc);
   4286 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		break;
   4289 	case WM_T_PCH2:
   4290 	case WM_T_PCH_LPT:
   4291 	case WM_T_PCH_SPT:
   4292 	case WM_T_PCH_CNP:
   4293 		if (idx == 0) {
   4294 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4295 			CSR_WRITE_FLUSH(sc);
   4296 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4297 			CSR_WRITE_FLUSH(sc);
   4298 			return;
   4299 		}
   4300 		if (sc->sc_type != WM_T_PCH2) {
   4301 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4302 			    FWSM_WLOCK_MAC);
   4303 			addrl = WMREG_SHRAL(idx - 1);
   4304 			addrh = WMREG_SHRAH(idx - 1);
   4305 		} else {
   4306 			wlock_mac = 0;
   4307 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4308 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4309 		}
   4310 
   4311 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4312 			rv = wm_get_swflag_ich8lan(sc);
   4313 			if (rv != 0)
   4314 				return;
   4315 			CSR_WRITE(sc, addrl, ral_lo);
   4316 			CSR_WRITE_FLUSH(sc);
   4317 			CSR_WRITE(sc, addrh, ral_hi);
   4318 			CSR_WRITE_FLUSH(sc);
   4319 			wm_put_swflag_ich8lan(sc);
   4320 		}
   4321 
   4322 		break;
   4323 	default:
   4324 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4325 		CSR_WRITE_FLUSH(sc);
   4326 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4327 		CSR_WRITE_FLUSH(sc);
   4328 		break;
   4329 	}
   4330 }
   4331 
   4332 /*
   4333  * wm_mchash:
   4334  *
   4335  *	Compute the hash of the multicast address for the 4096-bit
   4336  *	multicast filter.
   4337  */
   4338 static uint32_t
   4339 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4340 {
   4341 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4342 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4343 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4344 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4345 	uint32_t hash;
   4346 
   4347 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4348 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4349 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    4350 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   4351 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4352 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4353 		return (hash & 0x3ff);
   4354 	}
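	/*
	 * Worked example (illustrative): with sc_mchash_type 0,
	 * enaddr[4] = 0xab and enaddr[5] = 0xcd give
	 * hash = (0xab >> 4) | (0xcd << 4) = 0xcda after the 0xfff mask.
	 */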
   4355 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4356 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4357 
   4358 	return (hash & 0xfff);
   4359 }
   4360 
   4361 /*
    4362  * wm_rar_count:
    4363  *	Return the number of entries in the receive address list.
   4364  */
   4365 static int
   4366 wm_rar_count(struct wm_softc *sc)
   4367 {
   4368 	int size;
   4369 
   4370 	switch (sc->sc_type) {
   4371 	case WM_T_ICH8:
    4372 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4373 		break;
   4374 	case WM_T_ICH9:
   4375 	case WM_T_ICH10:
   4376 	case WM_T_PCH:
   4377 		size = WM_RAL_TABSIZE_ICH8;
   4378 		break;
   4379 	case WM_T_PCH2:
   4380 		size = WM_RAL_TABSIZE_PCH2;
   4381 		break;
   4382 	case WM_T_PCH_LPT:
   4383 	case WM_T_PCH_SPT:
   4384 	case WM_T_PCH_CNP:
   4385 		size = WM_RAL_TABSIZE_PCH_LPT;
   4386 		break;
   4387 	case WM_T_82575:
   4388 	case WM_T_I210:
   4389 	case WM_T_I211:
   4390 		size = WM_RAL_TABSIZE_82575;
   4391 		break;
   4392 	case WM_T_82576:
   4393 	case WM_T_82580:
   4394 		size = WM_RAL_TABSIZE_82576;
   4395 		break;
   4396 	case WM_T_I350:
   4397 	case WM_T_I354:
   4398 		size = WM_RAL_TABSIZE_I350;
   4399 		break;
   4400 	default:
   4401 		size = WM_RAL_TABSIZE;
   4402 	}
   4403 
   4404 	return size;
   4405 }
   4406 
   4407 /*
   4408  * wm_set_filter:
   4409  *
   4410  *	Set up the receive filter.
   4411  */
   4412 static void
   4413 wm_set_filter(struct wm_softc *sc)
   4414 {
   4415 	struct ethercom *ec = &sc->sc_ethercom;
   4416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4417 	struct ether_multi *enm;
   4418 	struct ether_multistep step;
   4419 	bus_addr_t mta_reg;
   4420 	uint32_t hash, reg, bit;
   4421 	int i, size, ralmax, rv;
   4422 
   4423 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4424 		device_xname(sc->sc_dev), __func__));
   4425 	KASSERT(mutex_owned(sc->sc_core_lock));
   4426 
   4427 	if (sc->sc_type >= WM_T_82544)
   4428 		mta_reg = WMREG_CORDOVA_MTA;
   4429 	else
   4430 		mta_reg = WMREG_MTA;
   4431 
   4432 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4433 
   4434 	if (sc->sc_if_flags & IFF_BROADCAST)
   4435 		sc->sc_rctl |= RCTL_BAM;
   4436 	if (sc->sc_if_flags & IFF_PROMISC) {
   4437 		sc->sc_rctl |= RCTL_UPE;
   4438 		ETHER_LOCK(ec);
   4439 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4440 		ETHER_UNLOCK(ec);
   4441 		goto allmulti;
   4442 	}
   4443 
   4444 	/*
   4445 	 * Set the station address in the first RAL slot, and
   4446 	 * clear the remaining slots.
   4447 	 */
   4448 	size = wm_rar_count(sc);
   4449 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4450 
   4451 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4452 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4453 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4454 		switch (i) {
   4455 		case 0:
   4456 			/* We can use all entries */
   4457 			ralmax = size;
   4458 			break;
   4459 		case 1:
   4460 			/* Only RAR[0] */
   4461 			ralmax = 1;
   4462 			break;
   4463 		default:
   4464 			/* Available SHRA + RAR[0] */
   4465 			ralmax = i + 1;
   4466 		}
   4467 	} else
   4468 		ralmax = size;
   4469 	for (i = 1; i < size; i++) {
   4470 		if (i < ralmax)
   4471 			wm_set_ral(sc, NULL, i);
   4472 	}
   4473 
   4474 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4475 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4476 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4477 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4478 		size = WM_ICH8_MC_TABSIZE;
   4479 	else
   4480 		size = WM_MC_TABSIZE;
   4481 	/* Clear out the multicast table. */
   4482 	for (i = 0; i < size; i++) {
   4483 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4484 		CSR_WRITE_FLUSH(sc);
   4485 	}
   4486 
   4487 	ETHER_LOCK(ec);
   4488 	ETHER_FIRST_MULTI(step, ec, enm);
   4489 	while (enm != NULL) {
   4490 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4491 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4492 			ETHER_UNLOCK(ec);
   4493 			/*
   4494 			 * We must listen to a range of multicast addresses.
   4495 			 * For now, just accept all multicasts, rather than
   4496 			 * trying to set only those filter bits needed to match
   4497 			 * the range.  (At this time, the only use of address
   4498 			 * ranges is for IP multicast routing, for which the
   4499 			 * range is big enough to require all bits set.)
   4500 			 */
   4501 			goto allmulti;
   4502 		}
   4503 
   4504 		hash = wm_mchash(sc, enm->enm_addrlo);
   4505 
   4506 		reg = (hash >> 5);
   4507 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4508 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4509 		    || (sc->sc_type == WM_T_PCH2)
   4510 		    || (sc->sc_type == WM_T_PCH_LPT)
   4511 		    || (sc->sc_type == WM_T_PCH_SPT)
   4512 		    || (sc->sc_type == WM_T_PCH_CNP))
   4513 			reg &= 0x1f;
   4514 		else
   4515 			reg &= 0x7f;
   4516 		bit = hash & 0x1f;
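		/*
		 * Illustrative example: hash 0xcda selects MTA word
		 * 0x66 (0xcda >> 5) and bit 0x1a (0xcda & 0x1f) in it.
		 */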
   4517 
   4518 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4519 		hash |= 1U << bit;
   4520 
   4521 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4522 			/*
    4523 			 * 82544 Errata 9: Certain registers cannot be written
   4524 			 * with particular alignments in PCI-X bus operation
   4525 			 * (FCAH, MTA and VFTA).
   4526 			 */
   4527 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4528 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4529 			CSR_WRITE_FLUSH(sc);
   4530 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4531 			CSR_WRITE_FLUSH(sc);
   4532 		} else {
   4533 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4534 			CSR_WRITE_FLUSH(sc);
   4535 		}
   4536 
   4537 		ETHER_NEXT_MULTI(step, enm);
   4538 	}
   4539 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4540 	ETHER_UNLOCK(ec);
   4541 
   4542 	goto setit;
   4543 
   4544 allmulti:
   4545 	sc->sc_rctl |= RCTL_MPE;
   4546 
   4547 setit:
   4548 	if (sc->sc_type >= WM_T_PCH2) {
   4549 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4550 		    && (ifp->if_mtu > ETHERMTU))
   4551 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4552 		else
   4553 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4554 		if (rv != 0)
   4555 			device_printf(sc->sc_dev,
    4556 			    "Failed to apply the jumbo frame workaround.\n");
   4557 	}
   4558 
   4559 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4560 }
   4561 
   4562 /* Reset and init related */
   4563 
   4564 static void
   4565 wm_set_vlan(struct wm_softc *sc)
   4566 {
   4567 
   4568 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4569 		device_xname(sc->sc_dev), __func__));
   4570 
   4571 	/* Deal with VLAN enables. */
   4572 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4573 		sc->sc_ctrl |= CTRL_VME;
   4574 	else
   4575 		sc->sc_ctrl &= ~CTRL_VME;
   4576 
   4577 	/* Write the control registers. */
   4578 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4579 }
   4580 
   4581 static void
   4582 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4583 {
   4584 	uint32_t gcr;
   4585 	pcireg_t ctrl2;
   4586 
   4587 	gcr = CSR_READ(sc, WMREG_GCR);
   4588 
   4589 	/* Only take action if timeout value is defaulted to 0 */
   4590 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4591 		goto out;
   4592 
   4593 	if ((gcr & GCR_CAP_VER2) == 0) {
   4594 		gcr |= GCR_CMPL_TMOUT_10MS;
   4595 		goto out;
   4596 	}
   4597 
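	/*
	 * Judging from the register and constant names (assumption),
	 * capability version 2 devices take the completion timeout from
	 * PCIe Device Control 2, where a 16ms value is programmed below.
	 */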
   4598 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4599 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4600 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4601 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4602 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4603 
   4604 out:
   4605 	/* Disable completion timeout resend */
   4606 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4607 
   4608 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4609 }
   4610 
   4611 void
   4612 wm_get_auto_rd_done(struct wm_softc *sc)
   4613 {
   4614 	int i;
   4615 
    4616 	/* Wait for eeprom to reload */
   4617 	switch (sc->sc_type) {
   4618 	case WM_T_82571:
   4619 	case WM_T_82572:
   4620 	case WM_T_82573:
   4621 	case WM_T_82574:
   4622 	case WM_T_82583:
   4623 	case WM_T_82575:
   4624 	case WM_T_82576:
   4625 	case WM_T_82580:
   4626 	case WM_T_I350:
   4627 	case WM_T_I354:
   4628 	case WM_T_I210:
   4629 	case WM_T_I211:
   4630 	case WM_T_80003:
   4631 	case WM_T_ICH8:
   4632 	case WM_T_ICH9:
   4633 		for (i = 0; i < 10; i++) {
   4634 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4635 				break;
   4636 			delay(1000);
   4637 		}
   4638 		if (i == 10) {
   4639 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4640 			    "complete\n", device_xname(sc->sc_dev));
   4641 		}
   4642 		break;
   4643 	default:
   4644 		break;
   4645 	}
   4646 }
   4647 
   4648 void
   4649 wm_lan_init_done(struct wm_softc *sc)
   4650 {
   4651 	uint32_t reg = 0;
   4652 	int i;
   4653 
   4654 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4655 		device_xname(sc->sc_dev), __func__));
   4656 
   4657 	/* Wait for eeprom to reload */
   4658 	switch (sc->sc_type) {
   4659 	case WM_T_ICH10:
   4660 	case WM_T_PCH:
   4661 	case WM_T_PCH2:
   4662 	case WM_T_PCH_LPT:
   4663 	case WM_T_PCH_SPT:
   4664 	case WM_T_PCH_CNP:
   4665 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4666 			reg = CSR_READ(sc, WMREG_STATUS);
   4667 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4668 				break;
   4669 			delay(100);
   4670 		}
   4671 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4672 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4673 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4674 		}
   4675 		break;
   4676 	default:
   4677 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4678 		    __func__);
   4679 		break;
   4680 	}
   4681 
   4682 	reg &= ~STATUS_LAN_INIT_DONE;
   4683 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4684 }
   4685 
   4686 void
   4687 wm_get_cfg_done(struct wm_softc *sc)
   4688 {
   4689 	int mask;
   4690 	uint32_t reg;
   4691 	int i;
   4692 
   4693 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4694 		device_xname(sc->sc_dev), __func__));
   4695 
   4696 	/* Wait for eeprom to reload */
   4697 	switch (sc->sc_type) {
   4698 	case WM_T_82542_2_0:
   4699 	case WM_T_82542_2_1:
   4700 		/* null */
   4701 		break;
   4702 	case WM_T_82543:
   4703 	case WM_T_82544:
   4704 	case WM_T_82540:
   4705 	case WM_T_82545:
   4706 	case WM_T_82545_3:
   4707 	case WM_T_82546:
   4708 	case WM_T_82546_3:
   4709 	case WM_T_82541:
   4710 	case WM_T_82541_2:
   4711 	case WM_T_82547:
   4712 	case WM_T_82547_2:
   4713 	case WM_T_82573:
   4714 	case WM_T_82574:
   4715 	case WM_T_82583:
   4716 		/* generic */
   4717 		delay(10*1000);
   4718 		break;
   4719 	case WM_T_80003:
   4720 	case WM_T_82571:
   4721 	case WM_T_82572:
   4722 	case WM_T_82575:
   4723 	case WM_T_82576:
   4724 	case WM_T_82580:
   4725 	case WM_T_I350:
   4726 	case WM_T_I354:
   4727 	case WM_T_I210:
   4728 	case WM_T_I211:
   4729 		if (sc->sc_type == WM_T_82571) {
   4730 			/* Only 82571 shares port 0 */
   4731 			mask = EEMNGCTL_CFGDONE_0;
   4732 		} else
   4733 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
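		/*
		 * Assuming the CFGDONE bits are adjacent in EEMNGCTL,
		 * e.g. PCI function 1 polls the CFGDONE_1 bit.
		 */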
   4734 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4735 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4736 				break;
   4737 			delay(1000);
   4738 		}
   4739 		if (i >= WM_PHY_CFG_TIMEOUT)
   4740 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4741 				device_xname(sc->sc_dev), __func__));
   4742 		break;
   4743 	case WM_T_ICH8:
   4744 	case WM_T_ICH9:
   4745 	case WM_T_ICH10:
   4746 	case WM_T_PCH:
   4747 	case WM_T_PCH2:
   4748 	case WM_T_PCH_LPT:
   4749 	case WM_T_PCH_SPT:
   4750 	case WM_T_PCH_CNP:
   4751 		delay(10*1000);
   4752 		if (sc->sc_type >= WM_T_ICH10)
   4753 			wm_lan_init_done(sc);
   4754 		else
   4755 			wm_get_auto_rd_done(sc);
   4756 
   4757 		/* Clear PHY Reset Asserted bit */
   4758 		reg = CSR_READ(sc, WMREG_STATUS);
   4759 		if ((reg & STATUS_PHYRA) != 0)
   4760 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4761 		break;
   4762 	default:
   4763 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4764 		    __func__);
   4765 		break;
   4766 	}
   4767 }
   4768 
   4769 int
   4770 wm_phy_post_reset(struct wm_softc *sc)
   4771 {
   4772 	device_t dev = sc->sc_dev;
   4773 	uint16_t reg;
   4774 	int rv = 0;
   4775 
   4776 	/* This function is only for ICH8 and newer. */
   4777 	if (sc->sc_type < WM_T_ICH8)
   4778 		return 0;
   4779 
   4780 	if (wm_phy_resetisblocked(sc)) {
   4781 		/* XXX */
   4782 		device_printf(dev, "PHY is blocked\n");
   4783 		return -1;
   4784 	}
   4785 
   4786 	/* Allow time for h/w to get to quiescent state after reset */
   4787 	delay(10*1000);
   4788 
   4789 	/* Perform any necessary post-reset workarounds */
   4790 	if (sc->sc_type == WM_T_PCH)
   4791 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4792 	else if (sc->sc_type == WM_T_PCH2)
   4793 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4794 	if (rv != 0)
   4795 		return rv;
   4796 
   4797 	/* Clear the host wakeup bit after lcd reset */
   4798 	if (sc->sc_type >= WM_T_PCH) {
   4799 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4800 		reg &= ~BM_WUC_HOST_WU_BIT;
   4801 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4802 	}
   4803 
   4804 	/* Configure the LCD with the extended configuration region in NVM */
   4805 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4806 		return rv;
   4807 
   4808 	/* Configure the LCD with the OEM bits in NVM */
   4809 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4810 
   4811 	if (sc->sc_type == WM_T_PCH2) {
   4812 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4813 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4814 			delay(10 * 1000);
   4815 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4816 		}
   4817 		/* Set EEE LPI Update Timer to 200usec */
   4818 		rv = sc->phy.acquire(sc);
   4819 		if (rv)
   4820 			return rv;
   4821 		rv = wm_write_emi_reg_locked(dev,
   4822 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4823 		sc->phy.release(sc);
   4824 	}
   4825 
   4826 	return rv;
   4827 }
   4828 
   4829 /* Only for PCH and newer */
   4830 static int
   4831 wm_write_smbus_addr(struct wm_softc *sc)
   4832 {
   4833 	uint32_t strap, freq;
   4834 	uint16_t phy_data;
   4835 	int rv;
   4836 
   4837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4838 		device_xname(sc->sc_dev), __func__));
   4839 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4840 
   4841 	strap = CSR_READ(sc, WMREG_STRAP);
   4842 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4843 
   4844 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4845 	if (rv != 0)
   4846 		return rv;
   4847 
   4848 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4849 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4850 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4851 
   4852 	if (sc->sc_phytype == WMPHY_I217) {
   4853 		/* Restore SMBus frequency */
    4854 		if (freq--) {
   4855 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4856 			    | HV_SMB_ADDR_FREQ_HIGH);
   4857 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4858 			    HV_SMB_ADDR_FREQ_LOW);
   4859 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4860 			    HV_SMB_ADDR_FREQ_HIGH);
   4861 		} else
   4862 			DPRINTF(sc, WM_DEBUG_INIT,
   4863 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4864 				device_xname(sc->sc_dev), __func__));
   4865 	}
   4866 
   4867 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4868 	    phy_data);
   4869 }
   4870 
   4871 static int
   4872 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4873 {
   4874 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4875 	uint16_t phy_page = 0;
   4876 	int rv = 0;
   4877 
   4878 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4879 		device_xname(sc->sc_dev), __func__));
   4880 
   4881 	switch (sc->sc_type) {
   4882 	case WM_T_ICH8:
   4883 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4884 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4885 			return 0;
   4886 
   4887 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4888 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4889 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4890 			break;
   4891 		}
   4892 		/* FALLTHROUGH */
   4893 	case WM_T_PCH:
   4894 	case WM_T_PCH2:
   4895 	case WM_T_PCH_LPT:
   4896 	case WM_T_PCH_SPT:
   4897 	case WM_T_PCH_CNP:
   4898 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4899 		break;
   4900 	default:
   4901 		return 0;
   4902 	}
   4903 
   4904 	if ((rv = sc->phy.acquire(sc)) != 0)
   4905 		return rv;
   4906 
   4907 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4908 	if ((reg & sw_cfg_mask) == 0)
   4909 		goto release;
   4910 
   4911 	/*
   4912 	 * Make sure HW does not configure LCD from PHY extended configuration
   4913 	 * before SW configuration
   4914 	 */
   4915 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4916 	if ((sc->sc_type < WM_T_PCH2)
   4917 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4918 		goto release;
   4919 
   4920 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4921 		device_xname(sc->sc_dev), __func__));
   4922 	/* word_addr is in DWORD */
   4923 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4924 
   4925 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4926 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4927 	if (cnf_size == 0)
   4928 		goto release;
   4929 
   4930 	if (((sc->sc_type == WM_T_PCH)
   4931 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4932 	    || (sc->sc_type > WM_T_PCH)) {
   4933 		/*
   4934 		 * HW configures the SMBus address and LEDs when the OEM and
   4935 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4936 		 * are cleared, SW will configure them instead.
   4937 		 */
   4938 		DPRINTF(sc, WM_DEBUG_INIT,
   4939 		    ("%s: %s: Configure SMBus and LED\n",
   4940 			device_xname(sc->sc_dev), __func__));
   4941 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4942 			goto release;
   4943 
   4944 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4945 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4946 		    (uint16_t)reg);
   4947 		if (rv != 0)
   4948 			goto release;
   4949 	}
   4950 
   4951 	/* Configure LCD from extended configuration region. */
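	/*
	 * Each entry in the region is a (data, address) word pair; a
	 * write to the page-select register updates phy_page for the
	 * entries that follow.
	 */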
   4952 	for (i = 0; i < cnf_size; i++) {
   4953 		uint16_t reg_data, reg_addr;
   4954 
   4955 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4956 			goto release;
   4957 
    4958 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4959 			goto release;
   4960 
   4961 		if (reg_addr == IGPHY_PAGE_SELECT)
   4962 			phy_page = reg_data;
   4963 
   4964 		reg_addr &= IGPHY_MAXREGADDR;
   4965 		reg_addr |= phy_page;
   4966 
   4967 		KASSERT(sc->phy.writereg_locked != NULL);
   4968 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4969 		    reg_data);
   4970 	}
   4971 
   4972 release:
   4973 	sc->phy.release(sc);
   4974 	return rv;
   4975 }
   4976 
   4977 /*
   4978  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4979  *  @sc:       pointer to the HW structure
   4980  *  @d0_state: boolean if entering d0 or d3 device state
   4981  *
   4982  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4983  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4984  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4985  */
   4986 int
   4987 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4988 {
   4989 	uint32_t mac_reg;
   4990 	uint16_t oem_reg;
   4991 	int rv;
   4992 
   4993 	if (sc->sc_type < WM_T_PCH)
   4994 		return 0;
   4995 
   4996 	rv = sc->phy.acquire(sc);
   4997 	if (rv != 0)
   4998 		return rv;
   4999 
   5000 	if (sc->sc_type == WM_T_PCH) {
   5001 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5002 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5003 			goto release;
   5004 	}
   5005 
   5006 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5007 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5008 		goto release;
   5009 
   5010 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5011 
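	/*
	 * Map the MAC's PHY_CTRL bits onto the PHY's OEM bits: in D0
	 * only the D0 variants matter, while for D3 the non-D0 variants
	 * are honored as well.
	 */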
   5012 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5013 	if (rv != 0)
   5014 		goto release;
   5015 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5016 
   5017 	if (d0_state) {
   5018 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5019 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5020 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5021 			oem_reg |= HV_OEM_BITS_LPLU;
   5022 	} else {
   5023 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5024 		    != 0)
   5025 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5026 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5027 		    != 0)
   5028 			oem_reg |= HV_OEM_BITS_LPLU;
   5029 	}
   5030 
   5031 	/* Set Restart auto-neg to activate the bits */
   5032 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5033 	    && (wm_phy_resetisblocked(sc) == false))
   5034 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5035 
   5036 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5037 
   5038 release:
   5039 	sc->phy.release(sc);
   5040 
   5041 	return rv;
   5042 }
   5043 
   5044 /* Init hardware bits */
   5045 void
   5046 wm_initialize_hardware_bits(struct wm_softc *sc)
   5047 {
   5048 	uint32_t tarc0, tarc1, reg;
   5049 
   5050 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5051 		device_xname(sc->sc_dev), __func__));
   5052 
   5053 	/* For 82571 variant, 80003 and ICHs */
   5054 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5055 	    || WM_IS_ICHPCH(sc)) {
   5056 
   5057 		/* Transmit Descriptor Control 0 */
   5058 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5059 		reg |= TXDCTL_COUNT_DESC;
   5060 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5061 
   5062 		/* Transmit Descriptor Control 1 */
   5063 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5064 		reg |= TXDCTL_COUNT_DESC;
   5065 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5066 
   5067 		/* TARC0 */
   5068 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5069 		switch (sc->sc_type) {
   5070 		case WM_T_82571:
   5071 		case WM_T_82572:
   5072 		case WM_T_82573:
   5073 		case WM_T_82574:
   5074 		case WM_T_82583:
   5075 		case WM_T_80003:
   5076 			/* Clear bits 30..27 */
   5077 			tarc0 &= ~__BITS(30, 27);
   5078 			break;
   5079 		default:
   5080 			break;
   5081 		}
   5082 
   5083 		switch (sc->sc_type) {
   5084 		case WM_T_82571:
   5085 		case WM_T_82572:
   5086 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5087 
   5088 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5089 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5090 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5091 			/* 8257[12] Errata No.7 */
    5092 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5093 
   5094 			/* TARC1 bit 28 */
   5095 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5096 				tarc1 &= ~__BIT(28);
   5097 			else
   5098 				tarc1 |= __BIT(28);
   5099 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5100 
   5101 			/*
   5102 			 * 8257[12] Errata No.13
    5103 			 * Disable Dynamic Clock Gating.
   5104 			 */
   5105 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5106 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5107 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5108 			break;
   5109 		case WM_T_82573:
   5110 		case WM_T_82574:
   5111 		case WM_T_82583:
   5112 			if ((sc->sc_type == WM_T_82574)
   5113 			    || (sc->sc_type == WM_T_82583))
   5114 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5115 
   5116 			/* Extended Device Control */
   5117 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5118 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5119 			reg |= __BIT(22);	/* Set bit 22 */
   5120 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5121 
   5122 			/* Device Control */
   5123 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5124 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5125 
   5126 			/* PCIe Control Register */
   5127 			/*
   5128 			 * 82573 Errata (unknown).
   5129 			 *
   5130 			 * 82574 Errata 25 and 82583 Errata 12
   5131 			 * "Dropped Rx Packets":
    5132 			 *   NVM image versions 2.1.4 and newer don't have this bug.
   5133 			 */
   5134 			reg = CSR_READ(sc, WMREG_GCR);
   5135 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5136 			CSR_WRITE(sc, WMREG_GCR, reg);
   5137 
   5138 			if ((sc->sc_type == WM_T_82574)
   5139 			    || (sc->sc_type == WM_T_82583)) {
   5140 				/*
   5141 				 * Document says this bit must be set for
   5142 				 * proper operation.
   5143 				 */
   5144 				reg = CSR_READ(sc, WMREG_GCR);
   5145 				reg |= __BIT(22);
   5146 				CSR_WRITE(sc, WMREG_GCR, reg);
   5147 
   5148 				/*
    5149 				 * Apply a workaround for a hardware erratum
    5150 				 * documented in the errata sheets: some
    5151 				 * error-prone or unreliable PCIe completions
    5152 				 * occur, particularly with ASPM enabled.
    5153 				 * Without the fix, the issue can cause Tx
    5154 				 * timeouts.
   5155 				 */
   5156 				reg = CSR_READ(sc, WMREG_GCR2);
   5157 				reg |= __BIT(0);
   5158 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5159 			}
   5160 			break;
   5161 		case WM_T_80003:
   5162 			/* TARC0 */
   5163 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5164 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5165 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5166 
   5167 			/* TARC1 bit 28 */
   5168 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5169 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5170 				tarc1 &= ~__BIT(28);
   5171 			else
   5172 				tarc1 |= __BIT(28);
   5173 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5174 			break;
   5175 		case WM_T_ICH8:
   5176 		case WM_T_ICH9:
   5177 		case WM_T_ICH10:
   5178 		case WM_T_PCH:
   5179 		case WM_T_PCH2:
   5180 		case WM_T_PCH_LPT:
   5181 		case WM_T_PCH_SPT:
   5182 		case WM_T_PCH_CNP:
   5183 			/* TARC0 */
   5184 			if (sc->sc_type == WM_T_ICH8) {
   5185 				/* Set TARC0 bits 29 and 28 */
   5186 				tarc0 |= __BITS(29, 28);
   5187 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5188 				tarc0 |= __BIT(29);
   5189 				/*
    5190 				 * Drop bit 28. From Linux.
   5191 				 * See I218/I219 spec update
   5192 				 * "5. Buffer Overrun While the I219 is
   5193 				 * Processing DMA Transactions"
   5194 				 */
   5195 				tarc0 &= ~__BIT(28);
   5196 			}
   5197 			/* Set TARC0 bits 23,24,26,27 */
   5198 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5199 
   5200 			/* CTRL_EXT */
   5201 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5202 			reg |= __BIT(22);	/* Set bit 22 */
   5203 			/*
   5204 			 * Enable PHY low-power state when MAC is at D3
   5205 			 * w/o WoL
   5206 			 */
   5207 			if (sc->sc_type >= WM_T_PCH)
   5208 				reg |= CTRL_EXT_PHYPDEN;
   5209 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5210 
   5211 			/* TARC1 */
   5212 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5213 			/* bit 28 */
   5214 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5215 				tarc1 &= ~__BIT(28);
   5216 			else
   5217 				tarc1 |= __BIT(28);
   5218 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5219 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5220 
   5221 			/* Device Status */
   5222 			if (sc->sc_type == WM_T_ICH8) {
   5223 				reg = CSR_READ(sc, WMREG_STATUS);
   5224 				reg &= ~__BIT(31);
   5225 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5226 
   5227 			}
   5228 
   5229 			/* IOSFPC */
   5230 			if (sc->sc_type == WM_T_PCH_SPT) {
   5231 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5232 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5233 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5234 			}
   5235 			/*
    5236 			 * To work around a descriptor data corruption issue
    5237 			 * seen with NFS v2 UDP traffic, simply disable the
    5238 			 * NFS filtering capability.
   5239 			 */
   5240 			reg = CSR_READ(sc, WMREG_RFCTL);
   5241 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5242 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5243 			break;
   5244 		default:
   5245 			break;
   5246 		}
   5247 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5248 
   5249 		switch (sc->sc_type) {
   5250 		case WM_T_82571:
   5251 		case WM_T_82572:
   5252 		case WM_T_82573:
   5253 		case WM_T_80003:
   5254 		case WM_T_ICH8:
   5255 			/*
    5256 			 * 8257[12] Errata No.52, 82573 Errata No.43 and
    5257 			 * others: avoid the RSS Hash Value bug.
   5258 			 */
   5259 			reg = CSR_READ(sc, WMREG_RFCTL);
    5260 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5261 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5262 			break;
   5263 		case WM_T_82574:
    5264 			/* Use extended Rx descriptors. */
   5265 			reg = CSR_READ(sc, WMREG_RFCTL);
   5266 			reg |= WMREG_RFCTL_EXSTEN;
   5267 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5268 			break;
   5269 		default:
   5270 			break;
   5271 		}
   5272 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5273 		/*
   5274 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5275 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5276 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5277 		 * Correctly by the Device"
   5278 		 *
   5279 		 * I354(C2000) Errata AVR53:
   5280 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5281 		 * Hang"
   5282 		 */
   5283 		reg = CSR_READ(sc, WMREG_RFCTL);
   5284 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5285 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5286 	}
   5287 }
   5288 
   5289 static uint32_t
   5290 wm_rxpbs_adjust_82580(uint32_t val)
   5291 {
   5292 	uint32_t rv = 0;
   5293 
   5294 	if (val < __arraycount(wm_82580_rxpbs_table))
   5295 		rv = wm_82580_rxpbs_table[val];
   5296 
   5297 	return rv;
   5298 }
   5299 
   5300 /*
   5301  * wm_reset_phy:
   5302  *
   5303  *	generic PHY reset function.
   5304  *	Same as e1000_phy_hw_reset_generic()
   5305  */
   5306 static int
   5307 wm_reset_phy(struct wm_softc *sc)
   5308 {
   5309 	uint32_t reg;
   5310 	int rv;
   5311 
   5312 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5313 		device_xname(sc->sc_dev), __func__));
   5314 	if (wm_phy_resetisblocked(sc))
   5315 		return -1;
   5316 
   5317 	rv = sc->phy.acquire(sc);
   5318 	if (rv) {
   5319 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5320 		    __func__, rv);
   5321 		return rv;
   5322 	}
   5323 
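	/*
	 * Assert PHY reset, hold it for the PHY-specific delay, then
	 * deassert and give the PHY time to come back up.
	 */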
   5324 	reg = CSR_READ(sc, WMREG_CTRL);
   5325 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5326 	CSR_WRITE_FLUSH(sc);
   5327 
   5328 	delay(sc->phy.reset_delay_us);
   5329 
   5330 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5331 	CSR_WRITE_FLUSH(sc);
   5332 
   5333 	delay(150);
   5334 
   5335 	sc->phy.release(sc);
   5336 
   5337 	wm_get_cfg_done(sc);
   5338 	wm_phy_post_reset(sc);
   5339 
   5340 	return 0;
   5341 }
   5342 
   5343 /*
   5344  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5345  *
    5346  * On I219, the descriptor rings must be emptied before resetting the HW
    5347  * or before changing the device state to D3 during runtime (runtime PM).
    5348  *
    5349  * Failure to do this will cause the HW to enter a unit hang state which
    5350  * can only be released by a PCI reset on the device.
   5351  *
   5352  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5353  */
   5354 static void
   5355 wm_flush_desc_rings(struct wm_softc *sc)
   5356 {
   5357 	pcireg_t preg;
   5358 	uint32_t reg;
   5359 	struct wm_txqueue *txq;
   5360 	wiseman_txdesc_t *txd;
   5361 	int nexttx;
   5362 	uint32_t rctl;
   5363 
   5364 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5365 
   5366 	/* First, disable MULR fix in FEXTNVM11 */
   5367 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5368 	reg |= FEXTNVM11_DIS_MULRFIX;
   5369 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5370 
   5371 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5372 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5373 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5374 		return;
   5375 
   5376 	/*
   5377 	 * Remove all descriptors from the tx_ring.
   5378 	 *
    5379 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    5380 	 * happens when the HW reads the regs. We assign the ring itself as
    5381 	 * the data of the next descriptor. We don't care about the data
    5382 	 * since we are about to reset the HW.
   5383 	 */
   5384 #ifdef WM_DEBUG
   5385 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5386 #endif
   5387 	reg = CSR_READ(sc, WMREG_TCTL);
   5388 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5389 
   5390 	txq = &sc->sc_queue[0].wmq_txq;
   5391 	nexttx = txq->txq_next;
   5392 	txd = &txq->txq_descs[nexttx];
   5393 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5394 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5395 	txd->wtx_fields.wtxu_status = 0;
   5396 	txd->wtx_fields.wtxu_options = 0;
   5397 	txd->wtx_fields.wtxu_vlan = 0;
   5398 
   5399 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5400 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5401 
   5402 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5403 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5404 	CSR_WRITE_FLUSH(sc);
   5405 	delay(250);
   5406 
   5407 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5408 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5409 		return;
   5410 
   5411 	/*
   5412 	 * Mark all descriptors in the RX ring as consumed and disable the
   5413 	 * rx ring.
   5414 	 */
   5415 #ifdef WM_DEBUG
   5416 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5417 #endif
   5418 	rctl = CSR_READ(sc, WMREG_RCTL);
   5419 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5420 	CSR_WRITE_FLUSH(sc);
   5421 	delay(150);
   5422 
   5423 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5424 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5425 	reg &= 0xffffc000;
   5426 	/*
   5427 	 * Update thresholds: prefetch threshold to 31, host threshold
   5428 	 * to 1 and make sure the granularity is "descriptors" and not
   5429 	 * "cache lines"
   5430 	 */
   5431 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5432 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5433 
   5434 	/* Momentarily enable the RX ring for the changes to take effect */
   5435 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5436 	CSR_WRITE_FLUSH(sc);
   5437 	delay(150);
   5438 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5439 }
   5440 
   5441 /*
   5442  * wm_reset:
   5443  *
   5444  *	Reset the i82542 chip.
   5445  */
   5446 static void
   5447 wm_reset(struct wm_softc *sc)
   5448 {
   5449 	int phy_reset = 0;
   5450 	int i, error = 0;
   5451 	uint32_t reg;
   5452 	uint16_t kmreg;
   5453 	int rv;
   5454 
   5455 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5456 		device_xname(sc->sc_dev), __func__));
   5457 	KASSERT(sc->sc_type != 0);
   5458 
   5459 	/*
   5460 	 * Allocate on-chip memory according to the MTU size.
   5461 	 * The Packet Buffer Allocation register must be written
   5462 	 * before the chip is reset.
   5463 	 */
   5464 	switch (sc->sc_type) {
   5465 	case WM_T_82547:
   5466 	case WM_T_82547_2:
   5467 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5468 		    PBA_22K : PBA_30K;
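		/*
		 * On 82547 the 40K packet buffer is shared: the first
		 * sc_pba units are left for Rx and the remainder backs
		 * the driver-managed Tx FIFO set up below.
		 */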
   5469 		for (i = 0; i < sc->sc_nqueues; i++) {
   5470 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5471 			txq->txq_fifo_head = 0;
   5472 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5473 			txq->txq_fifo_size =
   5474 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5475 			txq->txq_fifo_stall = 0;
   5476 		}
   5477 		break;
   5478 	case WM_T_82571:
   5479 	case WM_T_82572:
    5480 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5481 	case WM_T_80003:
   5482 		sc->sc_pba = PBA_32K;
   5483 		break;
   5484 	case WM_T_82573:
   5485 		sc->sc_pba = PBA_12K;
   5486 		break;
   5487 	case WM_T_82574:
   5488 	case WM_T_82583:
   5489 		sc->sc_pba = PBA_20K;
   5490 		break;
   5491 	case WM_T_82576:
   5492 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5493 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5494 		break;
   5495 	case WM_T_82580:
   5496 	case WM_T_I350:
   5497 	case WM_T_I354:
   5498 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5499 		break;
   5500 	case WM_T_I210:
   5501 	case WM_T_I211:
   5502 		sc->sc_pba = PBA_34K;
   5503 		break;
   5504 	case WM_T_ICH8:
   5505 		/* Workaround for a bit corruption issue in FIFO memory */
   5506 		sc->sc_pba = PBA_8K;
   5507 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5508 		break;
   5509 	case WM_T_ICH9:
   5510 	case WM_T_ICH10:
   5511 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5512 		    PBA_14K : PBA_10K;
   5513 		break;
   5514 	case WM_T_PCH:
   5515 	case WM_T_PCH2:	/* XXX 14K? */
   5516 	case WM_T_PCH_LPT:
   5517 	case WM_T_PCH_SPT:
   5518 	case WM_T_PCH_CNP:
   5519 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5520 		    PBA_12K : PBA_26K;
   5521 		break;
   5522 	default:
   5523 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5524 		    PBA_40K : PBA_48K;
   5525 		break;
   5526 	}
   5527 	/*
    5528 	 * Only old or non-multiqueue devices have the PBA register.
   5529 	 * XXX Need special handling for 82575.
   5530 	 */
   5531 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5532 	    || (sc->sc_type == WM_T_82575))
   5533 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5534 
   5535 	/* Prevent the PCI-E bus from sticking */
   5536 	if (sc->sc_flags & WM_F_PCIE) {
   5537 		int timeout = 800;
   5538 
   5539 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5540 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5541 
   5542 		while (timeout--) {
   5543 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5544 			    == 0)
   5545 				break;
   5546 			delay(100);
   5547 		}
   5548 		if (timeout == 0)
   5549 			device_printf(sc->sc_dev,
   5550 			    "failed to disable bus mastering\n");
   5551 	}
   5552 
   5553 	/* Set the completion timeout for interface */
   5554 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5555 	    || (sc->sc_type == WM_T_82580)
   5556 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5557 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5558 		wm_set_pcie_completion_timeout(sc);
   5559 
   5560 	/* Clear interrupt */
   5561 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5562 	if (wm_is_using_msix(sc)) {
   5563 		if (sc->sc_type != WM_T_82574) {
   5564 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5565 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5566 		} else
   5567 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5568 	}
   5569 
   5570 	/* Stop the transmit and receive processes. */
   5571 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5572 	sc->sc_rctl &= ~RCTL_EN;
   5573 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5574 	CSR_WRITE_FLUSH(sc);
   5575 
   5576 	/* XXX set_tbi_sbp_82543() */
   5577 
   5578 	delay(10*1000);
   5579 
   5580 	/* Must acquire the MDIO ownership before MAC reset */
   5581 	switch (sc->sc_type) {
   5582 	case WM_T_82573:
   5583 	case WM_T_82574:
   5584 	case WM_T_82583:
   5585 		error = wm_get_hw_semaphore_82573(sc);
   5586 		break;
   5587 	default:
   5588 		break;
   5589 	}
   5590 
   5591 	/*
   5592 	 * 82541 Errata 29? & 82547 Errata 28?
   5593 	 * See also the description about PHY_RST bit in CTRL register
   5594 	 * in 8254x_GBe_SDM.pdf.
   5595 	 */
   5596 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5597 		CSR_WRITE(sc, WMREG_CTRL,
   5598 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5599 		CSR_WRITE_FLUSH(sc);
   5600 		delay(5000);
   5601 	}
   5602 
   5603 	switch (sc->sc_type) {
   5604 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5605 	case WM_T_82541:
   5606 	case WM_T_82541_2:
   5607 	case WM_T_82547:
   5608 	case WM_T_82547_2:
   5609 		/*
   5610 		 * On some chipsets, a reset through a memory-mapped write
   5611 		 * cycle can cause the chip to reset before completing the
    5612 		 * write cycle. This causes a major headache that can be avoided
   5613 		 * by issuing the reset via indirect register writes through
   5614 		 * I/O space.
   5615 		 *
   5616 		 * So, if we successfully mapped the I/O BAR at attach time,
   5617 		 * use that. Otherwise, try our luck with a memory-mapped
   5618 		 * reset.
   5619 		 */
   5620 		if (sc->sc_flags & WM_F_IOH_VALID)
   5621 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5622 		else
   5623 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5624 		break;
   5625 	case WM_T_82545_3:
   5626 	case WM_T_82546_3:
   5627 		/* Use the shadow control register on these chips. */
   5628 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5629 		break;
   5630 	case WM_T_80003:
   5631 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5632 		if (sc->phy.acquire(sc) != 0)
   5633 			break;
   5634 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5635 		sc->phy.release(sc);
   5636 		break;
   5637 	case WM_T_ICH8:
   5638 	case WM_T_ICH9:
   5639 	case WM_T_ICH10:
   5640 	case WM_T_PCH:
   5641 	case WM_T_PCH2:
   5642 	case WM_T_PCH_LPT:
   5643 	case WM_T_PCH_SPT:
   5644 	case WM_T_PCH_CNP:
   5645 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5646 		if (wm_phy_resetisblocked(sc) == false) {
   5647 			/*
   5648 			 * Gate automatic PHY configuration by hardware on
   5649 			 * non-managed 82579
   5650 			 */
   5651 			if ((sc->sc_type == WM_T_PCH2)
   5652 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5653 				== 0))
   5654 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5655 
   5656 			reg |= CTRL_PHY_RESET;
   5657 			phy_reset = 1;
   5658 		} else
   5659 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5660 		if (sc->phy.acquire(sc) != 0)
   5661 			break;
   5662 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5663 		/* Don't insert a completion barrier during reset */
   5664 		delay(20*1000);
   5665 		/*
   5666 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5667 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5668 		 * only. See also wm_get_swflag_ich8lan().
   5669 		 */
   5670 		mutex_exit(sc->sc_ich_phymtx);
   5671 		break;
   5672 	case WM_T_82580:
   5673 	case WM_T_I350:
   5674 	case WM_T_I354:
   5675 	case WM_T_I210:
   5676 	case WM_T_I211:
   5677 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5678 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5679 			CSR_WRITE_FLUSH(sc);
   5680 		delay(5000);
   5681 		break;
   5682 	case WM_T_82542_2_0:
   5683 	case WM_T_82542_2_1:
   5684 	case WM_T_82543:
   5685 	case WM_T_82540:
   5686 	case WM_T_82545:
   5687 	case WM_T_82546:
   5688 	case WM_T_82571:
   5689 	case WM_T_82572:
   5690 	case WM_T_82573:
   5691 	case WM_T_82574:
   5692 	case WM_T_82575:
   5693 	case WM_T_82576:
   5694 	case WM_T_82583:
   5695 	default:
   5696 		/* Everything else can safely use the documented method. */
   5697 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5698 		break;
   5699 	}
   5700 
   5701 	/* Must release the MDIO ownership after MAC reset */
   5702 	switch (sc->sc_type) {
   5703 	case WM_T_82573:
   5704 	case WM_T_82574:
   5705 	case WM_T_82583:
   5706 		if (error == 0)
   5707 			wm_put_hw_semaphore_82573(sc);
   5708 		break;
   5709 	default:
   5710 		break;
   5711 	}
   5712 
   5713 	/* Set Phy Config Counter to 50msec */
   5714 	if (sc->sc_type == WM_T_PCH2) {
   5715 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5716 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5717 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5718 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5719 	}
   5720 
   5721 	if (phy_reset != 0)
   5722 		wm_get_cfg_done(sc);
   5723 
   5724 	/* Reload EEPROM */
   5725 	switch (sc->sc_type) {
   5726 	case WM_T_82542_2_0:
   5727 	case WM_T_82542_2_1:
   5728 	case WM_T_82543:
   5729 	case WM_T_82544:
   5730 		delay(10);
   5731 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5732 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5733 		CSR_WRITE_FLUSH(sc);
   5734 		delay(2000);
   5735 		break;
   5736 	case WM_T_82540:
   5737 	case WM_T_82545:
   5738 	case WM_T_82545_3:
   5739 	case WM_T_82546:
   5740 	case WM_T_82546_3:
   5741 		delay(5*1000);
   5742 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5743 		break;
   5744 	case WM_T_82541:
   5745 	case WM_T_82541_2:
   5746 	case WM_T_82547:
   5747 	case WM_T_82547_2:
   5748 		delay(20000);
   5749 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5750 		break;
   5751 	case WM_T_82571:
   5752 	case WM_T_82572:
   5753 	case WM_T_82573:
   5754 	case WM_T_82574:
   5755 	case WM_T_82583:
   5756 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5757 			delay(10);
   5758 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5759 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5760 			CSR_WRITE_FLUSH(sc);
   5761 		}
   5762 		/* check EECD_EE_AUTORD */
   5763 		wm_get_auto_rd_done(sc);
   5764 		/*
    5765 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
   5766 		 * is set.
   5767 		 */
   5768 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5769 		    || (sc->sc_type == WM_T_82583))
   5770 			delay(25*1000);
   5771 		break;
   5772 	case WM_T_82575:
   5773 	case WM_T_82576:
   5774 	case WM_T_82580:
   5775 	case WM_T_I350:
   5776 	case WM_T_I354:
   5777 	case WM_T_I210:
   5778 	case WM_T_I211:
   5779 	case WM_T_80003:
   5780 		/* check EECD_EE_AUTORD */
   5781 		wm_get_auto_rd_done(sc);
   5782 		break;
   5783 	case WM_T_ICH8:
   5784 	case WM_T_ICH9:
   5785 	case WM_T_ICH10:
   5786 	case WM_T_PCH:
   5787 	case WM_T_PCH2:
   5788 	case WM_T_PCH_LPT:
   5789 	case WM_T_PCH_SPT:
   5790 	case WM_T_PCH_CNP:
   5791 		break;
   5792 	default:
   5793 		panic("%s: unknown type\n", __func__);
   5794 	}
   5795 
   5796 	/* Check whether EEPROM is present or not */
   5797 	switch (sc->sc_type) {
   5798 	case WM_T_82575:
   5799 	case WM_T_82576:
   5800 	case WM_T_82580:
   5801 	case WM_T_I350:
   5802 	case WM_T_I354:
   5803 	case WM_T_ICH8:
   5804 	case WM_T_ICH9:
   5805 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5806 			/* Not found */
   5807 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5808 			if (sc->sc_type == WM_T_82575)
   5809 				wm_reset_init_script_82575(sc);
   5810 		}
   5811 		break;
   5812 	default:
   5813 		break;
   5814 	}
   5815 
   5816 	if (phy_reset != 0)
   5817 		wm_phy_post_reset(sc);
   5818 
   5819 	if ((sc->sc_type == WM_T_82580)
   5820 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5821 		/* Clear global device reset status bit */
   5822 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5823 	}
   5824 
   5825 	/* Clear any pending interrupt events. */
   5826 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5827 	reg = CSR_READ(sc, WMREG_ICR);
   5828 	if (wm_is_using_msix(sc)) {
   5829 		if (sc->sc_type != WM_T_82574) {
   5830 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5831 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5832 		} else
   5833 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5834 	}
   5835 
   5836 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5837 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5838 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    5839 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   5840 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5841 		reg |= KABGTXD_BGSQLBIAS;
   5842 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5843 	}
   5844 
   5845 	/* Reload sc_ctrl */
   5846 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5847 
   5848 	wm_set_eee(sc);
   5849 
   5850 	/*
   5851 	 * For PCH, this write will make sure that any noise will be detected
   5852 	 * as a CRC error and be dropped rather than show up as a bad packet
   5853 	 * to the DMA engine
   5854 	 */
   5855 	if (sc->sc_type == WM_T_PCH)
   5856 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5857 
   5858 	if (sc->sc_type >= WM_T_82544)
   5859 		CSR_WRITE(sc, WMREG_WUC, 0);
   5860 
   5861 	if (sc->sc_type < WM_T_82575)
   5862 		wm_disable_aspm(sc); /* Workaround for some chips */
   5863 
   5864 	wm_reset_mdicnfg_82580(sc);
   5865 
   5866 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5867 		wm_pll_workaround_i210(sc);
   5868 
   5869 	if (sc->sc_type == WM_T_80003) {
   5870 		/* Default to TRUE to enable the MDIC W/A */
   5871 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5872 
   5873 		rv = wm_kmrn_readreg(sc,
   5874 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5875 		if (rv == 0) {
   5876 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5877 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5878 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5879 			else
   5880 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5881 		}
   5882 	}
   5883 }
   5884 
   5885 /*
   5886  * wm_add_rxbuf:
   5887  *
    5888  *	Add a receive buffer to the indicated descriptor.
   5889  */
   5890 static int
   5891 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5892 {
   5893 	struct wm_softc *sc = rxq->rxq_sc;
   5894 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5895 	struct mbuf *m;
   5896 	int error;
   5897 
   5898 	KASSERT(mutex_owned(rxq->rxq_lock));
   5899 
   5900 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5901 	if (m == NULL)
   5902 		return ENOBUFS;
   5903 
   5904 	MCLGET(m, M_DONTWAIT);
   5905 	if ((m->m_flags & M_EXT) == 0) {
   5906 		m_freem(m);
   5907 		return ENOBUFS;
   5908 	}
   5909 
   5910 	if (rxs->rxs_mbuf != NULL)
   5911 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5912 
   5913 	rxs->rxs_mbuf = m;
   5914 
   5915 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5916 	/*
   5917 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5918 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5919 	 */
   5920 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5921 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5922 	if (error) {
   5923 		/* XXX XXX XXX */
   5924 		aprint_error_dev(sc->sc_dev,
   5925 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5926 		panic("wm_add_rxbuf");
   5927 	}
   5928 
   5929 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5930 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5931 
   5932 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5933 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5934 			wm_init_rxdesc(rxq, idx);
   5935 	} else
   5936 		wm_init_rxdesc(rxq, idx);
   5937 
   5938 	return 0;
   5939 }
   5940 
   5941 /*
   5942  * wm_rxdrain:
   5943  *
   5944  *	Drain the receive queue.
   5945  */
   5946 static void
   5947 wm_rxdrain(struct wm_rxqueue *rxq)
   5948 {
   5949 	struct wm_softc *sc = rxq->rxq_sc;
   5950 	struct wm_rxsoft *rxs;
   5951 	int i;
   5952 
   5953 	KASSERT(mutex_owned(rxq->rxq_lock));
   5954 
   5955 	for (i = 0; i < WM_NRXDESC; i++) {
   5956 		rxs = &rxq->rxq_soft[i];
   5957 		if (rxs->rxs_mbuf != NULL) {
   5958 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5959 			m_freem(rxs->rxs_mbuf);
   5960 			rxs->rxs_mbuf = NULL;
   5961 		}
   5962 	}
   5963 }
   5964 
   5965 /*
    5966  * Set up registers for RSS.
    5967  *
    5968  * XXX VMDq is not yet supported.
   5969  */
   5970 static void
   5971 wm_init_rss(struct wm_softc *sc)
   5972 {
   5973 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5974 	int i;
   5975 
   5976 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5977 
   5978 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5979 		unsigned int qid, reta_ent;
   5980 
   5981 		qid  = i % sc->sc_nqueues;
   5982 		switch (sc->sc_type) {
   5983 		case WM_T_82574:
   5984 			reta_ent = __SHIFTIN(qid,
   5985 			    RETA_ENT_QINDEX_MASK_82574);
   5986 			break;
   5987 		case WM_T_82575:
   5988 			reta_ent = __SHIFTIN(qid,
   5989 			    RETA_ENT_QINDEX1_MASK_82575);
   5990 			break;
   5991 		default:
   5992 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5993 			break;
   5994 		}
   5995 
   5996 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5997 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5998 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5999 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6000 	}
   6001 
   6002 	rss_getkey((uint8_t *)rss_key);
   6003 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6004 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6005 
   6006 	if (sc->sc_type == WM_T_82574)
   6007 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6008 	else
   6009 		mrqc = MRQC_ENABLE_RSS_MQ;
   6010 
   6011 	/*
   6012 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   6013 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6014 	 */
   6015 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6016 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6017 #if 0
   6018 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6019 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6020 #endif
   6021 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6022 
   6023 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6024 }
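
         /*
          * Sketch of how the hardware consumes the tables programmed above:
          * on receive it computes a Toeplitz hash over the packet's
          * address/port tuple using the RSSRK key, then indexes the
          * redirection table with the hash's low bits to pick the RX queue.
          * With the "i % sc_nqueues" fill pattern used here, e.g.
          * sc_nqueues == 4 maps consecutive RETA entries to queues
          * 0,1,2,3,0,1,... so flows are spread round-robin.
          */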
   6025 
   6026 /*
    6027  * Adjust the TX and RX queue numbers which the system actually uses.
    6028  *
    6029  * The numbers are affected by the following parameters:
    6030  *     - The number of hardware queues
   6031  *     - The number of MSI-X vectors (= "nvectors" argument)
   6032  *     - ncpu
   6033  */
   6034 static void
   6035 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6036 {
   6037 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6038 
   6039 	if (nvectors < 2) {
   6040 		sc->sc_nqueues = 1;
   6041 		return;
   6042 	}
   6043 
   6044 	switch (sc->sc_type) {
   6045 	case WM_T_82572:
   6046 		hw_ntxqueues = 2;
   6047 		hw_nrxqueues = 2;
   6048 		break;
   6049 	case WM_T_82574:
   6050 		hw_ntxqueues = 2;
   6051 		hw_nrxqueues = 2;
   6052 		break;
   6053 	case WM_T_82575:
   6054 		hw_ntxqueues = 4;
   6055 		hw_nrxqueues = 4;
   6056 		break;
   6057 	case WM_T_82576:
   6058 		hw_ntxqueues = 16;
   6059 		hw_nrxqueues = 16;
   6060 		break;
   6061 	case WM_T_82580:
   6062 	case WM_T_I350:
   6063 	case WM_T_I354:
   6064 		hw_ntxqueues = 8;
   6065 		hw_nrxqueues = 8;
   6066 		break;
   6067 	case WM_T_I210:
   6068 		hw_ntxqueues = 4;
   6069 		hw_nrxqueues = 4;
   6070 		break;
   6071 	case WM_T_I211:
   6072 		hw_ntxqueues = 2;
   6073 		hw_nrxqueues = 2;
   6074 		break;
   6075 		/*
    6076 		 * The Ethernet controllers below do not support MSI-X,
    6077 		 * so this driver doesn't let them use multiqueue:
   6078 		 *     - WM_T_80003
   6079 		 *     - WM_T_ICH8
   6080 		 *     - WM_T_ICH9
   6081 		 *     - WM_T_ICH10
   6082 		 *     - WM_T_PCH
   6083 		 *     - WM_T_PCH2
   6084 		 *     - WM_T_PCH_LPT
   6085 		 */
   6086 	default:
   6087 		hw_ntxqueues = 1;
   6088 		hw_nrxqueues = 1;
   6089 		break;
   6090 	}
   6091 
   6092 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6093 
   6094 	/*
    6095 	 * Since queues in excess of MSI-X vectors cannot improve scaling,
    6096 	 * we limit the number of queues actually used.
   6097 	 */
   6098 	if (nvectors < hw_nqueues + 1)
   6099 		sc->sc_nqueues = nvectors - 1;
   6100 	else
   6101 		sc->sc_nqueues = hw_nqueues;
   6102 
   6103 	/*
    6104 	 * Likewise, since queues in excess of CPUs cannot improve scaling,
    6105 	 * we limit the number of queues actually used.
   6106 	 */
   6107 	if (ncpu < sc->sc_nqueues)
   6108 		sc->sc_nqueues = ncpu;
   6109 }
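
         /*
          * Illustrative (not compiled) restatement of the clamping above,
          * worked through for an 82576 (16 hardware queue pairs) attached
          * with nvectors == 5 on an 8-CPU system: uimin(16, uimin(5 - 1, 8))
          * == 4 queues, leaving one MSI-X vector for the link interrupt.
          */
         #if 0
         	sc->sc_nqueues = uimin(hw_nqueues, uimin(nvectors - 1, ncpu));
         #endif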
   6110 
   6111 static inline bool
   6112 wm_is_using_msix(struct wm_softc *sc)
   6113 {
   6114 
   6115 	return (sc->sc_nintrs > 1);
   6116 }
   6117 
   6118 static inline bool
   6119 wm_is_using_multiqueue(struct wm_softc *sc)
   6120 {
   6121 
   6122 	return (sc->sc_nqueues > 1);
   6123 }
   6124 
   6125 static int
   6126 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6127 {
   6128 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6129 
   6130 	wmq->wmq_id = qidx;
   6131 	wmq->wmq_intr_idx = intr_idx;
   6132 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6133 	    wm_handle_queue, wmq);
   6134 	if (wmq->wmq_si != NULL)
   6135 		return 0;
   6136 
   6137 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6138 	    wmq->wmq_id);
   6139 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6140 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6141 	return ENOMEM;
   6142 }
   6143 
   6144 /*
    6145  * Both single-interrupt MSI and INTx can use this function.
   6146  */
   6147 static int
   6148 wm_setup_legacy(struct wm_softc *sc)
   6149 {
   6150 	pci_chipset_tag_t pc = sc->sc_pc;
   6151 	const char *intrstr = NULL;
   6152 	char intrbuf[PCI_INTRSTR_LEN];
   6153 	int error;
   6154 
   6155 	error = wm_alloc_txrx_queues(sc);
   6156 	if (error) {
   6157 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6158 		    error);
   6159 		return ENOMEM;
   6160 	}
   6161 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6162 	    sizeof(intrbuf));
   6163 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6164 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6165 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6166 	if (sc->sc_ihs[0] == NULL) {
    6167 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6168 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6169 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6170 		return ENOMEM;
   6171 	}
   6172 
   6173 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6174 	sc->sc_nintrs = 1;
   6175 
   6176 	return wm_softint_establish_queue(sc, 0, 0);
   6177 }
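
         /*
          * With a single MSI or INTx vector, wm_intr_legacy() services TX,
          * RX and link events on vector 0, and queue 0's softint handler is
          * attached to that same vector by wm_softint_establish_queue().
          */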
   6178 
   6179 static int
   6180 wm_setup_msix(struct wm_softc *sc)
   6181 {
   6182 	void *vih;
   6183 	kcpuset_t *affinity;
   6184 	int qidx, error, intr_idx, txrx_established;
   6185 	pci_chipset_tag_t pc = sc->sc_pc;
   6186 	const char *intrstr = NULL;
   6187 	char intrbuf[PCI_INTRSTR_LEN];
   6188 	char intr_xname[INTRDEVNAMEBUF];
   6189 
   6190 	if (sc->sc_nqueues < ncpu) {
   6191 		/*
    6192 		 * To avoid contention with other devices' interrupts, the
    6193 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   6194 		 */
   6195 		sc->sc_affinity_offset = 1;
   6196 	} else {
   6197 		/*
    6198 		 * In this case, this device uses all CPUs, so we unify the
    6199 		 * affinitized cpu_index with the MSI-X vector number for readability.
   6200 		 */
   6201 		sc->sc_affinity_offset = 0;
   6202 	}
   6203 
   6204 	error = wm_alloc_txrx_queues(sc);
   6205 	if (error) {
   6206 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6207 		    error);
   6208 		return ENOMEM;
   6209 	}
   6210 
   6211 	kcpuset_create(&affinity, false);
   6212 	intr_idx = 0;
   6213 
   6214 	/*
   6215 	 * TX and RX
   6216 	 */
   6217 	txrx_established = 0;
   6218 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6219 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6220 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6221 
   6222 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6223 		    sizeof(intrbuf));
   6224 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6225 		    PCI_INTR_MPSAFE, true);
   6226 		memset(intr_xname, 0, sizeof(intr_xname));
   6227 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6228 		    device_xname(sc->sc_dev), qidx);
   6229 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6230 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6231 		if (vih == NULL) {
   6232 			aprint_error_dev(sc->sc_dev,
   6233 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6234 			    intrstr ? " at " : "",
   6235 			    intrstr ? intrstr : "");
   6236 
   6237 			goto fail;
   6238 		}
   6239 		kcpuset_zero(affinity);
   6240 		/* Round-robin affinity */
   6241 		kcpuset_set(affinity, affinity_to);
   6242 		error = interrupt_distribute(vih, affinity, NULL);
   6243 		if (error == 0) {
   6244 			aprint_normal_dev(sc->sc_dev,
   6245 			    "for TX and RX interrupting at %s affinity to %u\n",
   6246 			    intrstr, affinity_to);
   6247 		} else {
   6248 			aprint_normal_dev(sc->sc_dev,
   6249 			    "for TX and RX interrupting at %s\n", intrstr);
   6250 		}
   6251 		sc->sc_ihs[intr_idx] = vih;
   6252 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6253 			goto fail;
   6254 		txrx_established++;
   6255 		intr_idx++;
   6256 	}
   6257 
   6258 	/* LINK */
   6259 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6260 	    sizeof(intrbuf));
   6261 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6262 	memset(intr_xname, 0, sizeof(intr_xname));
   6263 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6264 	    device_xname(sc->sc_dev));
   6265 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6266 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6267 	if (vih == NULL) {
   6268 		aprint_error_dev(sc->sc_dev,
   6269 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6270 		    intrstr ? " at " : "",
   6271 		    intrstr ? intrstr : "");
   6272 
   6273 		goto fail;
   6274 	}
   6275 	/* Keep default affinity to LINK interrupt */
   6276 	aprint_normal_dev(sc->sc_dev,
   6277 	    "for LINK interrupting at %s\n", intrstr);
   6278 	sc->sc_ihs[intr_idx] = vih;
   6279 	sc->sc_link_intr_idx = intr_idx;
   6280 
   6281 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6282 	kcpuset_destroy(affinity);
   6283 	return 0;
   6284 
   6285 fail:
   6286 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6287 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    6288 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6289 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6290 	}
   6291 
   6292 	kcpuset_destroy(affinity);
   6293 	return ENOMEM;
   6294 }
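
         /*
          * Resulting MSI-X vector layout, e.g. with sc_nqueues == 4: vectors
          * 0..3 are the per-queue TXRX interrupts, bound round-robin to CPUs
          * starting at sc_affinity_offset, and vector 4 is the LINK
          * interrupt, which keeps the system default affinity.
          */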
   6295 
   6296 static void
   6297 wm_unset_stopping_flags(struct wm_softc *sc)
   6298 {
   6299 	int i;
   6300 
   6301 	KASSERT(mutex_owned(sc->sc_core_lock));
   6302 
   6303 	/* Must unset stopping flags in ascending order. */
   6304 	for (i = 0; i < sc->sc_nqueues; i++) {
   6305 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6306 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6307 
   6308 		mutex_enter(txq->txq_lock);
   6309 		txq->txq_stopping = false;
   6310 		mutex_exit(txq->txq_lock);
   6311 
   6312 		mutex_enter(rxq->rxq_lock);
   6313 		rxq->rxq_stopping = false;
   6314 		mutex_exit(rxq->rxq_lock);
   6315 	}
   6316 
   6317 	sc->sc_core_stopping = false;
   6318 }
   6319 
   6320 static void
   6321 wm_set_stopping_flags(struct wm_softc *sc)
   6322 {
   6323 	int i;
   6324 
   6325 	KASSERT(mutex_owned(sc->sc_core_lock));
   6326 
   6327 	sc->sc_core_stopping = true;
   6328 
   6329 	/* Must set stopping flags in ascending order. */
   6330 	for (i = 0; i < sc->sc_nqueues; i++) {
   6331 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6332 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6333 
   6334 		mutex_enter(rxq->rxq_lock);
   6335 		rxq->rxq_stopping = true;
   6336 		mutex_exit(rxq->rxq_lock);
   6337 
   6338 		mutex_enter(txq->txq_lock);
   6339 		txq->txq_stopping = true;
   6340 		mutex_exit(txq->txq_lock);
   6341 	}
   6342 }
   6343 
   6344 /*
   6345  * Write interrupt interval value to ITR or EITR
   6346  */
   6347 static void
   6348 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6349 {
   6350 
   6351 	if (!wmq->wmq_set_itr)
   6352 		return;
   6353 
   6354 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6355 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6356 
   6357 		/*
    6358 		 * The 82575 doesn't have the CNT_INGR field,
    6359 		 * so overwrite the counter field in software.
   6360 		 */
   6361 		if (sc->sc_type == WM_T_82575)
   6362 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6363 			    EITR_COUNTER_MASK_82575);
   6364 		else
   6365 			eitr |= EITR_CNT_INGR;
   6366 
   6367 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6368 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6369 		/*
    6370 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6371 		 * the multiqueue function with MSI-X.
   6372 		 */
   6373 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6374 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6375 	} else {
   6376 		KASSERT(wmq->wmq_id == 0);
   6377 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6378 	}
   6379 
   6380 	wmq->wmq_set_itr = false;
   6381 }
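
         /*
          * Worked example of the two encodings (see also the sc_itr_init
          * comments in wm_init_locked()): wmq_itr == 1500 written to ITR
          * throttles to 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
          * while wmq_itr == 450 written to a NEWQUEUE EITR throttles to
          * roughly 1,000,000 / 450 ~= 2222 interrupts/sec.
          */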
   6382 
   6383 /*
   6384  * TODO
    6385  * The dynamic ITR calculation below is almost the same as Linux igb's;
    6386  * however, it does not fit wm(4). So, AIM is disabled until we find an
    6387  * appropriate ITR calculation.
   6388  */
   6389 /*
    6390  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6391  * write to the register. This function does not write the ITR/EITR register.
   6392  */
   6393 static void
   6394 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6395 {
   6396 #ifdef NOTYET
   6397 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6398 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6399 	uint32_t avg_size = 0;
   6400 	uint32_t new_itr;
   6401 
   6402 	if (rxq->rxq_packets)
    6403 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   6404 	if (txq->txq_packets)
   6405 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6406 
   6407 	if (avg_size == 0) {
   6408 		new_itr = 450; /* restore default value */
   6409 		goto out;
   6410 	}
   6411 
   6412 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6413 	avg_size += 24;
   6414 
   6415 	/* Don't starve jumbo frames */
   6416 	avg_size = uimin(avg_size, 3000);
   6417 
   6418 	/* Give a little boost to mid-size frames */
   6419 	if ((avg_size > 300) && (avg_size < 1200))
   6420 		new_itr = avg_size / 3;
   6421 	else
   6422 		new_itr = avg_size / 2;
   6423 
   6424 out:
   6425 	/*
    6426 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   6427 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6428 	 */
   6429 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6430 		new_itr *= 4;
   6431 
   6432 	if (new_itr != wmq->wmq_itr) {
   6433 		wmq->wmq_itr = new_itr;
   6434 		wmq->wmq_set_itr = true;
   6435 	} else
   6436 		wmq->wmq_set_itr = false;
   6437 
   6438 	rxq->rxq_packets = 0;
   6439 	rxq->rxq_bytes = 0;
   6440 	txq->txq_packets = 0;
   6441 	txq->txq_bytes = 0;
   6442 #endif
   6443 }
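
         /*
          * Worked example of the (currently disabled) calculation above: an
          * average frame of 600 bytes becomes 624 after the CRC/preamble/gap
          * adjustment, falls in the mid-size (300, 1200) band, and yields
          * new_itr = 624 / 3 = 208, scaled by 4 to 832 on everything except
          * the 82575.
          */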
   6444 
   6445 static void
   6446 wm_init_sysctls(struct wm_softc *sc)
   6447 {
   6448 	struct sysctllog **log;
   6449 	const struct sysctlnode *rnode, *qnode, *cnode;
   6450 	int i, rv;
   6451 	const char *dvname;
   6452 
   6453 	log = &sc->sc_sysctllog;
   6454 	dvname = device_xname(sc->sc_dev);
   6455 
   6456 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6457 	    0, CTLTYPE_NODE, dvname,
   6458 	    SYSCTL_DESCR("wm information and settings"),
   6459 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6460 	if (rv != 0)
   6461 		goto err;
   6462 
   6463 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6464 	    CTLTYPE_BOOL, "txrx_workqueue",
   6465 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6466 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6467 	if (rv != 0)
   6468 		goto teardown;
   6469 
   6470 	for (i = 0; i < sc->sc_nqueues; i++) {
   6471 		struct wm_queue *wmq = &sc->sc_queue[i];
   6472 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6473 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6474 
   6475 		snprintf(sc->sc_queue[i].sysctlname,
   6476 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6477 
   6478 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6479 		    0, CTLTYPE_NODE,
   6480 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6481 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6482 			break;
   6483 
   6484 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6485 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6486 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6487 		    NULL, 0, &txq->txq_free,
   6488 		    0, CTL_CREATE, CTL_EOL) != 0)
   6489 			break;
   6490 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6491 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6492 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6493 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6494 		    0, CTL_CREATE, CTL_EOL) != 0)
   6495 			break;
   6496 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6497 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6498 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6499 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6500 		    0, CTL_CREATE, CTL_EOL) != 0)
   6501 			break;
   6502 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6503 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6504 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6505 		    NULL, 0, &txq->txq_next,
   6506 		    0, CTL_CREATE, CTL_EOL) != 0)
   6507 			break;
   6508 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6509 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6510 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6511 		    NULL, 0, &txq->txq_sfree,
   6512 		    0, CTL_CREATE, CTL_EOL) != 0)
   6513 			break;
   6514 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6515 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6516 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6517 		    NULL, 0, &txq->txq_snext,
   6518 		    0, CTL_CREATE, CTL_EOL) != 0)
   6519 			break;
   6520 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6521 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6522 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6523 		    NULL, 0, &txq->txq_sdirty,
   6524 		    0, CTL_CREATE, CTL_EOL) != 0)
   6525 			break;
   6526 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6527 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6528 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6529 		    NULL, 0, &txq->txq_flags,
   6530 		    0, CTL_CREATE, CTL_EOL) != 0)
   6531 			break;
   6532 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6533 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6534 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6535 		    NULL, 0, &txq->txq_stopping,
   6536 		    0, CTL_CREATE, CTL_EOL) != 0)
   6537 			break;
   6538 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6539 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6540 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6541 		    NULL, 0, &txq->txq_sending,
   6542 		    0, CTL_CREATE, CTL_EOL) != 0)
   6543 			break;
   6544 
   6545 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6546 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6547 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6548 		    NULL, 0, &rxq->rxq_ptr,
   6549 		    0, CTL_CREATE, CTL_EOL) != 0)
   6550 			break;
   6551 	}
   6552 
   6553 #ifdef WM_DEBUG
   6554 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6555 	    CTLTYPE_INT, "debug_flags",
   6556 	    SYSCTL_DESCR(
   6557 		    "Debug flags:\n"	\
   6558 		    "\t0x01 LINK\n"	\
   6559 		    "\t0x02 TX\n"	\
   6560 		    "\t0x04 RX\n"	\
   6561 		    "\t0x08 GMII\n"	\
   6562 		    "\t0x10 MANAGE\n"	\
   6563 		    "\t0x20 NVM\n"	\
   6564 		    "\t0x40 INIT\n"	\
   6565 		    "\t0x80 LOCK"),
   6566 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6567 	if (rv != 0)
   6568 		goto teardown;
   6569 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6570 	    CTLTYPE_BOOL, "trigger_reset",
   6571 	    SYSCTL_DESCR("Trigger an interface reset"),
   6572 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6573 	if (rv != 0)
   6574 		goto teardown;
   6575 #endif
   6576 
   6577 	return;
   6578 
   6579 teardown:
   6580 	sysctl_teardown(log);
   6581 err:
   6582 	sc->sc_sysctllog = NULL;
   6583 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6584 	    __func__, rv);
   6585 }
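
         /*
          * Example of the resulting sysctl tree (assuming the device
          * attached as wm0; the node names mirror the sysctl_createv()
          * calls above):
          *
          *	sysctl -w hw.wm0.txrx_workqueue=1
          *	sysctl hw.wm0.q0.txq_free
          */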
   6586 
   6587 static void
   6588 wm_update_stats(struct wm_softc *sc)
   6589 {
   6590 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    6591 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   6592 	    cexterr;
   6593 
   6594 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6595 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6596 	mpc = CSR_READ(sc, WMREG_MPC);
   6597 	colc = CSR_READ(sc, WMREG_COLC);
   6598 	sec = CSR_READ(sc, WMREG_SEC);
   6599 	rlec = CSR_READ(sc, WMREG_RLEC);
   6600 
   6601 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6602 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6603 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6604 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6605 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6606 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6607 
   6608 	if (sc->sc_type >= WM_T_82543) {
   6609 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6610 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6611 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6612 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6613 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6614 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6615 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6616 		} else {
   6617 			cexterr = 0;
   6618 			/* Excessive collision + Link down */
   6619 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6620 			    CSR_READ(sc, WMREG_HTDPMC));
   6621 		}
   6622 
   6623 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6624 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6625 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6626 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6627 			    CSR_READ(sc, WMREG_TSCTFC));
   6628 		else {
   6629 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6630 			    CSR_READ(sc, WMREG_CBRDPC));
   6631 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6632 			    CSR_READ(sc, WMREG_CBRMPC));
   6633 		}
   6634 	} else
   6635 		algnerrc = rxerrc = cexterr = 0;
   6636 
   6637 	if (sc->sc_type >= WM_T_82542_2_1) {
   6638 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6639 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6640 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6641 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6642 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6643 	}
   6644 
   6645 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6646 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6647 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6648 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6649 
   6650 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6651 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6652 	}
   6653 
   6654 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6655 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6656 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6657 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6658 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6659 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6660 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6661 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6662 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6663 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6664 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6665 
   6666 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6667 	    CSR_READ(sc, WMREG_GORCL) +
   6668 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6669 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6670 	    CSR_READ(sc, WMREG_GOTCL) +
   6671 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6672 
   6673 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6674 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6675 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6676 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6677 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6678 
   6679 	if (sc->sc_type >= WM_T_82540) {
   6680 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6681 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6682 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6683 	}
   6684 
   6685 	/*
   6686 	 * The TOR(L) register includes:
   6687 	 *  - Error
   6688 	 *  - Flow control
    6689 	 *  - Broadcast rejected (this note appears in the 82574 and newer
    6690 	 *    datasheets; what "broadcast rejected" means is unclear)
   6691 	 */
   6692 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6693 	    CSR_READ(sc, WMREG_TORL) +
   6694 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6695 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6696 	    CSR_READ(sc, WMREG_TOTL) +
   6697 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6698 
   6699 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6700 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6701 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6702 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6703 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6704 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6705 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6706 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6710 	if (sc->sc_type < WM_T_82575) {
   6711 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6712 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6713 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6714 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6715 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6716 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6717 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6718 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6719 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6720 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6721 	} else if (!WM_IS_ICHPCH(sc)) {
   6722 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6723 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6724 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6725 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6726 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6727 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6728 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6729 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6730 
   6731 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6732 		    CSR_READ(sc, WMREG_HGORCL) +
   6733 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6734 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6735 		    CSR_READ(sc, WMREG_HGOTCL) +
   6736 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6737 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6738 	}
   6739 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6740 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6741 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6742 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6743 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6744 			    CSR_READ(sc, WMREG_B2OGPRC));
   6745 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6746 			    CSR_READ(sc, WMREG_O2BSPC));
   6747 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6748 			    CSR_READ(sc, WMREG_B2OSPC));
   6749 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6750 			    CSR_READ(sc, WMREG_O2BGPTC));
   6751 		}
   6752 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6753 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6754 	}
   6755 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6756 	if_statadd_ref(nsr, if_collisions, colc);
   6757 	if_statadd_ref(nsr, if_ierrors,
   6758 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6759 	/*
   6760 	 * WMREG_RNBC is incremented when there are no available buffers in
    6761 	 * host memory. It is not a count of dropped packets, because the
    6762 	 * Ethernet controller can still receive packets in that case as long
    6763 	 * as there is space in the PHY's FIFO.
    6764 	 *
    6765 	 * If you want to track the WMREG_RNBC count itself, use something
    6766 	 * like a dedicated EVCNT instead of if_iqdrops.
   6767 	 */
   6768 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6769 	IF_STAT_PUTREF(ifp);
   6770 }
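
         /*
          * Most of the hardware statistics registers read above clear on
          * read, which is why each read is accumulated into an event
          * counter or if_stat rather than stored directly.
          */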
   6771 
   6772 void
   6773 wm_clear_evcnt(struct wm_softc *sc)
   6774 {
   6775 #ifdef WM_EVENT_COUNTERS
   6776 	int i;
   6777 
   6778 	/* RX queues */
   6779 	for (i = 0; i < sc->sc_nqueues; i++) {
   6780 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6781 
   6782 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6783 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6784 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6785 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6786 	}
   6787 
   6788 	/* TX queues */
   6789 	for (i = 0; i < sc->sc_nqueues; i++) {
   6790 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6791 		int j;
   6792 
   6793 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6794 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6795 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6796 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6797 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6798 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6799 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6800 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6801 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6802 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6803 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6804 
   6805 		for (j = 0; j < WM_NTXSEGS; j++)
   6806 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6807 
   6808 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6809 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6810 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6811 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6812 		if (sc->sc_type <= WM_T_82544)
   6813 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6814 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6815 	}
   6816 
   6817 	/* Miscs */
   6818 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6819 
   6820 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6821 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6822 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6823 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6824 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6825 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6826 
   6827 	if (sc->sc_type >= WM_T_82543) {
   6828 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6829 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6830 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6831 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6832 		else
   6833 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6834 
   6835 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6836 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6837 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6838 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6839 		else {
   6840 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6841 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6842 		}
   6843 	}
   6844 
   6845 	if (sc->sc_type >= WM_T_82542_2_1) {
   6846 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6847 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6848 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6849 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6850 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6851 	}
   6852 
   6853 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6854 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6855 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6856 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6857 
   6858 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6859 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6860 
   6861 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6862 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6863 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6864 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6865 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6866 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6867 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6868 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6869 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6870 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6871 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6872 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6873 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6874 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6875 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6876 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6877 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6878 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6879 	if (sc->sc_type >= WM_T_82540) {
   6880 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6881 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6882 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6883 	}
   6884 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6885 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6886 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6887 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6888 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6889 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6890 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6891 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6892 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6893 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6894 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6895 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6896 	WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6897 	if (sc->sc_type < WM_T_82575) {
   6898 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6899 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6900 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6901 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6902 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6903 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6904 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6905 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6906 	} else if (!WM_IS_ICHPCH(sc)) {
   6907 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6908 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6909 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6910 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6911 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6912 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6913 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6914 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6915 
   6916 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6917 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6918 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6919 	}
   6920 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6921 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6922 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6923 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6924 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6925 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6926 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6927 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6928 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6929 	}
   6930 #endif
   6931 }
   6932 
   6933 /*
   6934  * wm_init:		[ifnet interface function]
   6935  *
   6936  *	Initialize the interface.
   6937  */
   6938 static int
   6939 wm_init(struct ifnet *ifp)
   6940 {
   6941 	struct wm_softc *sc = ifp->if_softc;
   6942 	int ret;
   6943 
   6944 	KASSERT(IFNET_LOCKED(ifp));
   6945 
   6946 	if (sc->sc_dying)
   6947 		return ENXIO;
   6948 
   6949 	mutex_enter(sc->sc_core_lock);
   6950 	ret = wm_init_locked(ifp);
   6951 	mutex_exit(sc->sc_core_lock);
   6952 
   6953 	return ret;
   6954 }
   6955 
   6956 static int
   6957 wm_init_locked(struct ifnet *ifp)
   6958 {
   6959 	struct wm_softc *sc = ifp->if_softc;
   6960 	struct ethercom *ec = &sc->sc_ethercom;
   6961 	int i, j, trynum, error = 0;
   6962 	uint32_t reg, sfp_mask = 0;
   6963 
   6964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6965 		device_xname(sc->sc_dev), __func__));
   6966 	KASSERT(IFNET_LOCKED(ifp));
   6967 	KASSERT(mutex_owned(sc->sc_core_lock));
   6968 
   6969 	/*
    6970 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6971 	 * There is a small but measurable benefit to avoiding the adjustment
    6972 	 * of the descriptor so that the headers are aligned, for normal MTU,
   6973 	 * on such platforms.  One possibility is that the DMA itself is
   6974 	 * slightly more efficient if the front of the entire packet (instead
   6975 	 * of the front of the headers) is aligned.
   6976 	 *
   6977 	 * Note we must always set align_tweak to 0 if we are using
   6978 	 * jumbo frames.
   6979 	 */
   6980 #ifdef __NO_STRICT_ALIGNMENT
   6981 	sc->sc_align_tweak = 0;
   6982 #else
   6983 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6984 		sc->sc_align_tweak = 0;
   6985 	else
   6986 		sc->sc_align_tweak = 2;
   6987 #endif /* __NO_STRICT_ALIGNMENT */
   6988 
   6989 	/* Cancel any pending I/O. */
   6990 	wm_stop_locked(ifp, false, false);
   6991 
   6992 	/* Update statistics before reset */
   6993 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6994 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6995 
   6996 	/* >= PCH_SPT hardware workaround before reset. */
   6997 	if (sc->sc_type >= WM_T_PCH_SPT)
   6998 		wm_flush_desc_rings(sc);
   6999 
   7000 	/* Reset the chip to a known state. */
   7001 	wm_reset(sc);
   7002 
   7003 	/*
   7004 	 * AMT based hardware can now take control from firmware
   7005 	 * Do this after reset.
   7006 	 */
   7007 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7008 		wm_get_hw_control(sc);
   7009 
   7010 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7011 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7012 		wm_legacy_irq_quirk_spt(sc);
   7013 
   7014 	/* Init hardware bits */
   7015 	wm_initialize_hardware_bits(sc);
   7016 
   7017 	/* Reset the PHY. */
   7018 	if (sc->sc_flags & WM_F_HAS_MII)
   7019 		wm_gmii_reset(sc);
   7020 
   7021 	if (sc->sc_type >= WM_T_ICH8) {
   7022 		reg = CSR_READ(sc, WMREG_GCR);
   7023 		/*
   7024 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   7025 		 * default after reset.
   7026 		 */
   7027 		if (sc->sc_type == WM_T_ICH8)
   7028 			reg |= GCR_NO_SNOOP_ALL;
   7029 		else
   7030 			reg &= ~GCR_NO_SNOOP_ALL;
   7031 		CSR_WRITE(sc, WMREG_GCR, reg);
   7032 	}
   7033 
   7034 	if ((sc->sc_type >= WM_T_ICH8)
   7035 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7036 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7038 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7039 		reg |= CTRL_EXT_RO_DIS;
   7040 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7041 	}
   7042 
   7043 	/* Calculate (E)ITR value */
   7044 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7045 		/*
   7046 		 * For NEWQUEUE's EITR (except for 82575).
    7047 		 * The 82575's EITR should be set to the same throttling value
    7048 		 * as other old controllers' ITR because the interrupt/sec
    7049 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    7050 		 *
    7051 		 * The 82574's EITR should be set to the same throttling value as ITR.
   7052 		 *
   7053 		 * For N interrupts/sec, set this value to:
   7054 		 * 1,000,000 / N in contrast to ITR throttling value.
   7055 		 */
   7056 		sc->sc_itr_init = 450;
   7057 	} else if (sc->sc_type >= WM_T_82543) {
   7058 		/*
   7059 		 * Set up the interrupt throttling register (units of 256ns)
   7060 		 * Note that a footnote in Intel's documentation says this
   7061 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7062 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7063 		 * that that is also true for the 1024ns units of the other
   7064 		 * interrupt-related timer registers -- so, really, we ought
   7065 		 * to divide this value by 4 when the link speed is low.
   7066 		 *
   7067 		 * XXX implement this division at link speed change!
   7068 		 */
   7069 
   7070 		/*
   7071 		 * For N interrupts/sec, set this value to:
   7072 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7073 		 * absolute and packet timer values to this value
   7074 		 * divided by 4 to get "simple timer" behavior.
   7075 		 */
   7076 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7077 	}
   7078 
   7079 	error = wm_init_txrx_queues(sc);
   7080 	if (error)
   7081 		goto out;
   7082 
   7083 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7084 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7085 	    (sc->sc_type >= WM_T_82575))
   7086 		wm_serdes_power_up_link_82575(sc);
   7087 
   7088 	/* Clear out the VLAN table -- we don't use it (yet). */
   7089 	CSR_WRITE(sc, WMREG_VET, 0);
   7090 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7091 		trynum = 10; /* Due to hw errata */
   7092 	else
   7093 		trynum = 1;
   7094 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7095 		for (j = 0; j < trynum; j++)
   7096 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7097 
   7098 	/*
   7099 	 * Set up flow-control parameters.
   7100 	 *
   7101 	 * XXX Values could probably stand some tuning.
   7102 	 */
   7103 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7104 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7105 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
    7106 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)) {
   7107 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7108 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7109 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7110 	}
   7111 
   7112 	sc->sc_fcrtl = FCRTL_DFLT;
   7113 	if (sc->sc_type < WM_T_82543) {
   7114 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7115 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7116 	} else {
   7117 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7118 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7119 	}
   7120 
   7121 	if (sc->sc_type == WM_T_80003)
   7122 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7123 	else
   7124 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7125 
   7126 	/* Writes the control register. */
   7127 	wm_set_vlan(sc);
   7128 
   7129 	if (sc->sc_flags & WM_F_HAS_MII) {
   7130 		uint16_t kmreg;
   7131 
   7132 		switch (sc->sc_type) {
   7133 		case WM_T_80003:
   7134 		case WM_T_ICH8:
   7135 		case WM_T_ICH9:
   7136 		case WM_T_ICH10:
   7137 		case WM_T_PCH:
   7138 		case WM_T_PCH2:
   7139 		case WM_T_PCH_LPT:
   7140 		case WM_T_PCH_SPT:
   7141 		case WM_T_PCH_CNP:
   7142 			/*
   7143 			 * Set the mac to wait the maximum time between each
   7144 			 * iteration and increase the max iterations when
   7145 			 * polling the phy; this fixes erroneous timeouts at
   7146 			 * 10Mbps.
   7147 			 */
   7148 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7149 			    0xFFFF);
   7150 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7151 			    &kmreg);
   7152 			kmreg |= 0x3F;
   7153 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7154 			    kmreg);
   7155 			break;
   7156 		default:
   7157 			break;
   7158 		}
   7159 
   7160 		if (sc->sc_type == WM_T_80003) {
   7161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7162 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7163 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7164 
   7165 			/* Bypass RX and TX FIFOs */
   7166 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7167 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7168 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7169 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7170 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7171 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7172 		}
   7173 	}
   7174 #if 0
   7175 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7176 #endif
   7177 
   7178 	/* Set up checksum offload parameters. */
   7179 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7180 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7181 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7182 		reg |= RXCSUM_IPOFL;
   7183 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7184 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7185 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7186 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7187 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7188 
   7189 	/* Set registers about MSI-X */
   7190 	if (wm_is_using_msix(sc)) {
   7191 		uint32_t ivar, qintr_idx;
   7192 		struct wm_queue *wmq;
   7193 		unsigned int qid;
   7194 
   7195 		if (sc->sc_type == WM_T_82575) {
   7196 			/* Interrupt control */
   7197 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7198 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7199 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7200 
   7201 			/* TX and RX */
   7202 			for (i = 0; i < sc->sc_nqueues; i++) {
   7203 				wmq = &sc->sc_queue[i];
   7204 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7205 				    EITR_TX_QUEUE(wmq->wmq_id)
   7206 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7207 			}
   7208 			/* Link status */
   7209 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7210 			    EITR_OTHER);
   7211 		} else if (sc->sc_type == WM_T_82574) {
   7212 			/* Interrupt control */
   7213 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7214 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7215 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7216 
   7217 			/*
   7218 			 * Work around issue with spurious interrupts
   7219 			 * in MSI-X mode.
    7220 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    7221 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   7222 			 */
   7223 			reg = CSR_READ(sc, WMREG_RFCTL);
   7224 			reg |= WMREG_RFCTL_ACKDIS;
   7225 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7226 
   7227 			ivar = 0;
   7228 			/* TX and RX */
   7229 			for (i = 0; i < sc->sc_nqueues; i++) {
   7230 				wmq = &sc->sc_queue[i];
   7231 				qid = wmq->wmq_id;
   7232 				qintr_idx = wmq->wmq_intr_idx;
   7233 
   7234 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7235 				    IVAR_TX_MASK_Q_82574(qid));
   7236 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7237 				    IVAR_RX_MASK_Q_82574(qid));
   7238 			}
   7239 			/* Link status */
   7240 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7241 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7242 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7243 		} else {
   7244 			/* Interrupt control */
   7245 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7246 			    | GPIE_EIAME | GPIE_PBA);
   7247 
   7248 			switch (sc->sc_type) {
   7249 			case WM_T_82580:
   7250 			case WM_T_I350:
   7251 			case WM_T_I354:
   7252 			case WM_T_I210:
   7253 			case WM_T_I211:
   7254 				/* TX and RX */
   7255 				for (i = 0; i < sc->sc_nqueues; i++) {
   7256 					wmq = &sc->sc_queue[i];
   7257 					qid = wmq->wmq_id;
   7258 					qintr_idx = wmq->wmq_intr_idx;
   7259 
   7260 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7261 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7262 					ivar |= __SHIFTIN((qintr_idx
   7263 						| IVAR_VALID),
   7264 					    IVAR_TX_MASK_Q(qid));
   7265 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7266 					ivar |= __SHIFTIN((qintr_idx
   7267 						| IVAR_VALID),
   7268 					    IVAR_RX_MASK_Q(qid));
   7269 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7270 				}
   7271 				break;
   7272 			case WM_T_82576:
   7273 				/* TX and RX */
   7274 				for (i = 0; i < sc->sc_nqueues; i++) {
   7275 					wmq = &sc->sc_queue[i];
   7276 					qid = wmq->wmq_id;
   7277 					qintr_idx = wmq->wmq_intr_idx;
   7278 
   7279 					ivar = CSR_READ(sc,
   7280 					    WMREG_IVAR_Q_82576(qid));
   7281 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7282 					ivar |= __SHIFTIN((qintr_idx
   7283 						| IVAR_VALID),
   7284 					    IVAR_TX_MASK_Q_82576(qid));
   7285 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7286 					ivar |= __SHIFTIN((qintr_idx
   7287 						| IVAR_VALID),
   7288 					    IVAR_RX_MASK_Q_82576(qid));
   7289 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7290 					    ivar);
   7291 				}
   7292 				break;
   7293 			default:
   7294 				break;
   7295 			}
   7296 
   7297 			/* Link status */
   7298 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7299 			    IVAR_MISC_OTHER);
   7300 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7301 		}
   7302 
   7303 		if (wm_is_using_multiqueue(sc)) {
   7304 			wm_init_rss(sc);
   7305 
    7306 			/*
    7307 			 * NOTE: Receive full-packet checksum offload is
    7308 			 * mutually exclusive with multiqueue. However, this
    7309 			 * is not the same as the TCP/IP checksums, which
    7310 			 * still work.
    7311 			 */
   7312 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7313 			reg |= RXCSUM_PCSD;
   7314 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7315 		}
   7316 	}
   7317 
   7318 	/* Set up the interrupt registers. */
   7319 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7320 
   7321 	/* Enable SFP module insertion interrupt if it's required */
   7322 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7323 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7324 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7325 		sfp_mask = ICR_GPI(0);
   7326 	}
   7327 
   7328 	if (wm_is_using_msix(sc)) {
   7329 		uint32_t mask;
   7330 		struct wm_queue *wmq;
   7331 
   7332 		switch (sc->sc_type) {
   7333 		case WM_T_82574:
   7334 			mask = 0;
   7335 			for (i = 0; i < sc->sc_nqueues; i++) {
   7336 				wmq = &sc->sc_queue[i];
   7337 				mask |= ICR_TXQ(wmq->wmq_id);
   7338 				mask |= ICR_RXQ(wmq->wmq_id);
   7339 			}
   7340 			mask |= ICR_OTHER;
   7341 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7342 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7343 			break;
   7344 		default:
   7345 			if (sc->sc_type == WM_T_82575) {
   7346 				mask = 0;
   7347 				for (i = 0; i < sc->sc_nqueues; i++) {
   7348 					wmq = &sc->sc_queue[i];
   7349 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7350 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7351 				}
   7352 				mask |= EITR_OTHER;
   7353 			} else {
   7354 				mask = 0;
   7355 				for (i = 0; i < sc->sc_nqueues; i++) {
   7356 					wmq = &sc->sc_queue[i];
   7357 					mask |= 1 << wmq->wmq_intr_idx;
   7358 				}
   7359 				mask |= 1 << sc->sc_link_intr_idx;
   7360 			}
   7361 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7362 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7363 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7364 
   7365 			/* For other interrupts */
   7366 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7367 			break;
   7368 		}
   7369 	} else {
   7370 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7371 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7372 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7373 	}
   7374 
   7375 	/* Set up the inter-packet gap. */
   7376 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7377 
   7378 	if (sc->sc_type >= WM_T_82543) {
   7379 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7380 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7381 			wm_itrs_writereg(sc, wmq);
   7382 		}
   7383 		/*
    7384 		 * Link interrupts occur much less often than TX
    7385 		 * and RX interrupts. So, we don't tune the
    7386 		 * EINTR(WM_MSIX_LINKINTR_IDX) value like
   7387 		 * FreeBSD's if_igb.
   7388 		 */
   7389 	}
   7390 
   7391 	/* Set the VLAN EtherType. */
   7392 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7393 
   7394 	/*
   7395 	 * Set up the transmit control register; we start out with
   7396 	 * a collision distance suitable for FDX, but update it when
   7397 	 * we resolve the media type.
   7398 	 */
   7399 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7400 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7401 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7402 	if (sc->sc_type >= WM_T_82571)
   7403 		sc->sc_tctl |= TCTL_MULR;
   7404 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7405 
   7406 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7407 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   7408 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7409 	}
   7410 
   7411 	if (sc->sc_type == WM_T_80003) {
   7412 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7413 		reg &= ~TCTL_EXT_GCEX_MASK;
   7414 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7415 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7416 	}
   7417 
   7418 	/* Set the media. */
   7419 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7420 		goto out;
   7421 
   7422 	/* Configure for OS presence */
   7423 	wm_init_manageability(sc);
   7424 
   7425 	/*
   7426 	 * Set up the receive control register; we actually program the
   7427 	 * register when we set the receive filter. Use multicast address
   7428 	 * offset type 0.
   7429 	 *
   7430 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7431 	 * don't enable that feature.
   7432 	 */
   7433 	sc->sc_mchash_type = 0;
   7434 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7435 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7436 
    7437 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7438 	if (sc->sc_type == WM_T_82574)
   7439 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7440 
   7441 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7442 		sc->sc_rctl |= RCTL_SECRC;
   7443 
   7444 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7445 	    && (ifp->if_mtu > ETHERMTU)) {
   7446 		sc->sc_rctl |= RCTL_LPE;
   7447 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7448 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7449 	}
   7450 
   7451 	if (MCLBYTES == 2048)
   7452 		sc->sc_rctl |= RCTL_2k;
   7453 	else {
   7454 		if (sc->sc_type >= WM_T_82543) {
   7455 			switch (MCLBYTES) {
   7456 			case 4096:
   7457 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7458 				break;
   7459 			case 8192:
   7460 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7461 				break;
   7462 			case 16384:
   7463 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7464 				break;
   7465 			default:
   7466 				panic("wm_init: MCLBYTES %d unsupported",
   7467 				    MCLBYTES);
   7468 				break;
   7469 			}
   7470 		} else
   7471 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7472 	}
   7473 
   7474 	/* Enable ECC */
   7475 	switch (sc->sc_type) {
   7476 	case WM_T_82571:
   7477 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7478 		reg |= PBA_ECC_CORR_EN;
   7479 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7480 		break;
   7481 	case WM_T_PCH_LPT:
   7482 	case WM_T_PCH_SPT:
   7483 	case WM_T_PCH_CNP:
   7484 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7485 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7486 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7487 
   7488 		sc->sc_ctrl |= CTRL_MEHE;
   7489 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7490 		break;
   7491 	default:
   7492 		break;
   7493 	}
   7494 
   7495 	/*
   7496 	 * Set the receive filter.
   7497 	 *
   7498 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7499 	 * the setting of RCTL.EN in wm_set_filter()
   7500 	 */
   7501 	wm_set_filter(sc);
   7502 
   7503 	/* On 82575 and later, set RDT only if RX is enabled. */
   7504 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7505 		int qidx;
   7506 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7507 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7508 			for (i = 0; i < WM_NRXDESC; i++) {
   7509 				mutex_enter(rxq->rxq_lock);
   7510 				wm_init_rxdesc(rxq, i);
   7511 				mutex_exit(rxq->rxq_lock);
   7512 
   7513 			}
   7514 		}
   7515 	}
   7516 
   7517 	wm_unset_stopping_flags(sc);
   7518 
   7519 	/* Start the one second link check clock. */
   7520 	callout_schedule(&sc->sc_tick_ch, hz);
   7521 
   7522 	/*
   7523 	 * ...all done! (IFNET_LOCKED asserted above.)
   7524 	 */
   7525 	ifp->if_flags |= IFF_RUNNING;
   7526 
   7527 out:
   7528 	/* Save last flags for the callback */
   7529 	sc->sc_if_flags = ifp->if_flags;
   7530 	sc->sc_ec_capenable = ec->ec_capenable;
   7531 	if (error)
   7532 		log(LOG_ERR, "%s: interface not running\n",
   7533 		    device_xname(sc->sc_dev));
   7534 	return error;
   7535 }
   7536 
   7537 /*
   7538  * wm_stop:		[ifnet interface function]
   7539  *
   7540  *	Stop transmission on the interface.
   7541  */
   7542 static void
   7543 wm_stop(struct ifnet *ifp, int disable)
   7544 {
   7545 	struct wm_softc *sc = ifp->if_softc;
   7546 
   7547 	ASSERT_SLEEPABLE();
   7548 	KASSERT(IFNET_LOCKED(ifp));
   7549 
   7550 	mutex_enter(sc->sc_core_lock);
   7551 	wm_stop_locked(ifp, disable ? true : false, true);
   7552 	mutex_exit(sc->sc_core_lock);
   7553 
   7554 	/*
   7555 	 * After wm_set_stopping_flags(), it is guaranteed that
   7556 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   7557 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
   7558 	 * because it can sleep, so call workqueue_wait() here
   7559 	 * instead.
   7560 	 */
   7561 	for (int i = 0; i < sc->sc_nqueues; i++)
   7562 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7563 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7564 }
   7565 
   7566 static void
   7567 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7568 {
   7569 	struct wm_softc *sc = ifp->if_softc;
   7570 	struct wm_txsoft *txs;
   7571 	int i, qidx;
   7572 
   7573 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7574 		device_xname(sc->sc_dev), __func__));
   7575 	KASSERT(IFNET_LOCKED(ifp));
   7576 	KASSERT(mutex_owned(sc->sc_core_lock));
   7577 
   7578 	wm_set_stopping_flags(sc);
   7579 
   7580 	if (sc->sc_flags & WM_F_HAS_MII) {
   7581 		/* Down the MII. */
   7582 		mii_down(&sc->sc_mii);
   7583 	} else {
   7584 #if 0
   7585 		/* Should we clear PHY's status properly? */
   7586 		wm_reset(sc);
   7587 #endif
   7588 	}
   7589 
   7590 	/* Stop the transmit and receive processes. */
   7591 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7592 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7593 	sc->sc_rctl &= ~RCTL_EN;
   7594 
   7595 	/*
   7596 	 * Clear the interrupt mask to ensure the device cannot assert its
   7597 	 * interrupt line.
   7598 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7599 	 * service any currently pending or shared interrupt.
   7600 	 */
   7601 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7602 	sc->sc_icr = 0;
   7603 	if (wm_is_using_msix(sc)) {
   7604 		if (sc->sc_type != WM_T_82574) {
   7605 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7606 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7607 		} else
   7608 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7609 	}
   7610 
   7611 	/*
   7612 	 * Stop callouts after interrupts are disabled; if we have
   7613 	 * to wait for them, we will be releasing the CORE_LOCK
   7614 	 * briefly, which will unblock interrupts on the current CPU.
   7615 	 */
   7616 
   7617 	/* Stop the one second clock. */
   7618 	if (wait)
   7619 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7620 	else
   7621 		callout_stop(&sc->sc_tick_ch);
   7622 
   7623 	/* Stop the 82547 Tx FIFO stall check timer. */
   7624 	if (sc->sc_type == WM_T_82547) {
   7625 		if (wait)
   7626 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7627 		else
   7628 			callout_stop(&sc->sc_txfifo_ch);
   7629 	}
   7630 
   7631 	/* Release any queued transmit buffers. */
   7632 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7633 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7634 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7635 		struct mbuf *m;
   7636 
   7637 		mutex_enter(txq->txq_lock);
   7638 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7639 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7640 			txs = &txq->txq_soft[i];
   7641 			if (txs->txs_mbuf != NULL) {
   7642 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7643 				m_freem(txs->txs_mbuf);
   7644 				txs->txs_mbuf = NULL;
   7645 			}
   7646 		}
   7647 		/* Drain txq_interq */
   7648 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7649 			m_freem(m);
   7650 		mutex_exit(txq->txq_lock);
   7651 	}
   7652 
   7653 	/* Mark the interface as down and cancel the watchdog timer. */
   7654 	ifp->if_flags &= ~IFF_RUNNING;
   7655 	sc->sc_if_flags = ifp->if_flags;
   7656 
   7657 	if (disable) {
   7658 		for (i = 0; i < sc->sc_nqueues; i++) {
   7659 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7660 			mutex_enter(rxq->rxq_lock);
   7661 			wm_rxdrain(rxq);
   7662 			mutex_exit(rxq->rxq_lock);
   7663 		}
   7664 	}
   7665 
   7666 #if 0 /* notyet */
   7667 	if (sc->sc_type >= WM_T_82544)
   7668 		CSR_WRITE(sc, WMREG_WUC, 0);
   7669 #endif
   7670 }
   7671 
   7672 static void
   7673 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7674 {
   7675 	struct mbuf *m;
   7676 	int i;
   7677 
   7678 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7679 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7680 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7681 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7682 		    m->m_data, m->m_len, m->m_flags);
   7683 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7684 	    i, i == 1 ? "" : "s");
   7685 }
   7686 
   7687 /*
   7688  * wm_82547_txfifo_stall:
   7689  *
   7690  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7691  *	reset the FIFO pointers, and restart packet transmission.
   7692  */
   7693 static void
   7694 wm_82547_txfifo_stall(void *arg)
   7695 {
   7696 	struct wm_softc *sc = arg;
   7697 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7698 
   7699 	mutex_enter(txq->txq_lock);
   7700 
   7701 	if (txq->txq_stopping)
   7702 		goto out;
   7703 
   7704 	if (txq->txq_fifo_stall) {
   7705 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7706 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7707 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7708 			/*
   7709 			 * Packets have drained.  Stop transmitter, reset
   7710 			 * FIFO pointers, restart transmitter, and kick
   7711 			 * the packet queue.
   7712 			 */
   7713 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7714 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7715 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7716 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7717 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7718 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7719 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7720 			CSR_WRITE_FLUSH(sc);
   7721 
   7722 			txq->txq_fifo_head = 0;
   7723 			txq->txq_fifo_stall = 0;
   7724 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7725 		} else {
   7726 			/*
   7727 			 * Still waiting for packets to drain; try again in
   7728 			 * another tick.
   7729 			 */
   7730 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7731 		}
   7732 	}
   7733 
   7734 out:
   7735 	mutex_exit(txq->txq_lock);
   7736 }
   7737 
   7738 /*
   7739  * wm_82547_txfifo_bugchk:
   7740  *
   7741  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7742  *	prevent enqueueing a packet that would wrap around the end
   7743  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   7744  *
   7745  *	We do this by checking the amount of space before the end
   7746  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7747  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7748  *	the internal FIFO pointers to the beginning, and restart
   7749  *	transmission on the interface.
   7750  */
   7751 #define	WM_FIFO_HDR		0x10
   7752 #define	WM_82547_PAD_LEN	0x3e0
   7753 static int
   7754 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7755 {
   7756 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7757 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7758 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7759 
   7760 	/* Just return if already stalled. */
   7761 	if (txq->txq_fifo_stall)
   7762 		return 1;
   7763 
   7764 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7765 		/* Stall only occurs in half-duplex mode. */
   7766 		goto send_packet;
   7767 	}
   7768 
   7769 	if (len >= WM_82547_PAD_LEN + space) {
   7770 		txq->txq_fifo_stall = 1;
   7771 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7772 		return 1;
   7773 	}
   7774 
   7775 send_packet:
   7776 	txq->txq_fifo_head += len;
   7777 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7778 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7779 
   7780 	return 0;
   7781 }
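
        #if 0
        /*
         * A minimal, standalone sketch (not compiled into the driver) of
         * the FIFO accounting in wm_82547_txfifo_bugchk() above, with the
         * duplex check and callout omitted.  WM_FIFO_HDR and
         * WM_82547_PAD_LEN are the constants defined above; the function
         * name and parameters are hypothetical.
         */
        static int
        example_82547_fifo_check(int fifo_size, int *fifo_head, int pktlen)
        {
        	int space = fifo_size - *fifo_head;
        	int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);
        
        	if (len >= WM_82547_PAD_LEN + space)
        		return 1;	/* Would wrap; stall and drain the FIFO. */
        
        	*fifo_head += len;	/* Account for the packet... */
        	if (*fifo_head >= fifo_size)
        		*fifo_head -= fifo_size; /* ...modulo the FIFO size. */
        	return 0;		/* Safe to enqueue. */
        }
        #endif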
   7782 
   7783 static int
   7784 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7785 {
   7786 	int error;
   7787 
   7788 	/*
   7789 	 * Allocate the control data structures, and create and load the
   7790 	 * DMA map for it.
   7791 	 *
   7792 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7793 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7794 	 * both sets within the same 4G segment.
   7795 	 */
   7796 	if (sc->sc_type < WM_T_82544)
   7797 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7798 	else
   7799 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7800 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7801 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7802 	else
   7803 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7804 
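        	/*
        	 * Note: the (bus_size_t)0x100000000ULL boundary argument to
        	 * bus_dmamem_alloc() below is what enforces the same-4G
        	 * constraint described above: no allocated segment may cross
        	 * a 4GB boundary.
        	 */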
   7805 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7806 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7807 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7808 		aprint_error_dev(sc->sc_dev,
   7809 		    "unable to allocate TX control data, error = %d\n",
   7810 		    error);
   7811 		goto fail_0;
   7812 	}
   7813 
   7814 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7815 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7816 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7817 		aprint_error_dev(sc->sc_dev,
   7818 		    "unable to map TX control data, error = %d\n", error);
   7819 		goto fail_1;
   7820 	}
   7821 
   7822 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7823 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7824 		aprint_error_dev(sc->sc_dev,
   7825 		    "unable to create TX control data DMA map, error = %d\n",
   7826 		    error);
   7827 		goto fail_2;
   7828 	}
   7829 
   7830 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7831 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7832 		aprint_error_dev(sc->sc_dev,
   7833 		    "unable to load TX control data DMA map, error = %d\n",
   7834 		    error);
   7835 		goto fail_3;
   7836 	}
   7837 
   7838 	return 0;
   7839 
   7840 fail_3:
   7841 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7842 fail_2:
   7843 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7844 	    WM_TXDESCS_SIZE(txq));
   7845 fail_1:
   7846 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7847 fail_0:
   7848 	return error;
   7849 }
   7850 
   7851 static void
   7852 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7853 {
   7854 
   7855 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7856 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7857 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7858 	    WM_TXDESCS_SIZE(txq));
   7859 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7860 }
   7861 
   7862 static int
   7863 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7864 {
   7865 	int error;
   7866 	size_t rxq_descs_size;
   7867 
   7868 	/*
   7869 	 * Allocate the control data structures, and create and load the
   7870 	 * DMA map for it.
   7871 	 *
   7872 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7873 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7874 	 * both sets within the same 4G segment.
   7875 	 */
   7876 	rxq->rxq_ndesc = WM_NRXDESC;
   7877 	if (sc->sc_type == WM_T_82574)
   7878 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7879 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7880 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7881 	else
   7882 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7883 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7884 
   7885 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7886 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7887 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7888 		aprint_error_dev(sc->sc_dev,
   7889 		    "unable to allocate RX control data, error = %d\n",
   7890 		    error);
   7891 		goto fail_0;
   7892 	}
   7893 
   7894 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7895 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7896 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7897 		aprint_error_dev(sc->sc_dev,
   7898 		    "unable to map RX control data, error = %d\n", error);
   7899 		goto fail_1;
   7900 	}
   7901 
   7902 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7903 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7904 		aprint_error_dev(sc->sc_dev,
   7905 		    "unable to create RX control data DMA map, error = %d\n",
   7906 		    error);
   7907 		goto fail_2;
   7908 	}
   7909 
   7910 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7911 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7912 		aprint_error_dev(sc->sc_dev,
   7913 		    "unable to load RX control data DMA map, error = %d\n",
   7914 		    error);
   7915 		goto fail_3;
   7916 	}
   7917 
   7918 	return 0;
   7919 
   7920  fail_3:
   7921 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7922  fail_2:
   7923 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7924 	    rxq_descs_size);
   7925  fail_1:
   7926 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7927  fail_0:
   7928 	return error;
   7929 }
   7930 
   7931 static void
   7932 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7933 {
   7934 
   7935 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7936 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7937 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7938 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7939 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7940 }
   7941 
   7942 
   7943 static int
   7944 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7945 {
   7946 	int i, error;
   7947 
   7948 	/* Create the transmit buffer DMA maps. */
   7949 	WM_TXQUEUELEN(txq) =
   7950 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7951 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7952 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7953 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7954 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7955 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7956 			aprint_error_dev(sc->sc_dev,
   7957 			    "unable to create Tx DMA map %d, error = %d\n",
   7958 			    i, error);
   7959 			goto fail;
   7960 		}
   7961 	}
   7962 
   7963 	return 0;
   7964 
   7965 fail:
   7966 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7967 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7968 			bus_dmamap_destroy(sc->sc_dmat,
   7969 			    txq->txq_soft[i].txs_dmamap);
   7970 	}
   7971 	return error;
   7972 }
   7973 
   7974 static void
   7975 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7976 {
   7977 	int i;
   7978 
   7979 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7980 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7981 			bus_dmamap_destroy(sc->sc_dmat,
   7982 			    txq->txq_soft[i].txs_dmamap);
   7983 	}
   7984 }
   7985 
   7986 static int
   7987 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7988 {
   7989 	int i, error;
   7990 
   7991 	/* Create the receive buffer DMA maps. */
   7992 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7993 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7994 			    MCLBYTES, 0, 0,
   7995 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7996 			aprint_error_dev(sc->sc_dev,
   7997 			    "unable to create Rx DMA map %d error = %d\n",
   7998 			    i, error);
   7999 			goto fail;
   8000 		}
   8001 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8002 	}
   8003 
   8004 	return 0;
   8005 
   8006  fail:
   8007 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8008 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8009 			bus_dmamap_destroy(sc->sc_dmat,
   8010 			    rxq->rxq_soft[i].rxs_dmamap);
   8011 	}
   8012 	return error;
   8013 }
   8014 
   8015 static void
   8016 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8017 {
   8018 	int i;
   8019 
   8020 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8021 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8022 			bus_dmamap_destroy(sc->sc_dmat,
   8023 			    rxq->rxq_soft[i].rxs_dmamap);
   8024 	}
   8025 }
   8026 
   8027 /*
   8028  * wm_alloc_txrx_queues:
   8029  *	Allocate {tx,rx}descs and {tx,rx} buffers
   8030  */
   8031 static int
   8032 wm_alloc_txrx_queues(struct wm_softc *sc)
   8033 {
   8034 	int i, error, tx_done, rx_done;
   8035 
   8036 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8037 	    KM_SLEEP);
   8038 	if (sc->sc_queue == NULL) {
   8039 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   8040 		error = ENOMEM;
   8041 		goto fail_0;
   8042 	}
   8043 
   8044 	/* For transmission */
   8045 	error = 0;
   8046 	tx_done = 0;
   8047 	for (i = 0; i < sc->sc_nqueues; i++) {
   8048 #ifdef WM_EVENT_COUNTERS
   8049 		int j;
   8050 		const char *xname;
   8051 #endif
   8052 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8053 		txq->txq_sc = sc;
   8054 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8055 
   8056 		error = wm_alloc_tx_descs(sc, txq);
   8057 		if (error)
   8058 			break;
   8059 		error = wm_alloc_tx_buffer(sc, txq);
   8060 		if (error) {
   8061 			wm_free_tx_descs(sc, txq);
   8062 			break;
   8063 		}
   8064 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8065 		if (txq->txq_interq == NULL) {
   8066 			wm_free_tx_descs(sc, txq);
   8067 			wm_free_tx_buffer(sc, txq);
   8068 			error = ENOMEM;
   8069 			break;
   8070 		}
   8071 
   8072 #ifdef WM_EVENT_COUNTERS
   8073 		xname = device_xname(sc->sc_dev);
   8074 
   8075 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8076 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8077 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8078 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8079 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8080 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8081 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8082 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8083 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8084 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8085 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8086 
   8087 		for (j = 0; j < WM_NTXSEGS; j++) {
   8088 			snprintf(txq->txq_txseg_evcnt_names[j],
   8089 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8090 			    "txq%02dtxseg%d", i, j);
   8091 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8092 			    EVCNT_TYPE_MISC,
   8093 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8094 		}
   8095 
   8096 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8097 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8098 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8099 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8100 		/* Only for 82544 (and earlier?) */
   8101 		if (sc->sc_type <= WM_T_82544)
   8102 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8103 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8104 #endif /* WM_EVENT_COUNTERS */
   8105 
   8106 		tx_done++;
   8107 	}
   8108 	if (error)
   8109 		goto fail_1;
   8110 
   8111 	/* For receive */
   8112 	error = 0;
   8113 	rx_done = 0;
   8114 	for (i = 0; i < sc->sc_nqueues; i++) {
   8115 #ifdef WM_EVENT_COUNTERS
   8116 		const char *xname;
   8117 #endif
   8118 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8119 		rxq->rxq_sc = sc;
   8120 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8121 
   8122 		error = wm_alloc_rx_descs(sc, rxq);
   8123 		if (error)
   8124 			break;
   8125 
   8126 		error = wm_alloc_rx_buffer(sc, rxq);
   8127 		if (error) {
   8128 			wm_free_rx_descs(sc, rxq);
   8129 			break;
   8130 		}
   8131 
   8132 #ifdef WM_EVENT_COUNTERS
   8133 		xname = device_xname(sc->sc_dev);
   8134 
   8135 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8136 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8137 
   8138 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8139 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8140 #endif /* WM_EVENT_COUNTERS */
   8141 
   8142 		rx_done++;
   8143 	}
   8144 	if (error)
   8145 		goto fail_2;
   8146 
   8147 	return 0;
   8148 
   8149 fail_2:
   8150 	for (i = 0; i < rx_done; i++) {
   8151 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8152 		wm_free_rx_buffer(sc, rxq);
   8153 		wm_free_rx_descs(sc, rxq);
   8154 		if (rxq->rxq_lock)
   8155 			mutex_obj_free(rxq->rxq_lock);
   8156 	}
   8157 fail_1:
   8158 	for (i = 0; i < tx_done; i++) {
   8159 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8160 		pcq_destroy(txq->txq_interq);
   8161 		wm_free_tx_buffer(sc, txq);
   8162 		wm_free_tx_descs(sc, txq);
   8163 		if (txq->txq_lock)
   8164 			mutex_obj_free(txq->txq_lock);
   8165 	}
   8166 
   8167 	kmem_free(sc->sc_queue,
   8168 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8169 fail_0:
   8170 	return error;
   8171 }
   8172 
   8173 /*
   8174  * wm_free_txrx_queues:
   8175  *	Free {tx,rx}descs and {tx,rx} buffers
   8176  */
   8177 static void
   8178 wm_free_txrx_queues(struct wm_softc *sc)
   8179 {
   8180 	int i;
   8181 
   8182 	for (i = 0; i < sc->sc_nqueues; i++) {
   8183 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8184 
   8185 #ifdef WM_EVENT_COUNTERS
   8186 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8187 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8188 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8189 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8190 #endif /* WM_EVENT_COUNTERS */
   8191 
   8192 		wm_free_rx_buffer(sc, rxq);
   8193 		wm_free_rx_descs(sc, rxq);
   8194 		if (rxq->rxq_lock)
   8195 			mutex_obj_free(rxq->rxq_lock);
   8196 	}
   8197 
   8198 	for (i = 0; i < sc->sc_nqueues; i++) {
   8199 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8200 		struct mbuf *m;
   8201 #ifdef WM_EVENT_COUNTERS
   8202 		int j;
   8203 
   8204 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8205 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8206 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8207 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8208 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8209 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8210 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8211 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8212 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8213 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8214 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8215 
   8216 		for (j = 0; j < WM_NTXSEGS; j++)
   8217 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8218 
   8219 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8220 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8221 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8222 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8223 		if (sc->sc_type <= WM_T_82544)
   8224 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8225 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8226 #endif /* WM_EVENT_COUNTERS */
   8227 
   8228 		/* Drain txq_interq */
   8229 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8230 			m_freem(m);
   8231 		pcq_destroy(txq->txq_interq);
   8232 
   8233 		wm_free_tx_buffer(sc, txq);
   8234 		wm_free_tx_descs(sc, txq);
   8235 		if (txq->txq_lock)
   8236 			mutex_obj_free(txq->txq_lock);
   8237 	}
   8238 
   8239 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8240 }
   8241 
   8242 static void
   8243 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8244 {
   8245 
   8246 	KASSERT(mutex_owned(txq->txq_lock));
   8247 
   8248 	/* Initialize the transmit descriptor ring. */
   8249 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8250 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8251 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8252 	txq->txq_free = WM_NTXDESC(txq);
   8253 	txq->txq_next = 0;
   8254 }
   8255 
   8256 static void
   8257 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8258     struct wm_txqueue *txq)
   8259 {
   8260 
   8261 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8262 		device_xname(sc->sc_dev), __func__));
   8263 	KASSERT(mutex_owned(txq->txq_lock));
   8264 
   8265 	if (sc->sc_type < WM_T_82543) {
   8266 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8267 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8268 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8269 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8270 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8271 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8272 	} else {
   8273 		int qid = wmq->wmq_id;
   8274 
   8275 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8276 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8277 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8278 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8279 
   8280 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8281 			/*
   8282 			 * Don't write TDT before TCTL.EN is set.
   8283 			 * See the document.
   8284 			 */
   8285 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8286 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8287 			    | TXDCTL_WTHRESH(0));
   8288 		else {
   8289 			/* XXX should update with AIM? */
   8290 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8291 			if (sc->sc_type >= WM_T_82540) {
   8292 				/* Should be the same */
   8293 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8294 			}
   8295 
   8296 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8297 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8298 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8299 		}
   8300 	}
   8301 }
   8302 
   8303 static void
   8304 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8305 {
   8306 	int i;
   8307 
   8308 	KASSERT(mutex_owned(txq->txq_lock));
   8309 
   8310 	/* Initialize the transmit job descriptors. */
   8311 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8312 		txq->txq_soft[i].txs_mbuf = NULL;
   8313 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8314 	txq->txq_snext = 0;
   8315 	txq->txq_sdirty = 0;
   8316 }
   8317 
   8318 static void
   8319 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8320     struct wm_txqueue *txq)
   8321 {
   8322 
   8323 	KASSERT(mutex_owned(txq->txq_lock));
   8324 
   8325 	/*
   8326 	 * Set up some register offsets that are different between
   8327 	 * the i82542 and the i82543 and later chips.
   8328 	 */
   8329 	if (sc->sc_type < WM_T_82543)
   8330 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8331 	else
   8332 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8333 
   8334 	wm_init_tx_descs(sc, txq);
   8335 	wm_init_tx_regs(sc, wmq, txq);
   8336 	wm_init_tx_buffer(sc, txq);
   8337 
   8338 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   8339 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8340 
   8341 	txq->txq_sending = false;
   8342 }
   8343 
   8344 static void
   8345 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8346     struct wm_rxqueue *rxq)
   8347 {
   8348 
   8349 	KASSERT(mutex_owned(rxq->rxq_lock));
   8350 
   8351 	/*
   8352 	 * Initialize the receive descriptor and receive job
   8353 	 * descriptor rings.
   8354 	 */
   8355 	if (sc->sc_type < WM_T_82543) {
   8356 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8357 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8358 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8359 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8360 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8361 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8362 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8363 
   8364 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8365 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8366 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8367 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8368 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8369 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8370 	} else {
   8371 		int qid = wmq->wmq_id;
   8372 
   8373 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8374 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8375 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8376 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8377 
   8378 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8379 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8380 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8381 				    "or higher\n", __func__, MCLBYTES);
   8382 
   8383 			/*
   8384 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
   8385 			 * supported.
   8386 			 */
   8387 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8388 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8389 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8390 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8391 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8392 			    | RXDCTL_WTHRESH(1));
   8393 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8394 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8395 		} else {
   8396 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8397 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8398 			/* XXX should update with AIM? */
   8399 			CSR_WRITE(sc, WMREG_RDTR,
   8400 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   8401 			/* MUST be the same */
   8402 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8403 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8404 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8405 		}
   8406 	}
   8407 }
   8408 
   8409 static int
   8410 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8411 {
   8412 	struct wm_rxsoft *rxs;
   8413 	int error, i;
   8414 
   8415 	KASSERT(mutex_owned(rxq->rxq_lock));
   8416 
   8417 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8418 		rxs = &rxq->rxq_soft[i];
   8419 		if (rxs->rxs_mbuf == NULL) {
   8420 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8421 				log(LOG_ERR, "%s: unable to allocate or map "
   8422 				    "rx buffer %d, error = %d\n",
   8423 				    device_xname(sc->sc_dev), i, error);
   8424 				/*
   8425 				 * XXX Should attempt to run with fewer receive
   8426 				 * XXX buffers instead of just failing.
   8427 				 */
   8428 				wm_rxdrain(rxq);
   8429 				return ENOMEM;
   8430 			}
   8431 		} else {
   8432 			/*
   8433 			 * For 82575 and 82576, the RX descriptors must be
   8434 			 * initialized after the setting of RCTL.EN in
   8435 			 * wm_set_filter()
   8436 			 */
   8437 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8438 				wm_init_rxdesc(rxq, i);
   8439 		}
   8440 	}
   8441 	rxq->rxq_ptr = 0;
   8442 	rxq->rxq_discard = 0;
   8443 	WM_RXCHAIN_RESET(rxq);
   8444 
   8445 	return 0;
   8446 }
   8447 
   8448 static int
   8449 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8450     struct wm_rxqueue *rxq)
   8451 {
   8452 
   8453 	KASSERT(mutex_owned(rxq->rxq_lock));
   8454 
   8455 	/*
   8456 	 * Set up some register offsets that are different between
   8457 	 * the i82542 and the i82543 and later chips.
   8458 	 */
   8459 	if (sc->sc_type < WM_T_82543)
   8460 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8461 	else
   8462 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8463 
   8464 	wm_init_rx_regs(sc, wmq, rxq);
   8465 	return wm_init_rx_buffer(sc, rxq);
   8466 }
   8467 
   8468 /*
   8469  * wm_init_txrx_queues:
   8470  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8471  */
   8472 static int
   8473 wm_init_txrx_queues(struct wm_softc *sc)
   8474 {
   8475 	int i, error = 0;
   8476 
   8477 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8478 		device_xname(sc->sc_dev), __func__));
   8479 
   8480 	for (i = 0; i < sc->sc_nqueues; i++) {
   8481 		struct wm_queue *wmq = &sc->sc_queue[i];
   8482 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8483 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8484 
   8485 		/*
   8486 		 * TODO
   8487 		 * Currently, a constant value is used instead of AIM
   8488 		 * (Adaptive Interrupt Moderation).  Furthermore, the interrupt
   8489 		 * interval of multiqueue, which uses polling mode, is less
   8490 		 * than the default value.  More tuning, and AIM, are required.
   8491 		 */
   8492 		if (wm_is_using_multiqueue(sc))
   8493 			wmq->wmq_itr = 50;
   8494 		else
   8495 			wmq->wmq_itr = sc->sc_itr_init;
   8496 		wmq->wmq_set_itr = true;
   8497 
   8498 		mutex_enter(txq->txq_lock);
   8499 		wm_init_tx_queue(sc, wmq, txq);
   8500 		mutex_exit(txq->txq_lock);
   8501 
   8502 		mutex_enter(rxq->rxq_lock);
   8503 		error = wm_init_rx_queue(sc, wmq, rxq);
   8504 		mutex_exit(rxq->rxq_lock);
   8505 		if (error)
   8506 			break;
   8507 	}
   8508 
   8509 	return error;
   8510 }
   8511 
   8512 /*
   8513  * wm_tx_offload:
   8514  *
   8515  *	Set up TCP/IP checksumming parameters for the
   8516  *	specified packet.
   8517  */
   8518 static void
   8519 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8520     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8521 {
   8522 	struct mbuf *m0 = txs->txs_mbuf;
   8523 	struct livengood_tcpip_ctxdesc *t;
   8524 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8525 	uint32_t ipcse;
   8526 	struct ether_header *eh;
   8527 	int offset, iphl;
   8528 	uint8_t fields;
   8529 
   8530 	/*
   8531 	 * XXX It would be nice if the mbuf pkthdr had offset
   8532 	 * fields for the protocol headers.
   8533 	 */
   8534 
   8535 	eh = mtod(m0, struct ether_header *);
   8536 	switch (htons(eh->ether_type)) {
   8537 	case ETHERTYPE_IP:
   8538 	case ETHERTYPE_IPV6:
   8539 		offset = ETHER_HDR_LEN;
   8540 		break;
   8541 
   8542 	case ETHERTYPE_VLAN:
   8543 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8544 		break;
   8545 
   8546 	default:
   8547 		/* Don't support this protocol or encapsulation. */
   8548 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8549 		txq->txq_last_hw_ipcs = 0;
   8550 		txq->txq_last_hw_tucs = 0;
   8551 		*fieldsp = 0;
   8552 		*cmdp = 0;
   8553 		return;
   8554 	}
   8555 
   8556 	if ((m0->m_pkthdr.csum_flags &
   8557 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8558 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8559 	} else
   8560 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8561 
   8562 	ipcse = offset + iphl - 1;
   8563 
   8564 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8565 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8566 	seg = 0;
   8567 	fields = 0;
   8568 
   8569 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8570 		int hlen = offset + iphl;
   8571 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8572 
   8573 		if (__predict_false(m0->m_len <
   8574 				    (hlen + sizeof(struct tcphdr)))) {
   8575 			/*
   8576 			 * TCP/IP headers are not in the first mbuf; we need
   8577 			 * to do this the slow and painful way. Let's just
   8578 			 * hope this doesn't happen very often.
   8579 			 */
   8580 			struct tcphdr th;
   8581 
   8582 			WM_Q_EVCNT_INCR(txq, tsopain);
   8583 
   8584 			m_copydata(m0, hlen, sizeof(th), &th);
   8585 			if (v4) {
   8586 				struct ip ip;
   8587 
   8588 				m_copydata(m0, offset, sizeof(ip), &ip);
   8589 				ip.ip_len = 0;
   8590 				m_copyback(m0,
   8591 				    offset + offsetof(struct ip, ip_len),
   8592 				    sizeof(ip.ip_len), &ip.ip_len);
   8593 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8594 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8595 			} else {
   8596 				struct ip6_hdr ip6;
   8597 
   8598 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8599 				ip6.ip6_plen = 0;
   8600 				m_copyback(m0,
   8601 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8602 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8603 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8604 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8605 			}
   8606 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8607 			    sizeof(th.th_sum), &th.th_sum);
   8608 
   8609 			hlen += th.th_off << 2;
   8610 		} else {
   8611 			/*
   8612 			 * TCP/IP headers are in the first mbuf; we can do
   8613 			 * this the easy way.
   8614 			 */
   8615 			struct tcphdr *th;
   8616 
   8617 			if (v4) {
   8618 				struct ip *ip =
   8619 				    (void *)(mtod(m0, char *) + offset);
   8620 				th = (void *)(mtod(m0, char *) + hlen);
   8621 
   8622 				ip->ip_len = 0;
   8623 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8624 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8625 			} else {
   8626 				struct ip6_hdr *ip6 =
   8627 				    (void *)(mtod(m0, char *) + offset);
   8628 				th = (void *)(mtod(m0, char *) + hlen);
   8629 
   8630 				ip6->ip6_plen = 0;
   8631 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8632 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8633 			}
   8634 			hlen += th->th_off << 2;
   8635 		}
   8636 
   8637 		if (v4) {
   8638 			WM_Q_EVCNT_INCR(txq, tso);
   8639 			cmdlen |= WTX_TCPIP_CMD_IP;
   8640 		} else {
   8641 			WM_Q_EVCNT_INCR(txq, tso6);
   8642 			ipcse = 0;
   8643 		}
   8644 		cmd |= WTX_TCPIP_CMD_TSE;
   8645 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8646 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8647 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8648 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8649 	}
   8650 
   8651 	/*
   8652 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8653 	 * offload feature, if we load the context descriptor, we
   8654 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8655 	 */
   8656 
   8657 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8658 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8659 	    WTX_TCPIP_IPCSE(ipcse);
   8660 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8661 		WM_Q_EVCNT_INCR(txq, ipsum);
   8662 		fields |= WTX_IXSM;
   8663 	}
   8664 
   8665 	offset += iphl;
   8666 
   8667 	if (m0->m_pkthdr.csum_flags &
   8668 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8669 		WM_Q_EVCNT_INCR(txq, tusum);
   8670 		fields |= WTX_TXSM;
   8671 		tucs = WTX_TCPIP_TUCSS(offset) |
   8672 		    WTX_TCPIP_TUCSO(offset +
   8673 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8674 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8675 	} else if ((m0->m_pkthdr.csum_flags &
   8676 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8677 		WM_Q_EVCNT_INCR(txq, tusum6);
   8678 		fields |= WTX_TXSM;
   8679 		tucs = WTX_TCPIP_TUCSS(offset) |
   8680 		    WTX_TCPIP_TUCSO(offset +
   8681 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8682 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8683 	} else {
   8684 		/* Just initialize it to a valid TCP context. */
   8685 		tucs = WTX_TCPIP_TUCSS(offset) |
   8686 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8687 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8688 	}
   8689 
   8690 	*cmdp = cmd;
   8691 	*fieldsp = fields;
   8692 
   8693 	/*
   8694 	 * We don't have to write context descriptor for every packet
   8695 	 * except for 82574. For 82574, we must write context descriptor
   8696 	 * for every packet when we use two descriptor queues.
   8697 	 *
   8698 	 * The 82574L can only remember the *last* context used
   8699 	 * regardless of the queue it was used for.  We cannot reuse
   8700 	 * contexts on this hardware platform and must generate a new
   8701 	 * context every time.  82574L hardware spec, section 7.2.6,
   8702 	 * second note.
   8703 	 */
   8704 	if (sc->sc_nqueues < 2) {
   8705 		/*
   8706 		 * Setting up a new checksum offload context for every
   8707 		 * frame takes a lot of processing time for the hardware.
   8708 		 * It also hurts performance a lot for small-sized frames,
   8709 		 * so avoid it if the driver can reuse a previously
   8710 		 * configured checksum offload context.
   8711 		 * For TSO, in theory we could reuse the same TSO context
   8712 		 * only if the frame is the same type (IP/TCP) and has the
   8713 		 * same MSS.  However, checking whether a frame has the same
   8714 		 * IP/TCP structure is hard, so just ignore that and always
   8715 		 * re-establish a new TSO context.
   8716 		 */
   8717 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8718 		    == 0) {
   8719 			if (txq->txq_last_hw_cmd == cmd &&
   8720 			    txq->txq_last_hw_fields == fields &&
   8721 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8722 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8723 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8724 				return;
   8725 			}
   8726 		}
   8727 
   8728 		txq->txq_last_hw_cmd = cmd;
   8729 		txq->txq_last_hw_fields = fields;
   8730 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8731 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8732 	}
   8733 
   8734 	/* Fill in the context descriptor. */
   8735 	t = (struct livengood_tcpip_ctxdesc *)
   8736 	    &txq->txq_descs[txq->txq_next];
   8737 	t->tcpip_ipcs = htole32(ipcs);
   8738 	t->tcpip_tucs = htole32(tucs);
   8739 	t->tcpip_cmdlen = htole32(cmdlen);
   8740 	t->tcpip_seg = htole32(seg);
   8741 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8742 
   8743 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8744 	txs->txs_ndesc++;
   8745 }
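
        #if 0
        /*
         * A condensed sketch (not compiled into the driver) of the
         * context-reuse test in wm_tx_offload() above: a fresh context
         * descriptor may be skipped only when every field the hardware
         * latched from the previous context matches what the current
         * frame needs.  The function name is hypothetical.
         */
        static bool
        example_can_skip_context(const struct wm_txqueue *txq, uint32_t cmd,
            uint8_t fields, uint32_t ipcs, uint32_t tucs)
        {
        	return txq->txq_last_hw_cmd == cmd &&
        	    txq->txq_last_hw_fields == fields &&
        	    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
        	    txq->txq_last_hw_tucs == (tucs & 0xffff);
        }
        #endif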
   8746 
   8747 static inline int
   8748 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8749 {
   8750 	struct wm_softc *sc = ifp->if_softc;
   8751 	u_int cpuid = cpu_index(curcpu());
   8752 
   8753 	/*
   8754 	 * Currently, a simple distribution strategy.
   8755 	 * TODO:
   8756 	 * Distribute by flow ID (when RSS provides a value).
   8757 	 */
   8758 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8759 }
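
        /*
         * Worked example of the mapping above, with hypothetical numbers:
         * ncpu = 8, sc_affinity_offset = 2, sc_nqueues = 4.  The CPU with
         * index 5 selects ((5 + 8 - 2) % 8) % 4 = (11 % 8) % 4 = 3, i.e.
         * queue 3.  The first "% ncpu" keeps the rotated CPU index in
         * range; the final "% sc_nqueues" folds CPUs onto the queues.
         */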
   8760 
   8761 static inline bool
   8762 wm_linkdown_discard(struct wm_txqueue *txq)
   8763 {
   8764 
   8765 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8766 		return true;
   8767 
   8768 	return false;
   8769 }
   8770 
   8771 /*
   8772  * wm_start:		[ifnet interface function]
   8773  *
   8774  *	Start packet transmission on the interface.
   8775  */
   8776 static void
   8777 wm_start(struct ifnet *ifp)
   8778 {
   8779 	struct wm_softc *sc = ifp->if_softc;
   8780 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8781 
   8782 	KASSERT(if_is_mpsafe(ifp));
   8783 	/*
   8784 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8785 	 */
   8786 
   8787 	mutex_enter(txq->txq_lock);
   8788 	if (!txq->txq_stopping)
   8789 		wm_start_locked(ifp);
   8790 	mutex_exit(txq->txq_lock);
   8791 }
   8792 
   8793 static void
   8794 wm_start_locked(struct ifnet *ifp)
   8795 {
   8796 	struct wm_softc *sc = ifp->if_softc;
   8797 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8798 
   8799 	wm_send_common_locked(ifp, txq, false);
   8800 }
   8801 
   8802 static int
   8803 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8804 {
   8805 	int qid;
   8806 	struct wm_softc *sc = ifp->if_softc;
   8807 	struct wm_txqueue *txq;
   8808 
   8809 	qid = wm_select_txqueue(ifp, m);
   8810 	txq = &sc->sc_queue[qid].wmq_txq;
   8811 
   8812 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8813 		m_freem(m);
   8814 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8815 		return ENOBUFS;
   8816 	}
   8817 
   8818 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8819 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8820 	if (m->m_flags & M_MCAST)
   8821 		if_statinc_ref(nsr, if_omcasts);
   8822 	IF_STAT_PUTREF(ifp);
   8823 
   8824 	if (mutex_tryenter(txq->txq_lock)) {
   8825 		if (!txq->txq_stopping)
   8826 			wm_transmit_locked(ifp, txq);
   8827 		mutex_exit(txq->txq_lock);
   8828 	}
   8829 
   8830 	return 0;
   8831 }
   8832 
   8833 static void
   8834 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8835 {
   8836 
   8837 	wm_send_common_locked(ifp, txq, true);
   8838 }
   8839 
   8840 static void
   8841 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8842     bool is_transmit)
   8843 {
   8844 	struct wm_softc *sc = ifp->if_softc;
   8845 	struct mbuf *m0;
   8846 	struct wm_txsoft *txs;
   8847 	bus_dmamap_t dmamap;
   8848 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8849 	bus_addr_t curaddr;
   8850 	bus_size_t seglen, curlen;
   8851 	uint32_t cksumcmd;
   8852 	uint8_t cksumfields;
   8853 	bool remap = true;
   8854 
   8855 	KASSERT(mutex_owned(txq->txq_lock));
   8856 	KASSERT(!txq->txq_stopping);
   8857 
   8858 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8859 		return;
   8860 
   8861 	if (__predict_false(wm_linkdown_discard(txq))) {
   8862 		do {
   8863 			if (is_transmit)
   8864 				m0 = pcq_get(txq->txq_interq);
   8865 			else
   8866 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8867 			/*
   8868 			 * Increment the successful packet counter even
   8869 			 * though the packet is discarded while the link is down.
   8870 			 */
   8871 			if (m0 != NULL) {
   8872 				if_statinc(ifp, if_opackets);
   8873 				m_freem(m0);
   8874 			}
   8875 		} while (m0 != NULL);
   8876 		return;
   8877 	}
   8878 
   8879 	/* Remember the previous number of free descriptors. */
   8880 	ofree = txq->txq_free;
   8881 
   8882 	/*
   8883 	 * Loop through the send queue, setting up transmit descriptors
   8884 	 * until we drain the queue, or use up all available transmit
   8885 	 * descriptors.
   8886 	 */
   8887 	for (;;) {
   8888 		m0 = NULL;
   8889 
   8890 		/* Get a work queue entry. */
   8891 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8892 			wm_txeof(txq, UINT_MAX);
   8893 			if (txq->txq_sfree == 0) {
   8894 				DPRINTF(sc, WM_DEBUG_TX,
   8895 				    ("%s: TX: no free job descriptors\n",
   8896 					device_xname(sc->sc_dev)));
   8897 				WM_Q_EVCNT_INCR(txq, txsstall);
   8898 				break;
   8899 			}
   8900 		}
   8901 
   8902 		/* Grab a packet off the queue. */
   8903 		if (is_transmit)
   8904 			m0 = pcq_get(txq->txq_interq);
   8905 		else
   8906 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8907 		if (m0 == NULL)
   8908 			break;
   8909 
   8910 		DPRINTF(sc, WM_DEBUG_TX,
   8911 		    ("%s: TX: have packet to transmit: %p\n",
   8912 			device_xname(sc->sc_dev), m0));
   8913 
   8914 		txs = &txq->txq_soft[txq->txq_snext];
   8915 		dmamap = txs->txs_dmamap;
   8916 
   8917 		use_tso = (m0->m_pkthdr.csum_flags &
   8918 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8919 
   8920 		/*
   8921 		 * So says the Linux driver:
   8922 		 * The controller does a simple calculation to make sure
   8923 		 * there is enough room in the FIFO before initiating the
   8924 		 * DMA for each buffer. The calc is:
   8925 		 *	4 = ceil(buffer len / MSS)
   8926 		 * To make sure we don't overrun the FIFO, adjust the max
   8927 		 * buffer len if the MSS drops.
   8928 		 */
   8929 		dmamap->dm_maxsegsz =
   8930 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8931 		    ? m0->m_pkthdr.segsz << 2
   8932 		    : WTX_MAX_LEN;
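        		/*
        		 * Worked example (hypothetical numbers): with an MSS of
        		 * 1460, segsz << 2 = 5840, so each DMA segment is capped
        		 * at 5840 bytes and the controller's estimate of
        		 * ceil(buffer len / MSS) = 4 descriptors per buffer holds.
        		 */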
   8933 
   8934 		/*
   8935 		 * Load the DMA map.  If this fails, the packet either
   8936 		 * didn't fit in the allotted number of segments, or we
   8937 		 * were short on resources.  For the too-many-segments
   8938 		 * case, we simply report an error and drop the packet,
   8939 		 * since we can't sanely copy a jumbo packet to a single
   8940 		 * buffer.
   8941 		 */
   8942 retry:
   8943 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8944 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8945 		if (__predict_false(error)) {
   8946 			if (error == EFBIG) {
   8947 				if (remap == true) {
   8948 					struct mbuf *m;
   8949 
   8950 					remap = false;
   8951 					m = m_defrag(m0, M_NOWAIT);
   8952 					if (m != NULL) {
   8953 						WM_Q_EVCNT_INCR(txq, defrag);
   8954 						m0 = m;
   8955 						goto retry;
   8956 					}
   8957 				}
   8958 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8959 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8960 				    "DMA segments, dropping...\n",
   8961 				    device_xname(sc->sc_dev));
   8962 				wm_dump_mbuf_chain(sc, m0);
   8963 				m_freem(m0);
   8964 				continue;
   8965 			}
   8966 			/* Short on resources, just stop for now. */
   8967 			DPRINTF(sc, WM_DEBUG_TX,
   8968 			    ("%s: TX: dmamap load failed: %d\n",
   8969 				device_xname(sc->sc_dev), error));
   8970 			break;
   8971 		}
   8972 
   8973 		segs_needed = dmamap->dm_nsegs;
   8974 		if (use_tso) {
   8975 			/* For sentinel descriptor; see below. */
   8976 			segs_needed++;
   8977 		}
   8978 
   8979 		/*
   8980 		 * Ensure we have enough descriptors free to describe
   8981 		 * the packet. Note, we always reserve one descriptor
   8982 		 * at the end of the ring due to the semantics of the
   8983 		 * TDT register, plus one more in the event we need
   8984 		 * to load offload context.
   8985 		 */
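        		/*
        		 * Hence the "- 2" in the test below: one descriptor is
        		 * reserved for the TDT semantics and one for a possible
        		 * offload context descriptor.
        		 */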
   8986 		if (segs_needed > txq->txq_free - 2) {
   8987 			/*
   8988 			 * Not enough free descriptors to transmit this
   8989 			 * packet.  We haven't committed anything yet,
   8990 			 * so just unload the DMA map, put the packet
   8991 			 * pack on the queue, and punt. Notify the upper
   8992 			 * layer that there are no more slots left.
   8993 			 */
   8994 			DPRINTF(sc, WM_DEBUG_TX,
   8995 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8996 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8997 				segs_needed, txq->txq_free - 1));
   8998 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8999 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9000 			WM_Q_EVCNT_INCR(txq, txdstall);
   9001 			break;
   9002 		}
   9003 
   9004 		/*
   9005 		 * Check for 82547 Tx FIFO bug. We need to do this
   9006 		 * once we know we can transmit the packet, since we
   9007 		 * do some internal FIFO space accounting here.
   9008 		 */
   9009 		if (sc->sc_type == WM_T_82547 &&
   9010 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9011 			DPRINTF(sc, WM_DEBUG_TX,
   9012 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9013 				device_xname(sc->sc_dev)));
   9014 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9015 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9016 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9017 			break;
   9018 		}
   9019 
   9020 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9021 
   9022 		DPRINTF(sc, WM_DEBUG_TX,
   9023 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9024 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9025 
   9026 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9027 
   9028 		/*
   9029 		 * Store a pointer to the packet so that we can free it
   9030 		 * later.
   9031 		 *
   9032 		 * Initially, we consider the number of descriptors the
   9033 		 * packet uses to be the number of DMA segments.  This may be
   9034 		 * incremented by 1 if we do checksum offload (a descriptor
   9035 		 * is used to set the checksum context).
   9036 		 */
   9037 		txs->txs_mbuf = m0;
   9038 		txs->txs_firstdesc = txq->txq_next;
   9039 		txs->txs_ndesc = segs_needed;
   9040 
   9041 		/* Set up offload parameters for this packet. */
   9042 		if (m0->m_pkthdr.csum_flags &
   9043 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9044 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9045 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9046 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9047 		} else {
   9048 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9049 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9050 			cksumcmd = 0;
   9051 			cksumfields = 0;
   9052 		}
   9053 
   9054 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9055 
   9056 		/* Sync the DMA map. */
   9057 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9058 		    BUS_DMASYNC_PREWRITE);
   9059 
   9060 		/* Initialize the transmit descriptor. */
   9061 		for (nexttx = txq->txq_next, seg = 0;
   9062 		     seg < dmamap->dm_nsegs; seg++) {
   9063 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9064 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9065 			     seglen != 0;
   9066 			     curaddr += curlen, seglen -= curlen,
   9067 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9068 				curlen = seglen;
   9069 
   9070 				/*
   9071 				 * So says the Linux driver:
   9072 				 * Work around for premature descriptor
   9073 				 * write-backs in TSO mode.  Append a
   9074 				 * 4-byte sentinel descriptor.
   9075 				 */
   9076 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9077 				    curlen > 8)
   9078 					curlen -= 4;
   9079 
   9080 				wm_set_dma_addr(
   9081 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9082 				txq->txq_descs[nexttx].wtx_cmdlen
   9083 				    = htole32(cksumcmd | curlen);
   9084 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9085 				    = 0;
   9086 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9087 				    = cksumfields;
   9088 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9089 				lasttx = nexttx;
   9090 
   9091 				DPRINTF(sc, WM_DEBUG_TX,
   9092 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9093 					"len %#04zx\n",
   9094 					device_xname(sc->sc_dev), nexttx,
   9095 					(uint64_t)curaddr, curlen));
   9096 			}
   9097 		}
   9098 
   9099 		KASSERT(lasttx != -1);
   9100 
   9101 		/*
   9102 		 * Set up the command byte on the last descriptor of
   9103 		 * the packet. If we're in the interrupt delay window,
   9104 		 * delay the interrupt.
   9105 		 */
   9106 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9107 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9108 
   9109 		/*
   9110 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9111 		 * up the descriptor to encapsulate the packet for us.
   9112 		 *
   9113 		 * This is only valid on the last descriptor of the packet.
   9114 		 */
   9115 		if (vlan_has_tag(m0)) {
   9116 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9117 			    htole32(WTX_CMD_VLE);
   9118 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9119 			    = htole16(vlan_get_tag(m0));
   9120 		}
   9121 
   9122 		txs->txs_lastdesc = lasttx;
   9123 
   9124 		DPRINTF(sc, WM_DEBUG_TX,
   9125 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9126 			device_xname(sc->sc_dev),
   9127 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9128 
   9129 		/* Sync the descriptors we're using. */
   9130 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9131 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9132 
   9133 		/* Give the packet to the chip. */
   9134 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9135 
   9136 		DPRINTF(sc, WM_DEBUG_TX,
   9137 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9138 
   9139 		DPRINTF(sc, WM_DEBUG_TX,
   9140 		    ("%s: TX: finished transmitting packet, job %d\n",
   9141 			device_xname(sc->sc_dev), txq->txq_snext));
   9142 
   9143 		/* Advance the tx pointer. */
   9144 		txq->txq_free -= txs->txs_ndesc;
   9145 		txq->txq_next = nexttx;
   9146 
   9147 		txq->txq_sfree--;
   9148 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9149 
   9150 		/* Pass the packet to any BPF listeners. */
   9151 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9152 	}
   9153 
   9154 	if (m0 != NULL) {
   9155 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9156 		WM_Q_EVCNT_INCR(txq, descdrop);
   9157 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9158 			__func__));
   9159 		m_freem(m0);
   9160 	}
   9161 
   9162 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9163 		/* No more slots; notify upper layer. */
   9164 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9165 	}
   9166 
   9167 	if (txq->txq_free != ofree) {
   9168 		/* Set a watchdog timer in case the chip flakes out. */
   9169 		txq->txq_lastsent = time_uptime;
   9170 		txq->txq_sending = true;
   9171 	}
   9172 }
   9173 
   9174 /*
   9175  * wm_nq_tx_offload:
   9176  *
   9177  *	Set up TCP/IP checksumming parameters for the
   9178  *	specified packet, for NEWQUEUE devices
   9179  */
   9180 static void
   9181 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9182     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9183 {
   9184 	struct mbuf *m0 = txs->txs_mbuf;
   9185 	uint32_t vl_len, mssidx, cmdc;
   9186 	struct ether_header *eh;
   9187 	int offset, iphl;
   9188 
   9189 	/*
   9190 	 * XXX It would be nice if the mbuf pkthdr had offset
   9191 	 * fields for the protocol headers.
   9192 	 */
   9193 	*cmdlenp = 0;
   9194 	*fieldsp = 0;
   9195 
   9196 	eh = mtod(m0, struct ether_header *);
   9197 	switch (htons(eh->ether_type)) {
   9198 	case ETHERTYPE_IP:
   9199 	case ETHERTYPE_IPV6:
   9200 		offset = ETHER_HDR_LEN;
   9201 		break;
   9202 
   9203 	case ETHERTYPE_VLAN:
   9204 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9205 		break;
   9206 
   9207 	default:
   9208 		/* Don't support this protocol or encapsulation. */
   9209 		*do_csum = false;
   9210 		return;
   9211 	}
   9212 	*do_csum = true;
   9213 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9214 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9215 
   9216 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9217 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9218 
   9219 	if ((m0->m_pkthdr.csum_flags &
   9220 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9221 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9222 	} else {
   9223 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9224 	}
   9225 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9226 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
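         	/*
         	 * At this point vl_len packs the MAC and IP header lengths
         	 * (e.g. MACLEN = 14 and IPLEN = 20 for an untagged IPv4
         	 * frame without options); the VLAN tag field is filled in
         	 * below only when the mbuf carries a tag.
         	 */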
   9227 
   9228 	if (vlan_has_tag(m0)) {
   9229 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9230 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9231 		*cmdlenp |= NQTX_CMD_VLE;
   9232 	}
   9233 
   9234 	mssidx = 0;
   9235 
   9236 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9237 		int hlen = offset + iphl;
   9238 		int tcp_hlen;
   9239 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9240 
   9241 		if (__predict_false(m0->m_len <
   9242 				    (hlen + sizeof(struct tcphdr)))) {
   9243 			/*
   9244 			 * TCP/IP headers are not in the first mbuf; we need
   9245 			 * to do this the slow and painful way. Let's just
   9246 			 * hope this doesn't happen very often.
   9247 			 */
   9248 			struct tcphdr th;
   9249 
   9250 			WM_Q_EVCNT_INCR(txq, tsopain);
   9251 
   9252 			m_copydata(m0, hlen, sizeof(th), &th);
   9253 			if (v4) {
   9254 				struct ip ip;
   9255 
   9256 				m_copydata(m0, offset, sizeof(ip), &ip);
   9257 				ip.ip_len = 0;
   9258 				m_copyback(m0,
   9259 				    offset + offsetof(struct ip, ip_len),
   9260 				    sizeof(ip.ip_len), &ip.ip_len);
   9261 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9262 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9263 			} else {
   9264 				struct ip6_hdr ip6;
   9265 
   9266 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9267 				ip6.ip6_plen = 0;
   9268 				m_copyback(m0,
   9269 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9270 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9271 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9272 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9273 			}
   9274 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9275 			    sizeof(th.th_sum), &th.th_sum);
   9276 
   9277 			tcp_hlen = th.th_off << 2;
   9278 		} else {
   9279 			/*
   9280 			 * TCP/IP headers are in the first mbuf; we can do
   9281 			 * this the easy way.
   9282 			 */
   9283 			struct tcphdr *th;
   9284 
   9285 			if (v4) {
   9286 				struct ip *ip =
   9287 				    (void *)(mtod(m0, char *) + offset);
   9288 				th = (void *)(mtod(m0, char *) + hlen);
   9289 
   9290 				ip->ip_len = 0;
   9291 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9292 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9293 			} else {
   9294 				struct ip6_hdr *ip6 =
   9295 				    (void *)(mtod(m0, char *) + offset);
   9296 				th = (void *)(mtod(m0, char *) + hlen);
   9297 
   9298 				ip6->ip6_plen = 0;
   9299 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9300 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9301 			}
   9302 			tcp_hlen = th->th_off << 2;
   9303 		}
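         		/*
         		 * Both paths above zeroed ip_len/ip6_plen and seeded
         		 * th_sum with the pseudo-header checksum computed over
         		 * the addresses and protocol only; for TSO the hardware
         		 * fills in the real lengths and finishes the checksum
         		 * for each segment it generates.
         		 */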
   9304 		hlen += tcp_hlen;
   9305 		*cmdlenp |= NQTX_CMD_TSE;
   9306 
   9307 		if (v4) {
   9308 			WM_Q_EVCNT_INCR(txq, tso);
   9309 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9310 		} else {
   9311 			WM_Q_EVCNT_INCR(txq, tso6);
   9312 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9313 		}
   9314 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9315 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9316 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9317 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9318 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9319 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
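         		/*
         		 * Example: for a standard 1500-byte MTU TCP flow,
         		 * segsz is typically 1460 (IPv4) or 1440 (IPv6) and
         		 * tcp_hlen is 20, which tells the segmentation engine
         		 * how to carve up the payload.
         		 */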
   9320 	} else {
   9321 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9322 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9323 	}
   9324 
   9325 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9326 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9327 		cmdc |= NQTXC_CMD_IP4;
   9328 	}
   9329 
   9330 	if (m0->m_pkthdr.csum_flags &
   9331 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9332 		WM_Q_EVCNT_INCR(txq, tusum);
   9333 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9334 			cmdc |= NQTXC_CMD_TCP;
   9335 		else
   9336 			cmdc |= NQTXC_CMD_UDP;
   9337 
   9338 		cmdc |= NQTXC_CMD_IP4;
   9339 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9340 	}
   9341 	if (m0->m_pkthdr.csum_flags &
   9342 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9343 		WM_Q_EVCNT_INCR(txq, tusum6);
   9344 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9345 			cmdc |= NQTXC_CMD_TCP;
   9346 		else
   9347 			cmdc |= NQTXC_CMD_UDP;
   9348 
   9349 		cmdc |= NQTXC_CMD_IP6;
   9350 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9351 	}
   9352 
    9353 	/*
    9354 	 * We don't have to write a context descriptor for every packet
    9355 	 * on NEWQUEUE controllers, i.e. 82575, 82576, 82580, I350, I354,
    9356 	 * I210 and I211; writing one per Tx queue is enough for these
    9357 	 * controllers.
    9358 	 * Writing a context descriptor for every packet adds overhead,
    9359 	 * but it does not cause problems.
    9360 	 */
   9361 	/* Fill in the context descriptor. */
   9362 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9363 	    htole32(vl_len);
   9364 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9365 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9366 	    htole32(cmdc);
   9367 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9368 	    htole32(mssidx);
   9369 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9370 	DPRINTF(sc, WM_DEBUG_TX,
   9371 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9372 		txq->txq_next, 0, vl_len));
   9373 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9374 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
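         	/*
         	 * The context descriptor occupies a ring slot of its own;
         	 * charge it to this packet's job so that wm_txeof() reclaims
         	 * it together with the data descriptors.
         	 */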
   9375 	txs->txs_ndesc++;
   9376 }
   9377 
   9378 /*
   9379  * wm_nq_start:		[ifnet interface function]
   9380  *
   9381  *	Start packet transmission on the interface for NEWQUEUE devices
   9382  */
   9383 static void
   9384 wm_nq_start(struct ifnet *ifp)
   9385 {
   9386 	struct wm_softc *sc = ifp->if_softc;
   9387 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9388 
   9389 	KASSERT(if_is_mpsafe(ifp));
   9390 	/*
   9391 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9392 	 */
   9393 
   9394 	mutex_enter(txq->txq_lock);
   9395 	if (!txq->txq_stopping)
   9396 		wm_nq_start_locked(ifp);
   9397 	mutex_exit(txq->txq_lock);
   9398 }
   9399 
   9400 static void
   9401 wm_nq_start_locked(struct ifnet *ifp)
   9402 {
   9403 	struct wm_softc *sc = ifp->if_softc;
   9404 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9405 
   9406 	wm_nq_send_common_locked(ifp, txq, false);
   9407 }
   9408 
   9409 static int
   9410 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9411 {
   9412 	int qid;
   9413 	struct wm_softc *sc = ifp->if_softc;
   9414 	struct wm_txqueue *txq;
   9415 
   9416 	qid = wm_select_txqueue(ifp, m);
   9417 	txq = &sc->sc_queue[qid].wmq_txq;
   9418 
   9419 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9420 		m_freem(m);
   9421 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9422 		return ENOBUFS;
   9423 	}
   9424 
   9425 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9426 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9427 	if (m->m_flags & M_MCAST)
   9428 		if_statinc_ref(nsr, if_omcasts);
   9429 	IF_STAT_PUTREF(ifp);
   9430 
    9431 	/*
    9432 	 * There are two situations in which this mutex_tryenter() can
    9433 	 * fail at run time:
    9434 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9435 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    9436 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9437 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9438 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    9439 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck,
    9440 	 * either.
    9441 	 */
   9442 	if (mutex_tryenter(txq->txq_lock)) {
   9443 		if (!txq->txq_stopping)
   9444 			wm_nq_transmit_locked(ifp, txq);
   9445 		mutex_exit(txq->txq_lock);
   9446 	}
   9447 
   9448 	return 0;
   9449 }
   9450 
   9451 static void
   9452 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9453 {
   9454 
   9455 	wm_nq_send_common_locked(ifp, txq, true);
   9456 }
   9457 
   9458 static void
   9459 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9460     bool is_transmit)
   9461 {
   9462 	struct wm_softc *sc = ifp->if_softc;
   9463 	struct mbuf *m0;
   9464 	struct wm_txsoft *txs;
   9465 	bus_dmamap_t dmamap;
   9466 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9467 	bool do_csum, sent;
   9468 	bool remap = true;
   9469 
   9470 	KASSERT(mutex_owned(txq->txq_lock));
   9471 	KASSERT(!txq->txq_stopping);
   9472 
   9473 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9474 		return;
   9475 
   9476 	if (__predict_false(wm_linkdown_discard(txq))) {
   9477 		do {
   9478 			if (is_transmit)
   9479 				m0 = pcq_get(txq->txq_interq);
   9480 			else
   9481 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9482 			/*
    9483 			 * Count the packet as successfully sent even though
    9484 			 * it is discarded because the PHY link is down.
    9485 			 */
   9486 			if (m0 != NULL) {
   9487 				if_statinc(ifp, if_opackets);
   9488 				m_freem(m0);
   9489 			}
   9490 		} while (m0 != NULL);
   9491 		return;
   9492 	}
   9493 
   9494 	sent = false;
   9495 
   9496 	/*
   9497 	 * Loop through the send queue, setting up transmit descriptors
   9498 	 * until we drain the queue, or use up all available transmit
   9499 	 * descriptors.
   9500 	 */
   9501 	for (;;) {
   9502 		m0 = NULL;
   9503 
   9504 		/* Get a work queue entry. */
   9505 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9506 			wm_txeof(txq, UINT_MAX);
   9507 			if (txq->txq_sfree == 0) {
   9508 				DPRINTF(sc, WM_DEBUG_TX,
   9509 				    ("%s: TX: no free job descriptors\n",
   9510 					device_xname(sc->sc_dev)));
   9511 				WM_Q_EVCNT_INCR(txq, txsstall);
   9512 				break;
   9513 			}
   9514 		}
   9515 
   9516 		/* Grab a packet off the queue. */
   9517 		if (is_transmit)
   9518 			m0 = pcq_get(txq->txq_interq);
   9519 		else
   9520 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9521 		if (m0 == NULL)
   9522 			break;
   9523 
   9524 		DPRINTF(sc, WM_DEBUG_TX,
   9525 		    ("%s: TX: have packet to transmit: %p\n",
   9526 			device_xname(sc->sc_dev), m0));
   9527 
   9528 		txs = &txq->txq_soft[txq->txq_snext];
   9529 		dmamap = txs->txs_dmamap;
   9530 
   9531 		/*
   9532 		 * Load the DMA map.  If this fails, the packet either
   9533 		 * didn't fit in the allotted number of segments, or we
   9534 		 * were short on resources.  For the too-many-segments
   9535 		 * case, we simply report an error and drop the packet,
   9536 		 * since we can't sanely copy a jumbo packet to a single
   9537 		 * buffer.
   9538 		 */
   9539 retry:
   9540 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9541 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9542 		if (__predict_false(error)) {
   9543 			if (error == EFBIG) {
   9544 				if (remap == true) {
   9545 					struct mbuf *m;
   9546 
   9547 					remap = false;
   9548 					m = m_defrag(m0, M_NOWAIT);
   9549 					if (m != NULL) {
   9550 						WM_Q_EVCNT_INCR(txq, defrag);
   9551 						m0 = m;
   9552 						goto retry;
   9553 					}
   9554 				}
   9555 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9556 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9557 				    "DMA segments, dropping...\n",
   9558 				    device_xname(sc->sc_dev));
   9559 				wm_dump_mbuf_chain(sc, m0);
   9560 				m_freem(m0);
   9561 				continue;
   9562 			}
   9563 			/* Short on resources, just stop for now. */
   9564 			DPRINTF(sc, WM_DEBUG_TX,
   9565 			    ("%s: TX: dmamap load failed: %d\n",
   9566 				device_xname(sc->sc_dev), error));
   9567 			break;
   9568 		}
   9569 
   9570 		segs_needed = dmamap->dm_nsegs;
   9571 
   9572 		/*
   9573 		 * Ensure we have enough descriptors free to describe
   9574 		 * the packet. Note, we always reserve one descriptor
   9575 		 * at the end of the ring due to the semantics of the
   9576 		 * TDT register, plus one more in the event we need
   9577 		 * to load offload context.
   9578 		 */
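         		/*
         		 * (If TDT were advanced to equal TDH, the hardware
         		 * would interpret the ring as empty, so one slot must
         		 * always stay unused; hence the "- 2" below together
         		 * with the possible context descriptor.)
         		 */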
   9579 		if (segs_needed > txq->txq_free - 2) {
   9580 			/*
   9581 			 * Not enough free descriptors to transmit this
   9582 			 * packet.  We haven't committed anything yet,
   9583 			 * so just unload the DMA map, put the packet
    9584 			 * so just unload the DMA map and punt (the packet
    9585 			 * is freed after the loop). Notify the upper
   9586 			 */
   9587 			DPRINTF(sc, WM_DEBUG_TX,
   9588 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9589 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9590 				segs_needed, txq->txq_free - 1));
   9591 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9592 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9593 			WM_Q_EVCNT_INCR(txq, txdstall);
   9594 			break;
   9595 		}
   9596 
   9597 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9598 
   9599 		DPRINTF(sc, WM_DEBUG_TX,
   9600 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9601 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9602 
   9603 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9604 
   9605 		/*
   9606 		 * Store a pointer to the packet so that we can free it
   9607 		 * later.
   9608 		 *
   9609 		 * Initially, we consider the number of descriptors the
   9610 		 * packet uses the number of DMA segments.  This may be
   9611 		 * incremented by 1 if we do checksum offload (a descriptor
   9612 		 * is used to set the checksum context).
   9613 		 */
   9614 		txs->txs_mbuf = m0;
   9615 		txs->txs_firstdesc = txq->txq_next;
   9616 		txs->txs_ndesc = segs_needed;
   9617 
   9618 		/* Set up offload parameters for this packet. */
   9619 		uint32_t cmdlen, fields, dcmdlen;
   9620 		if (m0->m_pkthdr.csum_flags &
   9621 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9622 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9623 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9624 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9625 			    &do_csum);
   9626 		} else {
   9627 			do_csum = false;
   9628 			cmdlen = 0;
   9629 			fields = 0;
   9630 		}
   9631 
   9632 		/* Sync the DMA map. */
   9633 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9634 		    BUS_DMASYNC_PREWRITE);
   9635 
   9636 		/* Initialize the first transmit descriptor. */
   9637 		nexttx = txq->txq_next;
   9638 		if (!do_csum) {
   9639 			/* Set up a legacy descriptor */
   9640 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9641 			    dmamap->dm_segs[0].ds_addr);
   9642 			txq->txq_descs[nexttx].wtx_cmdlen =
   9643 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9644 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9645 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9646 			if (vlan_has_tag(m0)) {
   9647 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9648 				    htole32(WTX_CMD_VLE);
   9649 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9650 				    htole16(vlan_get_tag(m0));
   9651 			} else
   9652 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9653 
   9654 			dcmdlen = 0;
   9655 		} else {
   9656 			/* Set up an advanced data descriptor */
   9657 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9658 			    htole64(dmamap->dm_segs[0].ds_addr);
   9659 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9660 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9661 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9662 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9663 			    htole32(fields);
   9664 			DPRINTF(sc, WM_DEBUG_TX,
   9665 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9666 				device_xname(sc->sc_dev), nexttx,
   9667 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9668 			DPRINTF(sc, WM_DEBUG_TX,
   9669 			    ("\t 0x%08x%08x\n", fields,
   9670 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9671 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9672 		}
   9673 
   9674 		lasttx = nexttx;
   9675 		nexttx = WM_NEXTTX(txq, nexttx);
   9676 		/*
   9677 		 * Fill in the next descriptors. Legacy or advanced format
   9678 		 * is the same here.
   9679 		 */
   9680 		for (seg = 1; seg < dmamap->dm_nsegs;
   9681 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9682 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9683 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9684 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9685 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9686 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9687 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9688 			lasttx = nexttx;
   9689 
   9690 			DPRINTF(sc, WM_DEBUG_TX,
   9691 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9692 				device_xname(sc->sc_dev), nexttx,
   9693 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9694 				dmamap->dm_segs[seg].ds_len));
   9695 		}
   9696 
   9697 		KASSERT(lasttx != -1);
   9698 
   9699 		/*
   9700 		 * Set up the command byte on the last descriptor of
   9701 		 * the packet. If we're in the interrupt delay window,
   9702 		 * delay the interrupt.
   9703 		 */
   9704 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9705 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
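         		/*
         		 * The KASSERT above documents that the legacy
         		 * WTX_CMD_* values can be used here even for advanced
         		 * descriptors: EOP and RS occupy the same bit
         		 * positions in both formats.
         		 */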
   9706 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9707 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9708 
   9709 		txs->txs_lastdesc = lasttx;
   9710 
   9711 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9712 		    device_xname(sc->sc_dev),
   9713 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9714 
   9715 		/* Sync the descriptors we're using. */
   9716 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9717 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9718 
   9719 		/* Give the packet to the chip. */
   9720 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9721 		sent = true;
   9722 
   9723 		DPRINTF(sc, WM_DEBUG_TX,
   9724 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9725 
   9726 		DPRINTF(sc, WM_DEBUG_TX,
   9727 		    ("%s: TX: finished transmitting packet, job %d\n",
   9728 			device_xname(sc->sc_dev), txq->txq_snext));
   9729 
   9730 		/* Advance the tx pointer. */
   9731 		txq->txq_free -= txs->txs_ndesc;
   9732 		txq->txq_next = nexttx;
   9733 
   9734 		txq->txq_sfree--;
   9735 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9736 
   9737 		/* Pass the packet to any BPF listeners. */
   9738 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9739 	}
   9740 
   9741 	if (m0 != NULL) {
   9742 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9743 		WM_Q_EVCNT_INCR(txq, descdrop);
   9744 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9745 			__func__));
   9746 		m_freem(m0);
   9747 	}
   9748 
   9749 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9750 		/* No more slots; notify upper layer. */
   9751 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9752 	}
   9753 
   9754 	if (sent) {
   9755 		/* Set a watchdog timer in case the chip flakes out. */
   9756 		txq->txq_lastsent = time_uptime;
   9757 		txq->txq_sending = true;
   9758 	}
   9759 }
   9760 
   9761 static void
   9762 wm_deferred_start_locked(struct wm_txqueue *txq)
   9763 {
   9764 	struct wm_softc *sc = txq->txq_sc;
   9765 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9766 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9767 	int qid = wmq->wmq_id;
   9768 
   9769 	KASSERT(mutex_owned(txq->txq_lock));
   9770 	KASSERT(!txq->txq_stopping);
   9771 
   9772 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9773 		/* XXX Needed for ALTQ or single-CPU systems */
   9774 		if (qid == 0)
   9775 			wm_nq_start_locked(ifp);
   9776 		wm_nq_transmit_locked(ifp, txq);
   9777 	} else {
    9778 		/* XXX Needed for ALTQ or single-CPU systems */
   9779 		if (qid == 0)
   9780 			wm_start_locked(ifp);
   9781 		wm_transmit_locked(ifp, txq);
   9782 	}
   9783 }
   9784 
   9785 /* Interrupt */
   9786 
   9787 /*
   9788  * wm_txeof:
   9789  *
   9790  *	Helper; handle transmit interrupts.
   9791  */
   9792 static bool
   9793 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9794 {
   9795 	struct wm_softc *sc = txq->txq_sc;
   9796 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9797 	struct wm_txsoft *txs;
   9798 	int count = 0;
   9799 	int i;
   9800 	uint8_t status;
   9801 	bool more = false;
   9802 
   9803 	KASSERT(mutex_owned(txq->txq_lock));
   9804 
   9805 	if (txq->txq_stopping)
   9806 		return false;
   9807 
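         	/*
         	 * Reclaiming descriptors below may free ring space, so
         	 * optimistically clear the no-space flag; the send path
         	 * sets it again if the ring is still full.
         	 */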
   9808 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9809 
   9810 	/*
   9811 	 * Go through the Tx list and free mbufs for those
   9812 	 * frames which have been transmitted.
   9813 	 */
   9814 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9815 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9816 		txs = &txq->txq_soft[i];
   9817 
   9818 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9819 			device_xname(sc->sc_dev), i));
   9820 
   9821 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9822 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9823 
   9824 		status =
   9825 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9826 		if ((status & WTX_ST_DD) == 0) {
   9827 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9828 			    BUS_DMASYNC_PREREAD);
   9829 			break;
   9830 		}
   9831 
   9832 		if (limit-- == 0) {
   9833 			more = true;
   9834 			DPRINTF(sc, WM_DEBUG_TX,
   9835 			    ("%s: TX: loop limited, job %d is not processed\n",
   9836 				device_xname(sc->sc_dev), i));
   9837 			break;
   9838 		}
   9839 
   9840 		count++;
   9841 		DPRINTF(sc, WM_DEBUG_TX,
   9842 		    ("%s: TX: job %d done: descs %d..%d\n",
   9843 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9844 		    txs->txs_lastdesc));
   9845 
   9846 #ifdef WM_EVENT_COUNTERS
   9847 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9848 			WM_Q_EVCNT_INCR(txq, underrun);
   9849 #endif /* WM_EVENT_COUNTERS */
   9850 
    9851 		/*
    9852 		 * Documents for 82574 and newer say the status field has neither
    9853 		 * the EC (Excessive Collision) bit nor the LC (Late Collision)
    9854 		 * bit; both are reserved. Refer to the "PCIe GbE Controller Open
    9855 		 * Source Software Developer's Manual", 82574 datasheet and newer.
    9856 		 *
    9857 		 * XXX The LC bit was observed set on an I218 even though the
    9858 		 * media was full duplex, so the bit might have some other
    9859 		 * meaning (no documentation available).
    9860 		 */
   9861 
   9862 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9863 		    && ((sc->sc_type < WM_T_82574)
   9864 			|| (sc->sc_type == WM_T_80003))) {
   9865 			if_statinc(ifp, if_oerrors);
   9866 			if (status & WTX_ST_LC)
   9867 				log(LOG_WARNING, "%s: late collision\n",
   9868 				    device_xname(sc->sc_dev));
   9869 			else if (status & WTX_ST_EC) {
   9870 				if_statadd(ifp, if_collisions,
   9871 				    TX_COLLISION_THRESHOLD + 1);
   9872 				log(LOG_WARNING, "%s: excessive collisions\n",
   9873 				    device_xname(sc->sc_dev));
   9874 			}
   9875 		} else
   9876 			if_statinc(ifp, if_opackets);
   9877 
   9878 		txq->txq_packets++;
   9879 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9880 
   9881 		txq->txq_free += txs->txs_ndesc;
   9882 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9883 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9884 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9885 		m_freem(txs->txs_mbuf);
   9886 		txs->txs_mbuf = NULL;
   9887 	}
   9888 
   9889 	/* Update the dirty transmit buffer pointer. */
   9890 	txq->txq_sdirty = i;
   9891 	DPRINTF(sc, WM_DEBUG_TX,
   9892 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9893 
   9894 	if (count != 0)
   9895 		rnd_add_uint32(&sc->rnd_source, count);
   9896 
   9897 	/*
   9898 	 * If there are no more pending transmissions, cancel the watchdog
   9899 	 * timer.
   9900 	 */
   9901 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9902 		txq->txq_sending = false;
   9903 
   9904 	return more;
   9905 }
   9906 
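         /*
          * Receive descriptor accessors.  The driver handles three Rx
          * descriptor layouts: the legacy format, the 82574 extended
          * format and the NEWQUEUE (82575 and newer) advanced format.
          * The inline helpers below hide the differences from wm_rxeof().
          */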
   9907 static inline uint32_t
   9908 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9909 {
   9910 	struct wm_softc *sc = rxq->rxq_sc;
   9911 
   9912 	if (sc->sc_type == WM_T_82574)
   9913 		return EXTRXC_STATUS(
   9914 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9915 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9916 		return NQRXC_STATUS(
   9917 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9918 	else
   9919 		return rxq->rxq_descs[idx].wrx_status;
   9920 }
   9921 
   9922 static inline uint32_t
   9923 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9924 {
   9925 	struct wm_softc *sc = rxq->rxq_sc;
   9926 
   9927 	if (sc->sc_type == WM_T_82574)
   9928 		return EXTRXC_ERROR(
   9929 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9930 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9931 		return NQRXC_ERROR(
   9932 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9933 	else
   9934 		return rxq->rxq_descs[idx].wrx_errors;
   9935 }
   9936 
   9937 static inline uint16_t
   9938 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9939 {
   9940 	struct wm_softc *sc = rxq->rxq_sc;
   9941 
   9942 	if (sc->sc_type == WM_T_82574)
   9943 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9944 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9945 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9946 	else
   9947 		return rxq->rxq_descs[idx].wrx_special;
   9948 }
   9949 
   9950 static inline int
   9951 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9952 {
   9953 	struct wm_softc *sc = rxq->rxq_sc;
   9954 
   9955 	if (sc->sc_type == WM_T_82574)
   9956 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9957 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9958 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9959 	else
   9960 		return rxq->rxq_descs[idx].wrx_len;
   9961 }
   9962 
   9963 #ifdef WM_DEBUG
   9964 static inline uint32_t
   9965 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9966 {
   9967 	struct wm_softc *sc = rxq->rxq_sc;
   9968 
   9969 	if (sc->sc_type == WM_T_82574)
   9970 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9971 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9972 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9973 	else
   9974 		return 0;
   9975 }
   9976 
   9977 static inline uint8_t
   9978 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9979 {
   9980 	struct wm_softc *sc = rxq->rxq_sc;
   9981 
   9982 	if (sc->sc_type == WM_T_82574)
   9983 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9984 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9985 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9986 	else
   9987 		return 0;
   9988 }
   9989 #endif /* WM_DEBUG */
   9990 
   9991 static inline bool
   9992 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9993     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9994 {
   9995 
   9996 	if (sc->sc_type == WM_T_82574)
   9997 		return (status & ext_bit) != 0;
   9998 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9999 		return (status & nq_bit) != 0;
   10000 	else
   10001 		return (status & legacy_bit) != 0;
   10002 }
   10003 
   10004 static inline bool
   10005 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10006     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10007 {
   10008 
   10009 	if (sc->sc_type == WM_T_82574)
   10010 		return (error & ext_bit) != 0;
   10011 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10012 		return (error & nq_bit) != 0;
   10013 	else
   10014 		return (error & legacy_bit) != 0;
   10015 }
   10016 
   10017 static inline bool
   10018 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10019 {
   10020 
   10021 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10022 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10023 		return true;
   10024 	else
   10025 		return false;
   10026 }
   10027 
   10028 static inline bool
   10029 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10030 {
   10031 	struct wm_softc *sc = rxq->rxq_sc;
   10032 
   10033 	/* XXX missing error bit for newqueue? */
   10034 	if (wm_rxdesc_is_set_error(sc, errors,
   10035 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10036 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10037 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10038 		NQRXC_ERROR_RXE)) {
   10039 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10040 		    EXTRXC_ERROR_SE, 0))
   10041 			log(LOG_WARNING, "%s: symbol error\n",
   10042 			    device_xname(sc->sc_dev));
   10043 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10044 		    EXTRXC_ERROR_SEQ, 0))
   10045 			log(LOG_WARNING, "%s: receive sequence error\n",
   10046 			    device_xname(sc->sc_dev));
   10047 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10048 		    EXTRXC_ERROR_CE, 0))
   10049 			log(LOG_WARNING, "%s: CRC error\n",
   10050 			    device_xname(sc->sc_dev));
   10051 		return true;
   10052 	}
   10053 
   10054 	return false;
   10055 }
   10056 
   10057 static inline bool
   10058 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10059 {
   10060 	struct wm_softc *sc = rxq->rxq_sc;
   10061 
   10062 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10063 		NQRXC_STATUS_DD)) {
   10064 		/* We have processed all of the receive descriptors. */
   10065 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10066 		return false;
   10067 	}
   10068 
   10069 	return true;
   10070 }
   10071 
   10072 static inline bool
   10073 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10074     uint16_t vlantag, struct mbuf *m)
   10075 {
   10076 
   10077 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10078 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10079 		vlan_set_tag(m, le16toh(vlantag));
   10080 	}
   10081 
   10082 	return true;
   10083 }
   10084 
   10085 static inline void
   10086 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10087     uint32_t errors, struct mbuf *m)
   10088 {
   10089 	struct wm_softc *sc = rxq->rxq_sc;
   10090 
   10091 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10092 		if (wm_rxdesc_is_set_status(sc, status,
   10093 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10094 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10095 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10096 			if (wm_rxdesc_is_set_error(sc, errors,
   10097 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10098 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10099 		}
   10100 		if (wm_rxdesc_is_set_status(sc, status,
   10101 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10102 			/*
   10103 			 * Note: we don't know if this was TCP or UDP,
   10104 			 * so we just set both bits, and expect the
   10105 			 * upper layers to deal.
   10106 			 */
   10107 			WM_Q_EVCNT_INCR(rxq, tusum);
   10108 			m->m_pkthdr.csum_flags |=
   10109 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10110 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10111 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10112 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10113 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10114 		}
   10115 	}
   10116 }
   10117 
   10118 /*
   10119  * wm_rxeof:
   10120  *
   10121  *	Helper; handle receive interrupts.
   10122  */
   10123 static bool
   10124 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10125 {
   10126 	struct wm_softc *sc = rxq->rxq_sc;
   10127 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10128 	struct wm_rxsoft *rxs;
   10129 	struct mbuf *m;
   10130 	int i, len;
   10131 	int count = 0;
   10132 	uint32_t status, errors;
   10133 	uint16_t vlantag;
   10134 	bool more = false;
   10135 
   10136 	KASSERT(mutex_owned(rxq->rxq_lock));
   10137 
   10138 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10139 		rxs = &rxq->rxq_soft[i];
   10140 
   10141 		DPRINTF(sc, WM_DEBUG_RX,
   10142 		    ("%s: RX: checking descriptor %d\n",
   10143 			device_xname(sc->sc_dev), i));
   10144 		wm_cdrxsync(rxq, i,
   10145 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10146 
   10147 		status = wm_rxdesc_get_status(rxq, i);
   10148 		errors = wm_rxdesc_get_errors(rxq, i);
   10149 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10150 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10151 #ifdef WM_DEBUG
   10152 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10153 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10154 #endif
   10155 
   10156 		if (!wm_rxdesc_dd(rxq, i, status))
   10157 			break;
   10158 
   10159 		if (limit-- == 0) {
   10160 			more = true;
   10161 			DPRINTF(sc, WM_DEBUG_RX,
   10162 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10163 				device_xname(sc->sc_dev), i));
   10164 			break;
   10165 		}
   10166 
   10167 		count++;
   10168 		if (__predict_false(rxq->rxq_discard)) {
   10169 			DPRINTF(sc, WM_DEBUG_RX,
   10170 			    ("%s: RX: discarding contents of descriptor %d\n",
   10171 				device_xname(sc->sc_dev), i));
   10172 			wm_init_rxdesc(rxq, i);
   10173 			if (wm_rxdesc_is_eop(rxq, status)) {
   10174 				/* Reset our state. */
   10175 				DPRINTF(sc, WM_DEBUG_RX,
   10176 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10177 					device_xname(sc->sc_dev)));
   10178 				rxq->rxq_discard = 0;
   10179 			}
   10180 			continue;
   10181 		}
   10182 
   10183 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10184 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10185 
   10186 		m = rxs->rxs_mbuf;
   10187 
   10188 		/*
   10189 		 * Add a new receive buffer to the ring, unless of
   10190 		 * course the length is zero. Treat the latter as a
   10191 		 * failed mapping.
   10192 		 */
   10193 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10194 			/*
   10195 			 * Failed, throw away what we've done so
   10196 			 * far, and discard the rest of the packet.
   10197 			 */
   10198 			if_statinc(ifp, if_ierrors);
   10199 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10200 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10201 			wm_init_rxdesc(rxq, i);
   10202 			if (!wm_rxdesc_is_eop(rxq, status))
   10203 				rxq->rxq_discard = 1;
   10204 			if (rxq->rxq_head != NULL)
   10205 				m_freem(rxq->rxq_head);
   10206 			WM_RXCHAIN_RESET(rxq);
   10207 			DPRINTF(sc, WM_DEBUG_RX,
   10208 			    ("%s: RX: Rx buffer allocation failed, "
   10209 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10210 				rxq->rxq_discard ? " (discard)" : ""));
   10211 			continue;
   10212 		}
   10213 
   10214 		m->m_len = len;
   10215 		rxq->rxq_len += len;
   10216 		DPRINTF(sc, WM_DEBUG_RX,
   10217 		    ("%s: RX: buffer at %p len %d\n",
   10218 			device_xname(sc->sc_dev), m->m_data, len));
   10219 
   10220 		/* If this is not the end of the packet, keep looking. */
   10221 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10222 			WM_RXCHAIN_LINK(rxq, m);
   10223 			DPRINTF(sc, WM_DEBUG_RX,
   10224 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10225 				device_xname(sc->sc_dev), rxq->rxq_len));
   10226 			continue;
   10227 		}
   10228 
    10229 		/*
    10230 		 * Okay, we have the entire packet now. The chip is configured
    10231 		 * to include the FCS (not all chips can be configured to strip
    10232 		 * it), so we normally trim it. I35[04] and I21[01] have an
    10233 		 * errata whereby the RCTL_SECRC bit in the RCTL register is
    10234 		 * always set, so on those chips the FCS is already stripped
    10235 		 * and we don't trim it. PCH2 and newer chips also do not
    10236 		 * include the FCS when jumbo frames are used, to work around
    10237 		 * an errata. We may need to adjust the length of the previous
    10238 		 * mbuf in the chain if the current mbuf is too short.
    10239 		 */
   10240 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10241 			if (m->m_len < ETHER_CRC_LEN) {
   10242 				rxq->rxq_tail->m_len
   10243 				    -= (ETHER_CRC_LEN - m->m_len);
   10244 				m->m_len = 0;
   10245 			} else
   10246 				m->m_len -= ETHER_CRC_LEN;
   10247 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10248 		} else
   10249 			len = rxq->rxq_len;
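          		/*
          		 * Example: if the final mbuf holds only 2 of the 4
          		 * FCS bytes, the other 2 are trimmed from the tail of
          		 * the previous mbuf (rxq_tail) above.
          		 */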
   10250 
   10251 		WM_RXCHAIN_LINK(rxq, m);
   10252 
   10253 		*rxq->rxq_tailp = NULL;
   10254 		m = rxq->rxq_head;
   10255 
   10256 		WM_RXCHAIN_RESET(rxq);
   10257 
   10258 		DPRINTF(sc, WM_DEBUG_RX,
   10259 		    ("%s: RX: have entire packet, len -> %d\n",
   10260 			device_xname(sc->sc_dev), len));
   10261 
   10262 		/* If an error occurred, update stats and drop the packet. */
   10263 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10264 			m_freem(m);
   10265 			continue;
   10266 		}
   10267 
   10268 		/* No errors.  Receive the packet. */
   10269 		m_set_rcvif(m, ifp);
   10270 		m->m_pkthdr.len = len;
    10271 		/*
    10272 		 * TODO:
    10273 		 * the rsshash and rsstype should be saved in this mbuf.
    10274 		 */
   10275 		DPRINTF(sc, WM_DEBUG_RX,
   10276 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10277 			device_xname(sc->sc_dev), rsstype, rsshash));
   10278 
   10279 		/*
   10280 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10281 		 * for us.  Associate the tag with the packet.
   10282 		 */
   10283 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10284 			continue;
   10285 
   10286 		/* Set up checksum info for this packet. */
   10287 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10288 
   10289 		rxq->rxq_packets++;
   10290 		rxq->rxq_bytes += len;
   10291 		/* Pass it on. */
   10292 		if_percpuq_enqueue(sc->sc_ipq, m);
   10293 
   10294 		if (rxq->rxq_stopping)
   10295 			break;
   10296 	}
   10297 	rxq->rxq_ptr = i;
   10298 
   10299 	if (count != 0)
   10300 		rnd_add_uint32(&sc->rnd_source, count);
   10301 
   10302 	DPRINTF(sc, WM_DEBUG_RX,
   10303 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10304 
   10305 	return more;
   10306 }
   10307 
   10308 /*
   10309  * wm_linkintr_gmii:
   10310  *
   10311  *	Helper; handle link interrupts for GMII.
   10312  */
   10313 static void
   10314 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10315 {
   10316 	device_t dev = sc->sc_dev;
   10317 	uint32_t status, reg;
   10318 	bool link;
   10319 	bool dopoll = true;
   10320 	int rv;
   10321 
   10322 	KASSERT(mutex_owned(sc->sc_core_lock));
   10323 
   10324 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10325 		__func__));
   10326 
   10327 	if ((icr & ICR_LSC) == 0) {
   10328 		if (icr & ICR_RXSEQ)
   10329 			DPRINTF(sc, WM_DEBUG_LINK,
   10330 			    ("%s: LINK Receive sequence error\n",
   10331 				device_xname(dev)));
   10332 		return;
   10333 	}
   10334 
   10335 	/* Link status changed */
   10336 	status = CSR_READ(sc, WMREG_STATUS);
   10337 	link = status & STATUS_LU;
   10338 	if (link) {
   10339 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10340 			device_xname(dev),
   10341 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10342 		if (wm_phy_need_linkdown_discard(sc)) {
   10343 			DPRINTF(sc, WM_DEBUG_LINK,
   10344 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10345 				device_xname(dev)));
   10346 			wm_clear_linkdown_discard(sc);
   10347 		}
   10348 	} else {
   10349 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10350 			device_xname(dev)));
   10351 		if (wm_phy_need_linkdown_discard(sc)) {
   10352 			DPRINTF(sc, WM_DEBUG_LINK,
   10353 			    ("%s: linkintr: Set linkdown discard flag\n",
   10354 				device_xname(dev)));
   10355 			wm_set_linkdown_discard(sc);
   10356 		}
   10357 	}
   10358 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10359 		wm_gig_downshift_workaround_ich8lan(sc);
   10360 
   10361 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10362 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10363 
   10364 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10365 		device_xname(dev)));
   10366 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10367 		if (link) {
    10368 			/*
    10369 			 * To work around the problem, we have to wait
    10370 			 * several hundred milliseconds. The time depends
    10371 			 * on the environment. Wait 1 second to be safe.
    10372 			 */
   10373 			dopoll = false;
   10374 			getmicrotime(&sc->sc_linkup_delay_time);
   10375 			sc->sc_linkup_delay_time.tv_sec += 1;
   10376 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
    10377 			/*
    10378 			 * Simplify by checking tv_sec only; that's enough.
    10379 			 *
    10380 			 * Clearing the time is not currently required; it
    10381 			 * is done only to make it visible that the timer
    10382 			 * has stopped (for debugging).
    10383 			 */
   10384 
   10385 			sc->sc_linkup_delay_time.tv_sec = 0;
   10386 			sc->sc_linkup_delay_time.tv_usec = 0;
   10387 		}
   10388 	}
   10389 
    10390 	/*
    10391 	 * Call mii_pollstat().
    10392 	 *
    10393 	 * On some (not all) systems using I35[04] or I21[01], packets are
    10394 	 * not sent soon after link-up; the MAC sends a packet to the PHY
    10395 	 * and no error is observed. This causes gratuitous ARP and/or IPv6
    10396 	 * DAD packets to be silently dropped. To avoid this, don't call
    10397 	 * mii_pollstat() here, which would send a LINK_STATE_UP notification
    10398 	 * to the upper layer. Instead, mii_pollstat() will be called in
    10399 	 * wm_gmii_mediastatus(), or mii_tick() will be called in wm_tick().
    10400 	 */
   10401 	if (dopoll)
   10402 		mii_pollstat(&sc->sc_mii);
   10403 
   10404 	/* Do some workarounds soon after link status is changed. */
   10405 
   10406 	if (sc->sc_type == WM_T_82543) {
   10407 		int miistatus, active;
   10408 
   10409 		/*
   10410 		 * With 82543, we need to force speed and
   10411 		 * duplex on the MAC equal to what the PHY
   10412 		 * speed and duplex configuration is.
   10413 		 */
   10414 		miistatus = sc->sc_mii.mii_media_status;
   10415 
   10416 		if (miistatus & IFM_ACTIVE) {
   10417 			active = sc->sc_mii.mii_media_active;
   10418 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10419 			switch (IFM_SUBTYPE(active)) {
   10420 			case IFM_10_T:
   10421 				sc->sc_ctrl |= CTRL_SPEED_10;
   10422 				break;
   10423 			case IFM_100_TX:
   10424 				sc->sc_ctrl |= CTRL_SPEED_100;
   10425 				break;
   10426 			case IFM_1000_T:
   10427 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10428 				break;
   10429 			default:
   10430 				/*
   10431 				 * Fiber?
    10432 				 * Should not enter here.
   10433 				 */
   10434 				device_printf(dev, "unknown media (%x)\n",
   10435 				    active);
   10436 				break;
   10437 			}
   10438 			if (active & IFM_FDX)
   10439 				sc->sc_ctrl |= CTRL_FD;
   10440 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10441 		}
   10442 	} else if (sc->sc_type == WM_T_PCH) {
   10443 		wm_k1_gig_workaround_hv(sc,
   10444 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10445 	}
   10446 
   10447 	/*
   10448 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10449 	 * aggressive resulting in many collisions. To avoid this, increase
   10450 	 * the IPG and reduce Rx latency in the PHY.
   10451 	 */
   10452 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10453 	    && link) {
   10454 		uint32_t tipg_reg;
   10455 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10456 		bool fdx;
   10457 		uint16_t emi_addr, emi_val;
   10458 
   10459 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10460 		tipg_reg &= ~TIPG_IPGT_MASK;
   10461 		fdx = status & STATUS_FD;
   10462 
   10463 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10464 			tipg_reg |= 0xff;
   10465 			/* Reduce Rx latency in analog PHY */
   10466 			emi_val = 0;
   10467 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10468 		    fdx && speed != STATUS_SPEED_1000) {
   10469 			tipg_reg |= 0xc;
   10470 			emi_val = 1;
   10471 		} else {
   10472 			/* Roll back the default values */
   10473 			tipg_reg |= 0x08;
   10474 			emi_val = 1;
   10475 		}
   10476 
   10477 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10478 
   10479 		rv = sc->phy.acquire(sc);
   10480 		if (rv)
   10481 			return;
   10482 
   10483 		if (sc->sc_type == WM_T_PCH2)
   10484 			emi_addr = I82579_RX_CONFIG;
   10485 		else
   10486 			emi_addr = I217_RX_CONFIG;
   10487 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10488 
   10489 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10490 			uint16_t phy_reg;
   10491 
   10492 			sc->phy.readreg_locked(dev, 2,
   10493 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10494 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10495 			if (speed == STATUS_SPEED_100
   10496 			    || speed == STATUS_SPEED_10)
   10497 				phy_reg |= 0x3e8;
   10498 			else
   10499 				phy_reg |= 0xfa;
   10500 			sc->phy.writereg_locked(dev, 2,
   10501 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10502 
   10503 			if (speed == STATUS_SPEED_1000) {
   10504 				sc->phy.readreg_locked(dev, 2,
   10505 				    HV_PM_CTRL, &phy_reg);
   10506 
   10507 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10508 
   10509 				sc->phy.writereg_locked(dev, 2,
   10510 				    HV_PM_CTRL, phy_reg);
   10511 			}
   10512 		}
   10513 		sc->phy.release(sc);
   10514 
   10515 		if (rv)
   10516 			return;
   10517 
   10518 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10519 			uint16_t data, ptr_gap;
   10520 
   10521 			if (speed == STATUS_SPEED_1000) {
   10522 				rv = sc->phy.acquire(sc);
   10523 				if (rv)
   10524 					return;
   10525 
   10526 				rv = sc->phy.readreg_locked(dev, 2,
   10527 				    I82579_UNKNOWN1, &data);
   10528 				if (rv) {
   10529 					sc->phy.release(sc);
   10530 					return;
   10531 				}
   10532 
   10533 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10534 				if (ptr_gap < 0x18) {
   10535 					data &= ~(0x3ff << 2);
   10536 					data |= (0x18 << 2);
   10537 					rv = sc->phy.writereg_locked(dev,
   10538 					    2, I82579_UNKNOWN1, data);
   10539 				}
   10540 				sc->phy.release(sc);
   10541 				if (rv)
   10542 					return;
   10543 			} else {
   10544 				rv = sc->phy.acquire(sc);
   10545 				if (rv)
   10546 					return;
   10547 
   10548 				rv = sc->phy.writereg_locked(dev, 2,
   10549 				    I82579_UNKNOWN1, 0xc023);
   10550 				sc->phy.release(sc);
   10551 				if (rv)
   10552 					return;
   10553 
   10554 			}
   10555 		}
   10556 	}
   10557 
   10558 	/*
   10559 	 * I217 Packet Loss issue:
   10560 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10561 	 * on power up.
   10562 	 * Set the Beacon Duration for I217 to 8 usec
   10563 	 */
   10564 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10565 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10566 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10567 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10568 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10569 	}
   10570 
   10571 	/* Work-around I218 hang issue */
   10572 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10573 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10574 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10575 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10576 		wm_k1_workaround_lpt_lp(sc, link);
   10577 
   10578 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10579 		/*
   10580 		 * Set platform power management values for Latency
   10581 		 * Tolerance Reporting (LTR)
   10582 		 */
   10583 		wm_platform_pm_pch_lpt(sc,
   10584 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10585 	}
   10586 
   10587 	/* Clear link partner's EEE ability */
   10588 	sc->eee_lp_ability = 0;
   10589 
   10590 	/* FEXTNVM6 K1-off workaround */
   10591 	if (sc->sc_type == WM_T_PCH_SPT) {
   10592 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10593 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10594 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10595 		else
   10596 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10597 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10598 	}
   10599 
   10600 	if (!link)
   10601 		return;
   10602 
   10603 	switch (sc->sc_type) {
   10604 	case WM_T_PCH2:
   10605 		wm_k1_workaround_lv(sc);
   10606 		/* FALLTHROUGH */
   10607 	case WM_T_PCH:
   10608 		if (sc->sc_phytype == WMPHY_82578)
   10609 			wm_link_stall_workaround_hv(sc);
   10610 		break;
   10611 	default:
   10612 		break;
   10613 	}
   10614 
   10615 	/* Enable/Disable EEE after link up */
   10616 	if (sc->sc_phytype > WMPHY_82579)
   10617 		wm_set_eee_pchlan(sc);
   10618 }
   10619 
   10620 /*
   10621  * wm_linkintr_tbi:
   10622  *
   10623  *	Helper; handle link interrupts for TBI mode.
   10624  */
   10625 static void
   10626 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10627 {
   10628 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10629 	uint32_t status;
   10630 
   10631 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10632 		__func__));
   10633 
   10634 	status = CSR_READ(sc, WMREG_STATUS);
   10635 	if (icr & ICR_LSC) {
   10636 		wm_check_for_link(sc);
   10637 		if (status & STATUS_LU) {
   10638 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10639 				device_xname(sc->sc_dev),
   10640 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10641 			/*
   10642 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10643 			 * so we should update sc->sc_ctrl
   10644 			 */
   10645 
   10646 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10647 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10648 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10649 			if (status & STATUS_FD)
   10650 				sc->sc_tctl |=
   10651 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10652 			else
   10653 				sc->sc_tctl |=
   10654 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10655 			if (sc->sc_ctrl & CTRL_TFCE)
   10656 				sc->sc_fcrtl |= FCRTL_XONE;
   10657 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10658 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10659 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10660 			sc->sc_tbi_linkup = 1;
   10661 			if_link_state_change(ifp, LINK_STATE_UP);
   10662 		} else {
   10663 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10664 				device_xname(sc->sc_dev)));
   10665 			sc->sc_tbi_linkup = 0;
   10666 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10667 		}
   10668 		/* Update LED */
   10669 		wm_tbi_serdes_set_linkled(sc);
   10670 	} else if (icr & ICR_RXSEQ)
   10671 		DPRINTF(sc, WM_DEBUG_LINK,
   10672 		    ("%s: LINK: Receive sequence error\n",
   10673 			device_xname(sc->sc_dev)));
   10674 }
   10675 
   10676 /*
   10677  * wm_linkintr_serdes:
   10678  *
    10679  *	Helper; handle link interrupts for SERDES mode.
   10680  */
   10681 static void
   10682 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10683 {
   10684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10685 	struct mii_data *mii = &sc->sc_mii;
   10686 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10687 	uint32_t pcs_adv, pcs_lpab, reg;
   10688 
   10689 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10690 		__func__));
   10691 
   10692 	if (icr & ICR_LSC) {
   10693 		/* Check PCS */
   10694 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10695 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10696 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10697 				device_xname(sc->sc_dev)));
   10698 			mii->mii_media_status |= IFM_ACTIVE;
   10699 			sc->sc_tbi_linkup = 1;
   10700 			if_link_state_change(ifp, LINK_STATE_UP);
   10701 		} else {
   10702 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10703 				device_xname(sc->sc_dev)));
   10704 			mii->mii_media_status |= IFM_NONE;
   10705 			sc->sc_tbi_linkup = 0;
   10706 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10707 			wm_tbi_serdes_set_linkled(sc);
   10708 			return;
   10709 		}
   10710 		mii->mii_media_active |= IFM_1000_SX;
   10711 		if ((reg & PCS_LSTS_FDX) != 0)
   10712 			mii->mii_media_active |= IFM_FDX;
   10713 		else
   10714 			mii->mii_media_active |= IFM_HDX;
   10715 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10716 			/* Check flow */
   10717 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10718 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10719 				DPRINTF(sc, WM_DEBUG_LINK,
   10720 				    ("XXX LINKOK but not ACOMP\n"));
   10721 				return;
   10722 			}
   10723 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10724 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10725 			DPRINTF(sc, WM_DEBUG_LINK,
   10726 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10727 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10728 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10729 				mii->mii_media_active |= IFM_FLOW
   10730 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10731 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10732 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10733 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10734 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10735 				mii->mii_media_active |= IFM_FLOW
   10736 				    | IFM_ETH_TXPAUSE;
   10737 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10738 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10739 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10740 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10741 				mii->mii_media_active |= IFM_FLOW
   10742 				    | IFM_ETH_RXPAUSE;
   10743 		}
   10744 		/* Update LED */
   10745 		wm_tbi_serdes_set_linkled(sc);
   10746 	} else
   10747 		DPRINTF(sc, WM_DEBUG_LINK,
   10748 		    ("%s: LINK: Receive sequence error\n",
   10749 		    device_xname(sc->sc_dev)));
   10750 }
   10751 
   10752 /*
   10753  * wm_linkintr:
   10754  *
   10755  *	Helper; handle link interrupts.
   10756  */
   10757 static void
   10758 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10759 {
   10760 
   10761 	KASSERT(mutex_owned(sc->sc_core_lock));
   10762 
   10763 	if (sc->sc_flags & WM_F_HAS_MII)
   10764 		wm_linkintr_gmii(sc, icr);
   10765 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10766 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10767 		wm_linkintr_serdes(sc, icr);
   10768 	else
   10769 		wm_linkintr_tbi(sc, icr);
   10770 }
   10771 
   10772 
   10773 static inline void
   10774 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10775 {
   10776 
   10777 	if (wmq->wmq_txrx_use_workqueue) {
   10778 		if (!wmq->wmq_wq_enqueued) {
   10779 			wmq->wmq_wq_enqueued = true;
   10780 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10781 			    curcpu());
   10782 		}
   10783 	} else
   10784 		softint_schedule(wmq->wmq_si);
   10785 }
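
/*
 * Editor's note: wmq_wq_enqueued guards against enqueueing the same
 * struct work twice; workqueue(9) requires that a work item not be
 * re-enqueued before its worker function has run.  The flag is cleared
 * again at the start of wm_handle_queue_work().
 */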
   10786 
   10787 static inline void
   10788 wm_legacy_intr_disable(struct wm_softc *sc)
   10789 {
   10790 
   10791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10792 }
   10793 
   10794 static inline void
   10795 wm_legacy_intr_enable(struct wm_softc *sc)
   10796 {
   10797 
   10798 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10799 }
   10800 
   10801 /*
   10802  * wm_intr_legacy:
   10803  *
   10804  *	Interrupt service routine for INTx and MSI.
   10805  */
   10806 static int
   10807 wm_intr_legacy(void *arg)
   10808 {
   10809 	struct wm_softc *sc = arg;
   10810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10811 	struct wm_queue *wmq = &sc->sc_queue[0];
   10812 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10813 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10814 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10815 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10816 	uint32_t icr, rndval = 0;
   10817 	bool more = false;
   10818 
   10819 	icr = CSR_READ(sc, WMREG_ICR);
   10820 	if ((icr & sc->sc_icr) == 0)
   10821 		return 0;
   10822 
   10823 	DPRINTF(sc, WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
	rndval = icr;
   10827 
   10828 	mutex_enter(txq->txq_lock);
   10829 
   10830 	if (txq->txq_stopping) {
   10831 		mutex_exit(txq->txq_lock);
   10832 		return 1;
   10833 	}
   10834 
   10835 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10836 	if (icr & ICR_TXDW) {
   10837 		DPRINTF(sc, WM_DEBUG_TX,
   10838 		    ("%s: TX: got TXDW interrupt\n",
   10839 			device_xname(sc->sc_dev)));
   10840 		WM_Q_EVCNT_INCR(txq, txdw);
   10841 	}
   10842 #endif
   10843 	if (txlimit > 0) {
   10844 		more |= wm_txeof(txq, txlimit);
   10845 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10846 			more = true;
   10847 	} else
   10848 		more = true;
   10849 	mutex_exit(txq->txq_lock);
   10850 
   10851 	mutex_enter(rxq->rxq_lock);
   10852 
   10853 	if (rxq->rxq_stopping) {
   10854 		mutex_exit(rxq->rxq_lock);
   10855 		return 1;
   10856 	}
   10857 
   10858 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10859 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10860 		DPRINTF(sc, WM_DEBUG_RX,
   10861 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10862 			device_xname(sc->sc_dev),
   10863 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10864 		WM_Q_EVCNT_INCR(rxq, intr);
   10865 	}
   10866 #endif
   10867 	if (rxlimit > 0) {
   10868 		/*
   10869 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   10871 		 * So, we can call wm_rxeof() in interrupt context.
   10872 		 */
		more |= wm_rxeof(rxq, rxlimit);
   10874 	} else
   10875 		more = true;
   10876 
   10877 	mutex_exit(rxq->rxq_lock);
   10878 
   10879 	mutex_enter(sc->sc_core_lock);
   10880 
   10881 	if (sc->sc_core_stopping) {
   10882 		mutex_exit(sc->sc_core_lock);
   10883 		return 1;
   10884 	}
   10885 
   10886 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10887 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10888 		wm_linkintr(sc, icr);
   10889 	}
   10890 	if ((icr & ICR_GPI(0)) != 0)
   10891 		device_printf(sc->sc_dev, "got module interrupt\n");
   10892 
   10893 	mutex_exit(sc->sc_core_lock);
   10894 
   10895 	if (icr & ICR_RXO) {
   10896 #if defined(WM_DEBUG)
   10897 		log(LOG_WARNING, "%s: Receive overrun\n",
   10898 		    device_xname(sc->sc_dev));
   10899 #endif /* defined(WM_DEBUG) */
   10900 	}
   10901 
   10902 	rnd_add_uint32(&sc->rnd_source, rndval);
   10903 
   10904 	if (more) {
   10905 		/* Try to get more packets going. */
   10906 		wm_legacy_intr_disable(sc);
   10907 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10908 		wm_sched_handle_queue(sc, wmq);
   10909 	}
   10910 
   10911 	return 1;
   10912 }
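
/*
 * Editor's note: a minimal sketch of the pattern used above, assuming
 * the usual read-to-clear ICR semantics of this family in legacy
 * interrupt mode:
 *
 *	icr = CSR_READ(sc, WMREG_ICR);	     <- read-to-clear snapshot
 *	if ((icr & sc->sc_icr) == 0)	     <- not ours (shared INTx line)
 *		return 0;
 *	... process bounded Tx/Rx work under the queue locks ...
 *	if (more) {			     <- work remains: stay masked,
 *		wm_legacy_intr_disable(sc);	defer the rest to a
 *		wm_sched_handle_queue(sc, wmq);	softint or workqueue
 *	}
 *
 * wm_handle_queue() re-enables the interrupt via wm_txrxintr_enable()
 * once the rings are drained.
 */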
   10913 
   10914 static inline void
   10915 wm_txrxintr_disable(struct wm_queue *wmq)
   10916 {
   10917 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10918 
   10919 	if (__predict_false(!wm_is_using_msix(sc))) {
   10920 		wm_legacy_intr_disable(sc);
   10921 		return;
   10922 	}
   10923 
   10924 	if (sc->sc_type == WM_T_82574)
   10925 		CSR_WRITE(sc, WMREG_IMC,
   10926 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10927 	else if (sc->sc_type == WM_T_82575)
   10928 		CSR_WRITE(sc, WMREG_EIMC,
   10929 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10930 	else
   10931 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10932 }
   10933 
   10934 static inline void
   10935 wm_txrxintr_enable(struct wm_queue *wmq)
   10936 {
   10937 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10938 
   10939 	wm_itrs_calculate(sc, wmq);
   10940 
   10941 	if (__predict_false(!wm_is_using_msix(sc))) {
   10942 		wm_legacy_intr_enable(sc);
   10943 		return;
   10944 	}
   10945 
	/*
	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is re-enabled
	 * here.  It does not matter whether RXQ(0) or RXQ(1) re-enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
	 */
   10952 	if (sc->sc_type == WM_T_82574)
   10953 		CSR_WRITE(sc, WMREG_IMS,
   10954 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10955 	else if (sc->sc_type == WM_T_82575)
   10956 		CSR_WRITE(sc, WMREG_EIMS,
   10957 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10958 	else
   10959 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10960 }
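
/*
 * Editor's note: a worked example of the per-queue mask bits used above,
 * assuming a queue with wmq_id == 1 and wmq_intr_idx == 1:
 *
 *	82574:		IMS  |= ICR_TXQ(1) | ICR_RXQ(1) | ICR_OTHER
 *	82575:		EIMS |= EITR_TX_QUEUE(1) | EITR_RX_QUEUE(1)
 *	other MSI-X:	EIMS |= 1 << 1	(one EICR bit per MSI-X vector)
 *
 * wm_txrxintr_disable() clears the corresponding queue bits via
 * IMC/EIMC; ICR_OTHER is managed separately (see wm_linkintr_msix()).
 */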
   10961 
   10962 static int
   10963 wm_txrxintr_msix(void *arg)
   10964 {
   10965 	struct wm_queue *wmq = arg;
   10966 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10967 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10968 	struct wm_softc *sc = txq->txq_sc;
   10969 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10970 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10971 	bool txmore;
   10972 	bool rxmore;
   10973 
   10974 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10975 
   10976 	DPRINTF(sc, WM_DEBUG_TX,
   10977 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10978 
   10979 	wm_txrxintr_disable(wmq);
   10980 
   10981 	mutex_enter(txq->txq_lock);
   10982 
   10983 	if (txq->txq_stopping) {
   10984 		mutex_exit(txq->txq_lock);
   10985 		return 1;
   10986 	}
   10987 
   10988 	WM_Q_EVCNT_INCR(txq, txdw);
   10989 	if (txlimit > 0) {
   10990 		txmore = wm_txeof(txq, txlimit);
   10991 		/* wm_deferred start() is done in wm_handle_queue(). */
   10992 	} else
   10993 		txmore = true;
   10994 	mutex_exit(txq->txq_lock);
   10995 
   10996 	DPRINTF(sc, WM_DEBUG_RX,
   10997 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10998 	mutex_enter(rxq->rxq_lock);
   10999 
   11000 	if (rxq->rxq_stopping) {
   11001 		mutex_exit(rxq->rxq_lock);
   11002 		return 1;
   11003 	}
   11004 
   11005 	WM_Q_EVCNT_INCR(rxq, intr);
   11006 	if (rxlimit > 0) {
   11007 		rxmore = wm_rxeof(rxq, rxlimit);
   11008 	} else
   11009 		rxmore = true;
   11010 	mutex_exit(rxq->rxq_lock);
   11011 
   11012 	wm_itrs_writereg(sc, wmq);
   11013 
   11014 	if (txmore || rxmore) {
   11015 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11016 		wm_sched_handle_queue(sc, wmq);
   11017 	} else
   11018 		wm_txrxintr_enable(wmq);
   11019 
   11020 	return 1;
   11021 }
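
/*
 * Editor's note: wm_txrxintr_msix() and wm_handle_queue() together form
 * a bounded-work loop, roughly:
 *
 *	interrupt -> mask queue -> txeof/rxeof up to
 *	    sc_{tx,rx}_intr_process_limit
 *	    -> more work?  yes: reschedule via softint/workqueue (masked)
 *	                   no:  wm_txrxintr_enable()
 *	softint/work -> txeof/rxeof up to sc_{tx,rx}_process_limit
 *	    -> same decision
 *
 * This keeps time spent in hard interrupt context bounded under load.
 */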
   11022 
   11023 static void
   11024 wm_handle_queue(void *arg)
   11025 {
   11026 	struct wm_queue *wmq = arg;
   11027 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11028 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11029 	struct wm_softc *sc = txq->txq_sc;
   11030 	u_int txlimit = sc->sc_tx_process_limit;
   11031 	u_int rxlimit = sc->sc_rx_process_limit;
   11032 	bool txmore;
   11033 	bool rxmore;
   11034 
   11035 	mutex_enter(txq->txq_lock);
   11036 	if (txq->txq_stopping) {
   11037 		mutex_exit(txq->txq_lock);
   11038 		return;
   11039 	}
   11040 	txmore = wm_txeof(txq, txlimit);
   11041 	wm_deferred_start_locked(txq);
   11042 	mutex_exit(txq->txq_lock);
   11043 
   11044 	mutex_enter(rxq->rxq_lock);
   11045 	if (rxq->rxq_stopping) {
   11046 		mutex_exit(rxq->rxq_lock);
   11047 		return;
   11048 	}
   11049 	WM_Q_EVCNT_INCR(rxq, defer);
   11050 	rxmore = wm_rxeof(rxq, rxlimit);
   11051 	mutex_exit(rxq->rxq_lock);
   11052 
   11053 	if (txmore || rxmore) {
   11054 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11055 		wm_sched_handle_queue(sc, wmq);
   11056 	} else
   11057 		wm_txrxintr_enable(wmq);
   11058 }
   11059 
   11060 static void
   11061 wm_handle_queue_work(struct work *wk, void *context)
   11062 {
   11063 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11064 
	/*
	 * Workaround for some qemu environments, which don't stop
	 * interrupts immediately.
	 */
   11069 	wmq->wmq_wq_enqueued = false;
   11070 	wm_handle_queue(wmq);
   11071 }
   11072 
   11073 /*
   11074  * wm_linkintr_msix:
   11075  *
   11076  *	Interrupt service routine for link status change for MSI-X.
   11077  */
   11078 static int
   11079 wm_linkintr_msix(void *arg)
   11080 {
   11081 	struct wm_softc *sc = arg;
   11082 	uint32_t reg;
	bool has_rxo = false;
   11084 
   11085 	reg = CSR_READ(sc, WMREG_ICR);
   11086 	mutex_enter(sc->sc_core_lock);
   11087 	DPRINTF(sc, WM_DEBUG_LINK,
   11088 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11089 		device_xname(sc->sc_dev), reg));
   11090 
   11091 	if (sc->sc_core_stopping)
   11092 		goto out;
   11093 
   11094 	if ((reg & ICR_LSC) != 0) {
   11095 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11096 		wm_linkintr(sc, ICR_LSC);
   11097 	}
   11098 	if ((reg & ICR_GPI(0)) != 0)
   11099 		device_printf(sc->sc_dev, "got module interrupt\n");
   11100 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In MSI-X mode, the 82574 delivers the receive overrun (RXO)
	 * interrupt on the ICR_OTHER MSI-X vector and raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS, to process the
	 * received packets.
	 */
   11109 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11110 #if defined(WM_DEBUG)
   11111 		log(LOG_WARNING, "%s: Receive overrun\n",
   11112 		    device_xname(sc->sc_dev));
   11113 #endif /* defined(WM_DEBUG) */
   11114 
   11115 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so use polling mode for ICR_OTHER, as
		 * for the Tx/Rx interrupts.  ICR_OTHER is re-enabled at the
		 * end of wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   11123 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11124 
   11125 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11126 	}

out:
   11131 	mutex_exit(sc->sc_core_lock);
   11132 
   11133 	if (sc->sc_type == WM_T_82574) {
   11134 		if (!has_rxo)
   11135 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11136 		else
   11137 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11138 	} else if (sc->sc_type == WM_T_82575)
   11139 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11140 	else
   11141 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11142 
   11143 	return 1;
   11144 }
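
/*
 * Editor's note: the RXO workaround above relies on the Interrupt Cause
 * Set register: bits written to ICS are treated as if the hardware had
 * raised those causes, so
 *
 *	CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
 *
 * re-delivers both Rx queue vectors and lets the normal
 * wm_txrxintr_msix() path drain the rings after an overrun.
 */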
   11145 
   11146 /*
   11147  * Media related.
   11148  * GMII, SGMII, TBI (and SERDES)
   11149  */
   11150 
   11151 /* Common */
   11152 
   11153 /*
   11154  * wm_tbi_serdes_set_linkled:
   11155  *
   11156  *	Update the link LED on TBI and SERDES devices.
   11157  */
   11158 static void
   11159 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11160 {
   11161 
   11162 	if (sc->sc_tbi_linkup)
   11163 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11164 	else
   11165 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11166 
   11167 	/* 82540 or newer devices are active low */
   11168 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11169 
   11170 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11171 }
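
/*
 * Editor's note: a worked example of the XOR above.  On an active-low
 * part (82540 or newer) with the link up:
 *
 *	sc_ctrl |= CTRL_SWDPIN(0);	pin bit is 1
 *	sc_ctrl ^= CTRL_SWDPIN(0);	pin bit becomes 0: the pin is
 *					driven low and the LED lights
 *
 * On older, active-high parts the XOR mask is 0 and the bit is written
 * out unchanged.
 */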
   11172 
   11173 /* GMII related */
   11174 
   11175 /*
   11176  * wm_gmii_reset:
   11177  *
   11178  *	Reset the PHY.
   11179  */
   11180 static void
   11181 wm_gmii_reset(struct wm_softc *sc)
   11182 {
   11183 	uint32_t reg;
   11184 	int rv;
   11185 
   11186 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11187 		device_xname(sc->sc_dev), __func__));
   11188 
   11189 	rv = sc->phy.acquire(sc);
   11190 	if (rv != 0) {
   11191 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11192 		    __func__);
   11193 		return;
   11194 	}
   11195 
   11196 	switch (sc->sc_type) {
   11197 	case WM_T_82542_2_0:
   11198 	case WM_T_82542_2_1:
   11199 		/* null */
   11200 		break;
   11201 	case WM_T_82543:
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's speed and duplex
		 * configuration.  In addition, we need to toggle the PHY's
		 * hardware reset pin to take it out of reset.
		 */
   11208 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11209 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11210 
   11211 		/* The PHY reset pin is active-low. */
   11212 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11213 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11214 		    CTRL_EXT_SWDPIN(4));
   11215 		reg |= CTRL_EXT_SWDPIO(4);
   11216 
   11217 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11218 		CSR_WRITE_FLUSH(sc);
   11219 		delay(10*1000);
   11220 
   11221 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11222 		CSR_WRITE_FLUSH(sc);
   11223 		delay(150);
   11224 #if 0
   11225 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11226 #endif
   11227 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11228 		break;
   11229 	case WM_T_82544:	/* Reset 10000us */
   11230 	case WM_T_82540:
   11231 	case WM_T_82545:
   11232 	case WM_T_82545_3:
   11233 	case WM_T_82546:
   11234 	case WM_T_82546_3:
   11235 	case WM_T_82541:
   11236 	case WM_T_82541_2:
   11237 	case WM_T_82547:
   11238 	case WM_T_82547_2:
   11239 	case WM_T_82571:	/* Reset 100us */
   11240 	case WM_T_82572:
   11241 	case WM_T_82573:
   11242 	case WM_T_82574:
   11243 	case WM_T_82575:
   11244 	case WM_T_82576:
   11245 	case WM_T_82580:
   11246 	case WM_T_I350:
   11247 	case WM_T_I354:
   11248 	case WM_T_I210:
   11249 	case WM_T_I211:
   11250 	case WM_T_82583:
   11251 	case WM_T_80003:
   11252 		/* Generic reset */
   11253 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11254 		CSR_WRITE_FLUSH(sc);
   11255 		delay(20000);
   11256 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11257 		CSR_WRITE_FLUSH(sc);
   11258 		delay(20000);
   11259 
   11260 		if ((sc->sc_type == WM_T_82541)
   11261 		    || (sc->sc_type == WM_T_82541_2)
   11262 		    || (sc->sc_type == WM_T_82547)
   11263 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   11265 			/* XXX add code to set LED after phy reset */
   11266 		}
   11267 		break;
   11268 	case WM_T_ICH8:
   11269 	case WM_T_ICH9:
   11270 	case WM_T_ICH10:
   11271 	case WM_T_PCH:
   11272 	case WM_T_PCH2:
   11273 	case WM_T_PCH_LPT:
   11274 	case WM_T_PCH_SPT:
   11275 	case WM_T_PCH_CNP:
   11276 		/* Generic reset */
   11277 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11278 		CSR_WRITE_FLUSH(sc);
   11279 		delay(100);
   11280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11281 		CSR_WRITE_FLUSH(sc);
   11282 		delay(150);
   11283 		break;
   11284 	default:
   11285 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11286 		    __func__);
   11287 		break;
   11288 	}
   11289 
   11290 	sc->phy.release(sc);
   11291 
   11292 	/* get_cfg_done */
   11293 	wm_get_cfg_done(sc);
   11294 
   11295 	/* Extra setup */
   11296 	switch (sc->sc_type) {
   11297 	case WM_T_82542_2_0:
   11298 	case WM_T_82542_2_1:
   11299 	case WM_T_82543:
   11300 	case WM_T_82544:
   11301 	case WM_T_82540:
   11302 	case WM_T_82545:
   11303 	case WM_T_82545_3:
   11304 	case WM_T_82546:
   11305 	case WM_T_82546_3:
   11306 	case WM_T_82541_2:
   11307 	case WM_T_82547_2:
   11308 	case WM_T_82571:
   11309 	case WM_T_82572:
   11310 	case WM_T_82573:
   11311 	case WM_T_82574:
   11312 	case WM_T_82583:
   11313 	case WM_T_82575:
   11314 	case WM_T_82576:
   11315 	case WM_T_82580:
   11316 	case WM_T_I350:
   11317 	case WM_T_I354:
   11318 	case WM_T_I210:
   11319 	case WM_T_I211:
   11320 	case WM_T_80003:
   11321 		/* Null */
   11322 		break;
   11323 	case WM_T_82541:
   11324 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
   11326 		break;
   11327 	case WM_T_ICH8:
   11328 	case WM_T_ICH9:
   11329 	case WM_T_ICH10:
   11330 	case WM_T_PCH:
   11331 	case WM_T_PCH2:
   11332 	case WM_T_PCH_LPT:
   11333 	case WM_T_PCH_SPT:
   11334 	case WM_T_PCH_CNP:
   11335 		wm_phy_post_reset(sc);
   11336 		break;
   11337 	default:
   11338 		panic("%s: unknown type\n", __func__);
   11339 		break;
   11340 	}
   11341 }
   11342 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, without
 * accessing any PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type.  The list of PCI IDs may not be complete,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect if an entry is missing from
 * the comparison list, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   11361 static void
   11362 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11363     uint16_t phy_model)
   11364 {
   11365 	device_t dev = sc->sc_dev;
   11366 	struct mii_data *mii = &sc->sc_mii;
   11367 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11368 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11369 	mii_readreg_t new_readreg;
   11370 	mii_writereg_t new_writereg;
   11371 	bool dodiag = true;
   11372 
   11373 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11374 		device_xname(sc->sc_dev), __func__));
   11375 
	/*
	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
	 * always incorrect, so don't print diagnostic output on the
	 * second call.
	 */
   11380 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11381 		dodiag = false;
   11382 
   11383 	if (mii->mii_readreg == NULL) {
   11384 		/*
   11385 		 *  This is the first call of this function. For ICH and PCH
   11386 		 * variants, it's difficult to determine the PHY access method
   11387 		 * by sc_type, so use the PCI product ID for some devices.
   11388 		 */
   11389 
   11390 		switch (sc->sc_pcidevid) {
   11391 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11392 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11393 			/* 82577 */
   11394 			new_phytype = WMPHY_82577;
   11395 			break;
   11396 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11397 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11398 			/* 82578 */
   11399 			new_phytype = WMPHY_82578;
   11400 			break;
   11401 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11402 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11403 			/* 82579 */
   11404 			new_phytype = WMPHY_82579;
   11405 			break;
   11406 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11407 		case PCI_PRODUCT_INTEL_82801I_BM:
   11408 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11409 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11410 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11411 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11412 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11413 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11414 			/* ICH8, 9, 10 with 82567 */
   11415 			new_phytype = WMPHY_BM;
   11416 			break;
   11417 		default:
   11418 			break;
   11419 		}
   11420 	} else {
   11421 		/* It's not the first call. Use PHY OUI and model */
   11422 		switch (phy_oui) {
   11423 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11424 			switch (phy_model) {
   11425 			case MII_MODEL_ATTANSIC_AR8021:
   11426 				new_phytype = WMPHY_82578;
   11427 				break;
   11428 			default:
   11429 				break;
   11430 			}
   11431 			break;
   11432 		case MII_OUI_xxMARVELL:
   11433 			switch (phy_model) {
   11434 			case MII_MODEL_xxMARVELL_I210:
   11435 				new_phytype = WMPHY_I210;
   11436 				break;
   11437 			case MII_MODEL_xxMARVELL_E1011:
   11438 			case MII_MODEL_xxMARVELL_E1000_3:
   11439 			case MII_MODEL_xxMARVELL_E1000_5:
   11440 			case MII_MODEL_xxMARVELL_E1112:
   11441 				new_phytype = WMPHY_M88;
   11442 				break;
   11443 			case MII_MODEL_xxMARVELL_E1149:
   11444 				new_phytype = WMPHY_BM;
   11445 				break;
   11446 			case MII_MODEL_xxMARVELL_E1111:
   11447 			case MII_MODEL_xxMARVELL_I347:
   11448 			case MII_MODEL_xxMARVELL_E1512:
   11449 			case MII_MODEL_xxMARVELL_E1340M:
   11450 			case MII_MODEL_xxMARVELL_E1543:
   11451 				new_phytype = WMPHY_M88;
   11452 				break;
   11453 			case MII_MODEL_xxMARVELL_I82563:
   11454 				new_phytype = WMPHY_GG82563;
   11455 				break;
   11456 			default:
   11457 				break;
   11458 			}
   11459 			break;
   11460 		case MII_OUI_INTEL:
   11461 			switch (phy_model) {
   11462 			case MII_MODEL_INTEL_I82577:
   11463 				new_phytype = WMPHY_82577;
   11464 				break;
   11465 			case MII_MODEL_INTEL_I82579:
   11466 				new_phytype = WMPHY_82579;
   11467 				break;
   11468 			case MII_MODEL_INTEL_I217:
   11469 				new_phytype = WMPHY_I217;
   11470 				break;
   11471 			case MII_MODEL_INTEL_I82580:
   11472 				new_phytype = WMPHY_82580;
   11473 				break;
   11474 			case MII_MODEL_INTEL_I350:
   11475 				new_phytype = WMPHY_I350;
   11476 				break;
   11477 			default:
   11478 				break;
   11479 			}
   11480 			break;
   11481 		case MII_OUI_yyINTEL:
   11482 			switch (phy_model) {
   11483 			case MII_MODEL_yyINTEL_I82562G:
   11484 			case MII_MODEL_yyINTEL_I82562EM:
   11485 			case MII_MODEL_yyINTEL_I82562ET:
   11486 				new_phytype = WMPHY_IFE;
   11487 				break;
   11488 			case MII_MODEL_yyINTEL_IGP01E1000:
   11489 				new_phytype = WMPHY_IGP;
   11490 				break;
   11491 			case MII_MODEL_yyINTEL_I82566:
   11492 				new_phytype = WMPHY_IGP_3;
   11493 				break;
   11494 			default:
   11495 				break;
   11496 			}
   11497 			break;
   11498 		default:
   11499 			break;
   11500 		}
   11501 
   11502 		if (dodiag) {
   11503 			if (new_phytype == WMPHY_UNKNOWN)
   11504 				aprint_verbose_dev(dev,
   11505 				    "%s: Unknown PHY model. OUI=%06x, "
   11506 				    "model=%04x\n", __func__, phy_oui,
   11507 				    phy_model);
   11508 
   11509 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11510 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   11514 			}
   11515 		}
   11516 	}
   11517 
   11518 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11519 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11520 		/* SGMII */
   11521 		new_readreg = wm_sgmii_readreg;
   11522 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   11524 		/* BM2 (phyaddr == 1) */
   11525 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11526 		    && (new_phytype != WMPHY_BM)
   11527 		    && (new_phytype != WMPHY_UNKNOWN))
   11528 			doubt_phytype = new_phytype;
   11529 		new_phytype = WMPHY_BM;
   11530 		new_readreg = wm_gmii_bm_readreg;
   11531 		new_writereg = wm_gmii_bm_writereg;
   11532 	} else if (sc->sc_type >= WM_T_PCH) {
   11533 		/* All PCH* use _hv_ */
   11534 		new_readreg = wm_gmii_hv_readreg;
   11535 		new_writereg = wm_gmii_hv_writereg;
   11536 	} else if (sc->sc_type >= WM_T_ICH8) {
   11537 		/* non-82567 ICH8, 9 and 10 */
   11538 		new_readreg = wm_gmii_i82544_readreg;
   11539 		new_writereg = wm_gmii_i82544_writereg;
   11540 	} else if (sc->sc_type >= WM_T_80003) {
   11541 		/* 80003 */
   11542 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11543 		    && (new_phytype != WMPHY_GG82563)
   11544 		    && (new_phytype != WMPHY_UNKNOWN))
   11545 			doubt_phytype = new_phytype;
   11546 		new_phytype = WMPHY_GG82563;
   11547 		new_readreg = wm_gmii_i80003_readreg;
   11548 		new_writereg = wm_gmii_i80003_writereg;
   11549 	} else if (sc->sc_type >= WM_T_I210) {
   11550 		/* I210 and I211 */
   11551 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11552 		    && (new_phytype != WMPHY_I210)
   11553 		    && (new_phytype != WMPHY_UNKNOWN))
   11554 			doubt_phytype = new_phytype;
   11555 		new_phytype = WMPHY_I210;
   11556 		new_readreg = wm_gmii_gs40g_readreg;
   11557 		new_writereg = wm_gmii_gs40g_writereg;
   11558 	} else if (sc->sc_type >= WM_T_82580) {
   11559 		/* 82580, I350 and I354 */
   11560 		new_readreg = wm_gmii_82580_readreg;
   11561 		new_writereg = wm_gmii_82580_writereg;
   11562 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11564 		new_readreg = wm_gmii_i82544_readreg;
   11565 		new_writereg = wm_gmii_i82544_writereg;
   11566 	} else {
   11567 		new_readreg = wm_gmii_i82543_readreg;
   11568 		new_writereg = wm_gmii_i82543_writereg;
   11569 	}
   11570 
   11571 	if (new_phytype == WMPHY_BM) {
   11572 		/* All BM use _bm_ */
   11573 		new_readreg = wm_gmii_bm_readreg;
   11574 		new_writereg = wm_gmii_bm_writereg;
   11575 	}
   11576 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11577 		/* All PCH* use _hv_ */
   11578 		new_readreg = wm_gmii_hv_readreg;
   11579 		new_writereg = wm_gmii_hv_writereg;
   11580 	}
   11581 
   11582 	/* Diag output */
   11583 	if (dodiag) {
   11584 		if (doubt_phytype != WMPHY_UNKNOWN)
   11585 			aprint_error_dev(dev, "Assumed new PHY type was "
   11586 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11587 			    new_phytype);
   11588 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11589 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
   11593 
   11594 		if ((mii->mii_readreg != NULL) &&
   11595 		    (new_phytype == WMPHY_UNKNOWN))
   11596 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11597 
   11598 		if ((mii->mii_readreg != NULL) &&
   11599 		    (mii->mii_readreg != new_readreg))
   11600 			aprint_error_dev(dev, "Previously assumed PHY "
   11601 			    "read/write function was incorrect.\n");
   11602 	}
   11603 
   11604 	/* Update now */
   11605 	sc->sc_phytype = new_phytype;
   11606 	mii->mii_readreg = new_readreg;
   11607 	mii->mii_writereg = new_writereg;
   11608 	if (new_readreg == wm_gmii_hv_readreg) {
   11609 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11610 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11611 	} else if (new_readreg == wm_sgmii_readreg) {
   11612 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11613 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11614 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11615 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11616 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11617 	}
   11618 }
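
/*
 * Editor's note: a condensed summary of the accessor selection above
 * (first match wins), for reference:
 *
 *	SGMII, not via MDIO	wm_sgmii_{read,write}reg
 *	82574/82583 (BM2)	wm_gmii_bm_{read,write}reg
 *	PCH .. PCH_CNP		wm_gmii_hv_{read,write}reg
 *	ICH8/9/10 (non-82567)	wm_gmii_i82544_{read,write}reg
 *	80003			wm_gmii_i80003_{read,write}reg
 *	I210/I211		wm_gmii_gs40g_{read,write}reg
 *	82580/I350/I354		wm_gmii_82580_{read,write}reg
 *	82544 .. 82583		wm_gmii_i82544_{read,write}reg
 *	82543 and older		wm_gmii_i82543_{read,write}reg (bit-banged)
 *
 * A PHY identified as BM then overrides the choice with the _bm_
 * accessors, and any PCH variant overrides it with the _hv_ accessors.
 */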
   11619 
   11620 /*
   11621  * wm_get_phy_id_82575:
   11622  *
   11623  * Return PHY ID. Return -1 if it failed.
   11624  */
   11625 static int
   11626 wm_get_phy_id_82575(struct wm_softc *sc)
   11627 {
   11628 	uint32_t reg;
   11629 	int phyid = -1;
   11630 
   11631 	/* XXX */
   11632 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11633 		return -1;
   11634 
   11635 	if (wm_sgmii_uses_mdio(sc)) {
   11636 		switch (sc->sc_type) {
   11637 		case WM_T_82575:
   11638 		case WM_T_82576:
   11639 			reg = CSR_READ(sc, WMREG_MDIC);
   11640 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11641 			break;
   11642 		case WM_T_82580:
   11643 		case WM_T_I350:
   11644 		case WM_T_I354:
   11645 		case WM_T_I210:
   11646 		case WM_T_I211:
   11647 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11648 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11649 			break;
   11650 		default:
   11651 			return -1;
   11652 		}
   11653 	}
   11654 
   11655 	return phyid;
   11656 }
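
/*
 * Editor's note: a small example of the field extraction above for an
 * 82580-class device, assuming MDICNFG reads back with a PHY address
 * field of 3:
 *
 *	reg   = CSR_READ(sc, WMREG_MDICNFG);
 *	phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;	-> 3
 *
 * wm_gmii_mediainit() then passes this address to mii_attach() so that
 * only that PHY address is probed.
 */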
   11657 
   11658 /*
   11659  * wm_gmii_mediainit:
   11660  *
   11661  *	Initialize media for use on 1000BASE-T devices.
   11662  */
   11663 static void
   11664 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11665 {
   11666 	device_t dev = sc->sc_dev;
   11667 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11668 	struct mii_data *mii = &sc->sc_mii;
   11669 
   11670 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11671 		device_xname(sc->sc_dev), __func__));
   11672 
   11673 	/* We have GMII. */
   11674 	sc->sc_flags |= WM_F_HAS_MII;
   11675 
   11676 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11678 	else
   11679 		sc->sc_tipg = TIPG_1000T_DFLT;
   11680 
   11681 	/*
   11682 	 * Let the chip set speed/duplex on its own based on
   11683 	 * signals from the PHY.
   11684 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11685 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11686 	 */
   11687 	sc->sc_ctrl |= CTRL_SLU;
   11688 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11689 
   11690 	/* Initialize our media structures and probe the GMII. */
   11691 	mii->mii_ifp = ifp;
   11692 
   11693 	mii->mii_statchg = wm_gmii_statchg;
   11694 
   11695 	/* get PHY control from SMBus to PCIe */
   11696 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11697 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11698 	    || (sc->sc_type == WM_T_PCH_CNP))
   11699 		wm_init_phy_workarounds_pchlan(sc);
   11700 
   11701 	wm_gmii_reset(sc);
   11702 
   11703 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11704 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11705 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11706 
   11707 	/* Setup internal SGMII PHY for SFP */
   11708 	wm_sgmii_sfp_preconfig(sc);
   11709 
   11710 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11711 	    || (sc->sc_type == WM_T_82580)
   11712 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11713 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11714 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11715 			/* Attach only one port */
   11716 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11717 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11718 		} else {
   11719 			int i, id;
   11720 			uint32_t ctrl_ext;
   11721 
   11722 			id = wm_get_phy_id_82575(sc);
   11723 			if (id != -1) {
   11724 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11725 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11726 			}
   11727 			if ((id == -1)
   11728 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   11730 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11731 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   11733 				CSR_WRITE_FLUSH(sc);
   11734 				delay(300*1000); /* XXX too long */
   11735 
				/*
				 * Scan PHY addresses 1 to 7.
				 *
				 * I2C access can fail with the I2C
				 * register's ERROR bit set, so suppress
				 * error messages while scanning.
				 */
   11743 				sc->phy.no_errprint = true;
   11744 				for (i = 1; i < 8; i++)
   11745 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11746 					    0xffffffff, i, MII_OFFSET_ANY,
   11747 					    MIIF_DOPAUSE);
   11748 				sc->phy.no_errprint = false;
   11749 
   11750 				/* Restore previous sfp cage power state */
   11751 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11752 			}
   11753 		}
   11754 	} else
   11755 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11756 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11757 
	/*
	 * If the MAC is a PCH2, PCH_LPT, PCH_SPT or PCH_CNP and it failed to
	 * detect an MII PHY, call wm_set_mdio_slow_mode_hv() as a workaround
	 * and retry.
	 */
   11762 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11763 		|| (sc->sc_type == WM_T_PCH_SPT)
   11764 		|| (sc->sc_type == WM_T_PCH_CNP))
   11765 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11766 		wm_set_mdio_slow_mode_hv(sc);
   11767 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11768 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11769 	}
   11770 
	/*
	 * (For ICH8 variants)
	 * If PHY detection failed, use the BM read/write functions and
	 * retry.
	 */
   11775 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11776 		/* if failed, retry with *_bm_* */
   11777 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11778 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11779 		    sc->sc_phytype);
   11780 		sc->sc_phytype = WMPHY_BM;
   11781 		mii->mii_readreg = wm_gmii_bm_readreg;
   11782 		mii->mii_writereg = wm_gmii_bm_writereg;
   11783 
   11784 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11785 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11786 	}
   11787 
   11788 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11790 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11791 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11792 		sc->sc_phytype = WMPHY_NONE;
   11793 	} else {
   11794 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11795 
		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype.
		 */
   11800 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11801 		    child->mii_mpd_model);
   11802 
   11803 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11804 	}
   11805 }
   11806 
   11807 /*
   11808  * wm_gmii_mediachange:	[ifmedia interface function]
   11809  *
   11810  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11811  */
   11812 static int
   11813 wm_gmii_mediachange(struct ifnet *ifp)
   11814 {
   11815 	struct wm_softc *sc = ifp->if_softc;
   11816 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11817 	uint32_t reg;
   11818 	int rc;
   11819 
   11820 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11821 		device_xname(sc->sc_dev), __func__));
   11822 
   11823 	KASSERT(mutex_owned(sc->sc_core_lock));
   11824 
   11825 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11826 		return 0;
   11827 
   11828 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11829 	if ((sc->sc_type == WM_T_82580)
   11830 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11831 	    || (sc->sc_type == WM_T_I211)) {
   11832 		reg = CSR_READ(sc, WMREG_PHPM);
   11833 		reg &= ~PHPM_GO_LINK_D;
   11834 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11835 	}
   11836 
   11837 	/* Disable D0 LPLU. */
   11838 	wm_lplu_d0_disable(sc);
   11839 
   11840 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11841 	sc->sc_ctrl |= CTRL_SLU;
   11842 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11843 	    || (sc->sc_type > WM_T_82543)) {
   11844 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11845 	} else {
   11846 		sc->sc_ctrl &= ~CTRL_ASDE;
   11847 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11848 		if (ife->ifm_media & IFM_FDX)
   11849 			sc->sc_ctrl |= CTRL_FD;
   11850 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11851 		case IFM_10_T:
   11852 			sc->sc_ctrl |= CTRL_SPEED_10;
   11853 			break;
   11854 		case IFM_100_TX:
   11855 			sc->sc_ctrl |= CTRL_SPEED_100;
   11856 			break;
   11857 		case IFM_1000_T:
   11858 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11859 			break;
   11860 		case IFM_NONE:
   11861 			/* There is no specific setting for IFM_NONE */
   11862 			break;
   11863 		default:
   11864 			panic("wm_gmii_mediachange: bad media 0x%x",
   11865 			    ife->ifm_media);
   11866 		}
   11867 	}
   11868 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11869 	CSR_WRITE_FLUSH(sc);
   11870 
   11871 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11872 		wm_serdes_mediachange(ifp);
   11873 
   11874 	if (sc->sc_type <= WM_T_82543)
   11875 		wm_gmii_reset(sc);
   11876 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11877 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11879 		delay(300 * 1000);
   11880 		wm_gmii_reset(sc);
   11881 	}
   11882 
   11883 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11884 		return 0;
   11885 	return rc;
   11886 }
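
/*
 * Editor's note: a worked example of the forced-media path above on a
 * pre-82544 MAC.  Selecting 100baseTX-FDX clears CTRL_ASDE and sets
 *
 *	sc_ctrl |= CTRL_SLU | CTRL_FRCSPD | CTRL_FRCFDX
 *	    | CTRL_SPEED_100 | CTRL_FD;
 *
 * With IFM_AUTO, or on newer MACs, FRCSPD/FRCFDX are cleared instead
 * and the MAC takes speed/duplex from the PHY.
 */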
   11887 
   11888 /*
   11889  * wm_gmii_mediastatus:	[ifmedia interface function]
   11890  *
   11891  *	Get the current interface media status on a 1000BASE-T device.
   11892  */
   11893 static void
   11894 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11895 {
   11896 	struct wm_softc *sc = ifp->if_softc;
   11897 	struct ethercom *ec = &sc->sc_ethercom;
   11898 	struct mii_data *mii;
   11899 	bool dopoll = true;
   11900 
	/*
	 * In normal drivers, ether_mediastatus() would be called here.
	 * It is open coded instead, so that mii_pollstat() can be skipped
	 * while a workaround is in progress.
	 */
   11905 	KASSERT(mutex_owned(sc->sc_core_lock));
   11906 	KASSERT(ec->ec_mii != NULL);
   11907 	KASSERT(mii_locked(ec->ec_mii));
   11908 
   11909 	mii = ec->ec_mii;
   11910 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   11911 		struct timeval now;
   11912 
   11913 		getmicrotime(&now);
   11914 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   11915 			dopoll = false;
   11916 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   11917 			/* Simplify by checking tv_sec only. It's enough. */
   11918 
   11919 			sc->sc_linkup_delay_time.tv_sec = 0;
   11920 			sc->sc_linkup_delay_time.tv_usec = 0;
   11921 		}
   11922 	}
   11923 
   11924 	/*
   11925 	 * Don't call mii_pollstat() while doing workaround.
   11926 	 * See also wm_linkintr_gmii() and wm_tick().
   11927 	 */
   11928 	if (dopoll)
   11929 		mii_pollstat(mii);
   11930 	ifmr->ifm_active = mii->mii_media_active;
   11931 	ifmr->ifm_status = mii->mii_media_status;
   11932 
   11933 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11934 	    | sc->sc_flowflags;
   11935 }
   11936 
   11937 #define	MDI_IO		CTRL_SWDPIN(2)
   11938 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11939 #define	MDI_CLK		CTRL_SWDPIN(3)
   11940 
   11941 static void
   11942 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11943 {
   11944 	uint32_t i, v;
   11945 
   11946 	v = CSR_READ(sc, WMREG_CTRL);
   11947 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11948 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11949 
   11950 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11951 		if (data & i)
   11952 			v |= MDI_IO;
   11953 		else
   11954 			v &= ~MDI_IO;
   11955 		CSR_WRITE(sc, WMREG_CTRL, v);
   11956 		CSR_WRITE_FLUSH(sc);
   11957 		delay(10);
   11958 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11959 		CSR_WRITE_FLUSH(sc);
   11960 		delay(10);
   11961 		CSR_WRITE(sc, WMREG_CTRL, v);
   11962 		CSR_WRITE_FLUSH(sc);
   11963 		delay(10);
   11964 	}
   11965 }
   11966 
   11967 static uint16_t
   11968 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11969 {
   11970 	uint32_t v, i;
   11971 	uint16_t data = 0;
   11972 
   11973 	v = CSR_READ(sc, WMREG_CTRL);
   11974 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11975 	v |= CTRL_SWDPIO(3);
   11976 
   11977 	CSR_WRITE(sc, WMREG_CTRL, v);
   11978 	CSR_WRITE_FLUSH(sc);
   11979 	delay(10);
   11980 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11981 	CSR_WRITE_FLUSH(sc);
   11982 	delay(10);
   11983 	CSR_WRITE(sc, WMREG_CTRL, v);
   11984 	CSR_WRITE_FLUSH(sc);
   11985 	delay(10);
   11986 
   11987 	for (i = 0; i < 16; i++) {
   11988 		data <<= 1;
   11989 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11990 		CSR_WRITE_FLUSH(sc);
   11991 		delay(10);
   11992 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11993 			data |= 1;
   11994 		CSR_WRITE(sc, WMREG_CTRL, v);
   11995 		CSR_WRITE_FLUSH(sc);
   11996 		delay(10);
   11997 	}
   11998 
   11999 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12000 	CSR_WRITE_FLUSH(sc);
   12001 	delay(10);
   12002 	CSR_WRITE(sc, WMREG_CTRL, v);
   12003 	CSR_WRITE_FLUSH(sc);
   12004 	delay(10);
   12005 
   12006 	return data;
   12007 }
   12008 
   12009 #undef MDI_IO
   12010 #undef MDI_DIR
   12011 #undef MDI_CLK
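
/*
 * Editor's note: the bit-banged accessors above implement an IEEE 802.3
 * clause 22 MDIO frame on the software-definable pins.  The 14 bits
 * assembled in wm_gmii_i82543_readreg() below decode as:
 *
 *	bits 13-12	start of frame (MII_COMMAND_START)
 *	bits 11-10	opcode (MII_COMMAND_READ / MII_COMMAND_WRITE)
 *	bits  9-5	PHY address
 *	bits  4-0	register address
 *
 * followed by a turnaround and 16 data bits clocked in by
 * wm_i82543_mii_recvbits().  The 32 one-bits sent first are the MDIO
 * preamble.
 */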
   12012 
   12013 /*
   12014  * wm_gmii_i82543_readreg:	[mii interface function]
   12015  *
   12016  *	Read a PHY register on the GMII (i82543 version).
   12017  */
   12018 static int
   12019 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12020 {
   12021 	struct wm_softc *sc = device_private(dev);
   12022 
   12023 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12024 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12025 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12026 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12027 
   12028 	DPRINTF(sc, WM_DEBUG_GMII,
   12029 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12030 		device_xname(dev), phy, reg, *val));
   12031 
   12032 	return 0;
   12033 }
   12034 
   12035 /*
   12036  * wm_gmii_i82543_writereg:	[mii interface function]
   12037  *
   12038  *	Write a PHY register on the GMII (i82543 version).
   12039  */
   12040 static int
   12041 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12042 {
   12043 	struct wm_softc *sc = device_private(dev);
   12044 
   12045 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12046 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12047 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12048 	    (MII_COMMAND_START << 30), 32);
   12049 
   12050 	return 0;
   12051 }
   12052 
   12053 /*
   12054  * wm_gmii_mdic_readreg:	[mii interface function]
   12055  *
   12056  *	Read a PHY register on the GMII.
   12057  */
   12058 static int
   12059 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12060 {
   12061 	struct wm_softc *sc = device_private(dev);
   12062 	uint32_t mdic = 0;
   12063 	int i;
   12064 
   12065 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12066 	    && (reg > MII_ADDRMASK)) {
   12067 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12068 		    __func__, sc->sc_phytype, reg);
   12069 		reg &= MII_ADDRMASK;
   12070 	}
   12071 
   12072 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12073 	    MDIC_REGADD(reg));
   12074 
   12075 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12076 		delay(50);
   12077 		mdic = CSR_READ(sc, WMREG_MDIC);
   12078 		if (mdic & MDIC_READY)
   12079 			break;
   12080 	}
   12081 
   12082 	if ((mdic & MDIC_READY) == 0) {
   12083 		DPRINTF(sc, WM_DEBUG_GMII,
   12084 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12085 			device_xname(dev), phy, reg));
   12086 		return ETIMEDOUT;
   12087 	} else if (mdic & MDIC_E) {
   12088 		/* This is normal if no PHY is present. */
   12089 		DPRINTF(sc, WM_DEBUG_GMII,
   12090 		    ("%s: MDIC read error: phy %d reg %d\n",
   12091 			device_xname(sc->sc_dev), phy, reg));
   12092 		return -1;
   12093 	} else
   12094 		*val = MDIC_DATA(mdic);
   12095 
   12096 	/*
   12097 	 * Allow some time after each MDIC transaction to avoid
   12098 	 * reading duplicate data in the next MDIC transaction.
   12099 	 */
   12100 	if (sc->sc_type == WM_T_PCH2)
   12101 		delay(100);
   12102 
   12103 	return 0;
   12104 }
   12105 
   12106 /*
   12107  * wm_gmii_mdic_writereg:	[mii interface function]
   12108  *
   12109  *	Write a PHY register on the GMII.
   12110  */
   12111 static int
   12112 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12113 {
   12114 	struct wm_softc *sc = device_private(dev);
   12115 	uint32_t mdic = 0;
   12116 	int i;
   12117 
   12118 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12119 	    && (reg > MII_ADDRMASK)) {
   12120 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12121 		    __func__, sc->sc_phytype, reg);
   12122 		reg &= MII_ADDRMASK;
   12123 	}
   12124 
   12125 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12126 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12127 
   12128 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12129 		delay(50);
   12130 		mdic = CSR_READ(sc, WMREG_MDIC);
   12131 		if (mdic & MDIC_READY)
   12132 			break;
   12133 	}
   12134 
   12135 	if ((mdic & MDIC_READY) == 0) {
   12136 		DPRINTF(sc, WM_DEBUG_GMII,
   12137 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12138 			device_xname(dev), phy, reg));
   12139 		return ETIMEDOUT;
   12140 	} else if (mdic & MDIC_E) {
   12141 		DPRINTF(sc, WM_DEBUG_GMII,
   12142 		    ("%s: MDIC write error: phy %d reg %d\n",
   12143 			device_xname(dev), phy, reg));
   12144 		return -1;
   12145 	}
   12146 
   12147 	/*
   12148 	 * Allow some time after each MDIC transaction to avoid
   12149 	 * reading duplicate data in the next MDIC transaction.
   12150 	 */
   12151 	if (sc->sc_type == WM_T_PCH2)
   12152 		delay(100);
   12153 
   12154 	return 0;
   12155 }
   12156 
   12157 /*
   12158  * wm_gmii_i82544_readreg:	[mii interface function]
   12159  *
   12160  *	Read a PHY register on the GMII.
   12161  */
   12162 static int
   12163 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12164 {
   12165 	struct wm_softc *sc = device_private(dev);
   12166 	int rv;
   12167 
   12168 	rv = sc->phy.acquire(sc);
   12169 	if (rv != 0) {
   12170 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12171 		return rv;
   12172 	}
   12173 
   12174 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12175 
   12176 	sc->phy.release(sc);
   12177 
   12178 	return rv;
   12179 }
   12180 
   12181 static int
   12182 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12183 {
   12184 	struct wm_softc *sc = device_private(dev);
   12185 	int rv;
   12186 
   12187 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12188 		switch (sc->sc_phytype) {
   12189 		case WMPHY_IGP:
   12190 		case WMPHY_IGP_2:
   12191 		case WMPHY_IGP_3:
   12192 			rv = wm_gmii_mdic_writereg(dev, phy,
   12193 			    IGPHY_PAGE_SELECT, reg);
   12194 			if (rv != 0)
   12195 				return rv;
   12196 			break;
   12197 		default:
   12198 #ifdef WM_DEBUG
   12199 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   12200 			    __func__, sc->sc_phytype, reg);
   12201 #endif
   12202 			break;
   12203 		}
   12204 	}
   12205 
   12206 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12207 }
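
/*
 * Editor's note: a sketch of the IGP paging convention used above.  For
 * register numbers above BME1000_MAX_MULTI_PAGE_REG, the full value is
 * first written to IGPHY_PAGE_SELECT, and the low five bits
 * (MII_ADDRMASK) then address the register within that page.  For a
 * hypothetical register number 0x1a5:
 *
 *	wm_gmii_mdic_writereg(dev, phy, IGPHY_PAGE_SELECT, 0x1a5);
 *	wm_gmii_mdic_readreg(dev, phy, 0x1a5 & MII_ADDRMASK, &val);
 */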
   12208 
   12209 /*
   12210  * wm_gmii_i82544_writereg:	[mii interface function]
   12211  *
   12212  *	Write a PHY register on the GMII.
   12213  */
   12214 static int
   12215 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12216 {
   12217 	struct wm_softc *sc = device_private(dev);
   12218 	int rv;
   12219 
   12220 	rv = sc->phy.acquire(sc);
   12221 	if (rv != 0) {
   12222 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12223 		return rv;
   12224 	}
   12225 
   12226 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12227 	sc->phy.release(sc);
   12228 
   12229 	return rv;
   12230 }
   12231 
   12232 static int
   12233 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12234 {
   12235 	struct wm_softc *sc = device_private(dev);
   12236 	int rv;
   12237 
   12238 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12239 		switch (sc->sc_phytype) {
   12240 		case WMPHY_IGP:
   12241 		case WMPHY_IGP_2:
   12242 		case WMPHY_IGP_3:
   12243 			rv = wm_gmii_mdic_writereg(dev, phy,
   12244 			    IGPHY_PAGE_SELECT, reg);
   12245 			if (rv != 0)
   12246 				return rv;
   12247 			break;
   12248 		default:
   12249 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
   12252 #endif
   12253 			break;
   12254 		}
   12255 	}
   12256 
   12257 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12258 }
   12259 
   12260 /*
   12261  * wm_gmii_i80003_readreg:	[mii interface function]
   12262  *
 *	Read a PHY register on the Kumeran bus.
   12264  * This could be handled by the PHY layer if we didn't have to lock the
   12265  * resource ...
   12266  */
   12267 static int
   12268 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12269 {
   12270 	struct wm_softc *sc = device_private(dev);
   12271 	int page_select;
   12272 	uint16_t temp, temp2;
   12273 	int rv;
   12274 
   12275 	if (phy != 1) /* Only one PHY on kumeran bus */
   12276 		return -1;
   12277 
   12278 	rv = sc->phy.acquire(sc);
   12279 	if (rv != 0) {
   12280 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12281 		return rv;
   12282 	}
   12283 
   12284 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12285 		page_select = GG82563_PHY_PAGE_SELECT;
   12286 	else {
   12287 		/*
   12288 		 * Use Alternative Page Select register to access registers
   12289 		 * 30 and 31.
   12290 		 */
   12291 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12292 	}
   12293 	temp = reg >> GG82563_PAGE_SHIFT;
   12294 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12295 		goto out;
   12296 
   12297 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
		 */
   12302 		delay(200);
   12303 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12304 		if ((rv != 0) || (temp2 != temp)) {
   12305 			device_printf(dev, "%s failed\n", __func__);
   12306 			rv = -1;
   12307 			goto out;
   12308 		}
   12309 		delay(200);
   12310 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12311 		delay(200);
   12312 	} else
   12313 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12314 
   12315 out:
   12316 	sc->phy.release(sc);
   12317 	return rv;
   12318 }
   12319 
   12320 /*
   12321  * wm_gmii_i80003_writereg:	[mii interface function]
   12322  *
 *	Write a PHY register on the Kumeran bus.
   12324  * This could be handled by the PHY layer if we didn't have to lock the
   12325  * resource ...
   12326  */
   12327 static int
   12328 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12329 {
   12330 	struct wm_softc *sc = device_private(dev);
   12331 	int page_select, rv;
   12332 	uint16_t temp, temp2;
   12333 
   12334 	if (phy != 1) /* Only one PHY on kumeran bus */
   12335 		return -1;
   12336 
   12337 	rv = sc->phy.acquire(sc);
   12338 	if (rv != 0) {
   12339 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12340 		return rv;
   12341 	}
   12342 
   12343 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12344 		page_select = GG82563_PHY_PAGE_SELECT;
   12345 	else {
   12346 		/*
   12347 		 * Use Alternative Page Select register to access registers
   12348 		 * 30 and 31.
   12349 		 */
   12350 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12351 	}
   12352 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12353 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12354 		goto out;
   12355 
   12356 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
		 */
   12361 		delay(200);
   12362 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12363 		if ((rv != 0) || (temp2 != temp)) {
   12364 			device_printf(dev, "%s failed\n", __func__);
   12365 			rv = -1;
   12366 			goto out;
   12367 		}
   12368 		delay(200);
   12369 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12370 		delay(200);
   12371 	} else
   12372 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12373 
   12374 out:
   12375 	sc->phy.release(sc);
   12376 	return rv;
   12377 }
   12378 
   12379 /*
   12380  * wm_gmii_bm_readreg:	[mii interface function]
   12381  *
 *	Read a PHY register on a BM PHY.
   12383  * This could be handled by the PHY layer if we didn't have to lock the
   12384  * resource ...
   12385  */
   12386 static int
   12387 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12388 {
   12389 	struct wm_softc *sc = device_private(dev);
   12390 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12391 	int rv;
   12392 
   12393 	rv = sc->phy.acquire(sc);
   12394 	if (rv != 0) {
   12395 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12396 		return rv;
   12397 	}
   12398 
   12399 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12400 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12401 		    || (reg == 31)) ? 1 : phy;
   12402 	/* Page 800 works differently than the rest so it has its own func */
   12403 	if (page == BM_WUC_PAGE) {
   12404 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12405 		goto release;
   12406 	}
   12407 
   12408 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12409 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12410 		    && (sc->sc_type != WM_T_82583))
   12411 			rv = wm_gmii_mdic_writereg(dev, phy,
   12412 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12413 		else
   12414 			rv = wm_gmii_mdic_writereg(dev, phy,
   12415 			    BME1000_PHY_PAGE_SELECT, page);
   12416 		if (rv != 0)
   12417 			goto release;
   12418 	}
   12419 
   12420 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12421 
   12422 release:
   12423 	sc->phy.release(sc);
   12424 	return rv;
   12425 }
   12426 
   12427 /*
   12428  * wm_gmii_bm_writereg:	[mii interface function]
   12429  *
 *	Write a PHY register on a BM PHY.
   12431  * This could be handled by the PHY layer if we didn't have to lock the
   12432  * resource ...
   12433  */
   12434 static int
   12435 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12436 {
   12437 	struct wm_softc *sc = device_private(dev);
   12438 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12439 	int rv;
   12440 
   12441 	rv = sc->phy.acquire(sc);
   12442 	if (rv != 0) {
   12443 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12444 		return rv;
   12445 	}
   12446 
   12447 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12448 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12449 		    || (reg == 31)) ? 1 : phy;
   12450 	/* Page 800 works differently than the rest so it has its own func */
   12451 	if (page == BM_WUC_PAGE) {
   12452 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12453 		goto release;
   12454 	}
   12455 
   12456 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12457 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12458 		    && (sc->sc_type != WM_T_82583))
   12459 			rv = wm_gmii_mdic_writereg(dev, phy,
   12460 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12461 		else
   12462 			rv = wm_gmii_mdic_writereg(dev, phy,
   12463 			    BME1000_PHY_PAGE_SELECT, page);
   12464 		if (rv != 0)
   12465 			goto release;
   12466 	}
   12467 
   12468 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12469 
   12470 release:
   12471 	sc->phy.release(sc);
   12472 	return rv;
   12473 }
   12474 
   12475 /*
   12476  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12477  *  @dev: pointer to the HW structure
    12478  *  @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
   12479  *
    12480  *  Assumes the semaphore is already acquired and phy_regp points to a
    12481  *  valid memory address in which to store the contents of BM_WUC_ENABLE_REG.
   12482  */
   12483 static int
   12484 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12485 {
   12486 #ifdef WM_DEBUG
   12487 	struct wm_softc *sc = device_private(dev);
   12488 #endif
   12489 	uint16_t temp;
   12490 	int rv;
   12491 
   12492 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12493 		device_xname(dev), __func__));
   12494 
   12495 	if (!phy_regp)
   12496 		return -1;
   12497 
   12498 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12499 
   12500 	/* Select Port Control Registers page */
   12501 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12502 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12503 	if (rv != 0)
   12504 		return rv;
   12505 
   12506 	/* Read WUCE and save it */
   12507 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12508 	if (rv != 0)
   12509 		return rv;
   12510 
   12511 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12512 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12513 	 */
   12514 	temp = *phy_regp;
   12515 	temp |= BM_WUC_ENABLE_BIT;
   12516 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12517 
   12518 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12519 		return rv;
   12520 
   12521 	/* Select Host Wakeup Registers page - caller now able to write
   12522 	 * registers on the Wakeup registers page
   12523 	 */
   12524 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12525 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12526 }
   12527 
   12528 /*
   12529  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12530  *  @dev: pointer to the HW structure
    12531  *  @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
   12532  *
   12533  *  Restore BM_WUC_ENABLE_REG to its original value.
   12534  *
    12535  *  Assumes the semaphore is already acquired and *phy_regp holds the
    12536  *  contents of BM_WUC_ENABLE_REG from before the caller accessed
    12537  *  register(s) on BM_WUC_PAGE.
   12538  */
   12539 static int
   12540 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12541 {
   12542 #ifdef WM_DEBUG
   12543 	struct wm_softc *sc = device_private(dev);
   12544 #endif
   12545 
   12546 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12547 		device_xname(dev), __func__));
   12548 
   12549 	if (!phy_regp)
   12550 		return -1;
   12551 
   12552 	/* Select Port Control Registers page */
   12553 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12554 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12555 
   12556 	/* Restore 769.17 to its original value */
   12557 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12558 
   12559 	return 0;
   12560 }
   12561 
   12562 /*
   12563  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12564  *  @dev: pointer to the device
   12565  *  @offset: register offset to be read or written
   12566  *  @val: pointer to the data to read or write
   12567  *  @rd: determines if operation is read or write
   12568  *  @page_set: BM_WUC_PAGE already set and access enabled
   12569  *
   12570  *  Read the PHY register at offset and store the retrieved information in
   12571  *  data, or write data to PHY register at offset.  Note the procedure to
   12572  *  access the PHY wakeup registers is different than reading the other PHY
   12573  *  registers. It works as such:
   12574  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12575  *  2) Set page to 800 for host (801 if accessed from manageability firmware)
   12576  *  3) Write the address using the address opcode (0x11)
   12577  *  4) Read or write the data using the data opcode (0x12)
   12578  *  5) Restore 769.17.2 to its original value
   12579  *
   12580  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12581  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12582  *
   12583  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12584  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12585  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12586  */
   12587 static int
    12588 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12589     bool page_set)
   12590 {
   12591 	struct wm_softc *sc = device_private(dev);
   12592 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12593 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12594 	uint16_t wuce;
   12595 	int rv = 0;
   12596 
   12597 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12598 		device_xname(dev), __func__));
   12599 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12600 	if ((sc->sc_type == WM_T_PCH)
   12601 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12602 		device_printf(dev,
   12603 		    "Attempting to access page %d while gig enabled.\n", page);
   12604 	}
   12605 
   12606 	if (!page_set) {
   12607 		/* Enable access to PHY wakeup registers */
   12608 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12609 		if (rv != 0) {
   12610 			device_printf(dev,
   12611 			    "%s: Could not enable PHY wakeup reg access\n",
   12612 			    __func__);
   12613 			return rv;
   12614 		}
   12615 	}
   12616 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12617 		device_xname(sc->sc_dev), __func__, page, regnum));
   12618 
   12619 	/*
    12620 	 * Steps 3 and 4: access the PHY wakeup register, following
    12621 	 * the procedure described in the function comment above.
   12622 	 */
   12623 
   12624 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12625 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12626 	if (rv != 0)
   12627 		return rv;
   12628 
   12629 	if (rd) {
   12630 		/* Read the Wakeup register page value using opcode 0x12 */
   12631 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12632 	} else {
   12633 		/* Write the Wakeup register page value using opcode 0x12 */
   12634 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12635 	}
   12636 	if (rv != 0)
   12637 		return rv;
   12638 
   12639 	if (!page_set)
   12640 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12641 
   12642 	return rv;
   12643 }
   12644 
   12645 /*
   12646  * wm_gmii_hv_readreg:	[mii interface function]
   12647  *
    12648  *	Read a PHY register on the HV PHY.
   12649  * This could be handled by the PHY layer if we didn't have to lock the
   12650  * resource ...
   12651  */
   12652 static int
   12653 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12654 {
   12655 	struct wm_softc *sc = device_private(dev);
   12656 	int rv;
   12657 
   12658 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12659 		device_xname(dev), __func__));
   12660 
   12661 	rv = sc->phy.acquire(sc);
   12662 	if (rv != 0) {
   12663 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12664 		return rv;
   12665 	}
   12666 
   12667 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12668 	sc->phy.release(sc);
   12669 	return rv;
   12670 }
   12671 
   12672 static int
   12673 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12674 {
   12675 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12676 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12677 	int rv;
   12678 
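          	/* Pages >= HV_INTC_FC_PAGE_START are accessed via PHY address 1. */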
   12679 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12680 
   12681 	/* Page 800 works differently than the rest so it has its own func */
   12682 	if (page == BM_WUC_PAGE)
   12683 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12684 
   12685 	/*
    12686 	 * Pages 1 through 767 work differently from the rest and are
    12687 	 * not supported by this function.
   12688 	 */
   12689 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    12690 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12691 		return -1;
   12692 	}
   12693 
   12694 	/*
   12695 	 * XXX I21[789] documents say that the SMBus Address register is at
   12696 	 * PHY address 01, Page 0 (not 768), Register 26.
   12697 	 */
   12698 	if (page == HV_INTC_FC_PAGE_START)
   12699 		page = 0;
   12700 
   12701 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12702 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12703 		    page << BME1000_PAGE_SHIFT);
   12704 		if (rv != 0)
   12705 			return rv;
   12706 	}
   12707 
   12708 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12709 }
   12710 
   12711 /*
   12712  * wm_gmii_hv_writereg:	[mii interface function]
   12713  *
    12714  *	Write a PHY register on the HV PHY.
   12715  * This could be handled by the PHY layer if we didn't have to lock the
   12716  * resource ...
   12717  */
   12718 static int
   12719 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12720 {
   12721 	struct wm_softc *sc = device_private(dev);
   12722 	int rv;
   12723 
   12724 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12725 		device_xname(dev), __func__));
   12726 
   12727 	rv = sc->phy.acquire(sc);
   12728 	if (rv != 0) {
   12729 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12730 		return rv;
   12731 	}
   12732 
   12733 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12734 	sc->phy.release(sc);
   12735 
   12736 	return rv;
   12737 }
   12738 
   12739 static int
   12740 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12741 {
   12742 	struct wm_softc *sc = device_private(dev);
   12743 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12744 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12745 	int rv;
   12746 
   12747 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12748 
   12749 	/* Page 800 works differently than the rest so it has its own func */
   12750 	if (page == BM_WUC_PAGE)
   12751 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12752 		    false);
   12753 
   12754 	/*
    12755 	 * Pages 1 through 767 work differently from the rest and are
    12756 	 * not supported by this function.
   12757 	 */
   12758 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    12759 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   12760 		return -1;
   12761 	}
   12762 
    12764 	/*
    12765 	 * XXX I21[789] documents say that the SMBus Address register
    12766 	 * is at PHY address 01, Page 0 (not 768), Register 26.
    12767 	 */
    12768 	if (page == HV_INTC_FC_PAGE_START)
    12769 		page = 0;
    12770 
    12771 	/*
    12772 	 * XXX Workaround MDIO accesses being disabled after entering
    12773 	 * IEEE Power Down (whenever bit 11 of the PHY control
    12774 	 * register is set)
    12775 	 */
    12776 	if (sc->sc_phytype == WMPHY_82578) {
    12777 		struct mii_softc *child;
    12778 
    12779 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12780 		if ((child != NULL) && (child->mii_mpd_rev >= 1)
    12781 		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
    12782 		    && ((val & (1 << 11)) != 0)) {
    12783 			device_printf(dev, "XXX need workaround\n");
    12784 		}
    12785 	}
    12786 
    12787 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
    12788 		rv = wm_gmii_mdic_writereg(dev, 1,
    12789 		    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
    12790 		if (rv != 0)
    12791 			return rv;
    12792 	}
   12794 
   12795 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12796 }
   12797 
   12798 /*
   12799  * wm_gmii_82580_readreg:	[mii interface function]
   12800  *
   12801  *	Read a PHY register on the 82580 and I350.
   12802  * This could be handled by the PHY layer if we didn't have to lock the
   12803  * resource ...
   12804  */
   12805 static int
   12806 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12807 {
   12808 	struct wm_softc *sc = device_private(dev);
   12809 	int rv;
   12810 
   12811 	rv = sc->phy.acquire(sc);
   12812 	if (rv != 0) {
   12813 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12814 		return rv;
   12815 	}
   12816 
   12817 #ifdef DIAGNOSTIC
   12818 	if (reg > MII_ADDRMASK) {
   12819 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12820 		    __func__, sc->sc_phytype, reg);
   12821 		reg &= MII_ADDRMASK;
   12822 	}
   12823 #endif
   12824 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12825 
   12826 	sc->phy.release(sc);
   12827 	return rv;
   12828 }
   12829 
   12830 /*
   12831  * wm_gmii_82580_writereg:	[mii interface function]
   12832  *
   12833  *	Write a PHY register on the 82580 and I350.
   12834  * This could be handled by the PHY layer if we didn't have to lock the
   12835  * resource ...
   12836  */
   12837 static int
   12838 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12839 {
   12840 	struct wm_softc *sc = device_private(dev);
   12841 	int rv;
   12842 
   12843 	rv = sc->phy.acquire(sc);
   12844 	if (rv != 0) {
   12845 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12846 		return rv;
   12847 	}
   12848 
   12849 #ifdef DIAGNOSTIC
   12850 	if (reg > MII_ADDRMASK) {
   12851 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12852 		    __func__, sc->sc_phytype, reg);
   12853 		reg &= MII_ADDRMASK;
   12854 	}
   12855 #endif
   12856 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12857 
   12858 	sc->phy.release(sc);
   12859 	return rv;
   12860 }
   12861 
   12862 /*
   12863  * wm_gmii_gs40g_readreg:	[mii interface function]
   12864  *
    12865  *	Read a PHY register on the I210 and I211.
   12866  * This could be handled by the PHY layer if we didn't have to lock the
   12867  * resource ...
   12868  */
   12869 static int
   12870 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12871 {
   12872 	struct wm_softc *sc = device_private(dev);
   12873 	int page, offset;
   12874 	int rv;
   12875 
   12876 	/* Acquire semaphore */
   12877 	rv = sc->phy.acquire(sc);
   12878 	if (rv != 0) {
   12879 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12880 		return rv;
   12881 	}
   12882 
   12883 	/* Page select */
   12884 	page = reg >> GS40G_PAGE_SHIFT;
   12885 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12886 	if (rv != 0)
   12887 		goto release;
   12888 
   12889 	/* Read reg */
   12890 	offset = reg & GS40G_OFFSET_MASK;
   12891 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12892 
   12893 release:
   12894 	sc->phy.release(sc);
   12895 	return rv;
   12896 }
   12897 
   12898 /*
   12899  * wm_gmii_gs40g_writereg:	[mii interface function]
   12900  *
   12901  *	Write a PHY register on the I210 and I211.
   12902  * This could be handled by the PHY layer if we didn't have to lock the
   12903  * resource ...
   12904  */
   12905 static int
   12906 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12907 {
   12908 	struct wm_softc *sc = device_private(dev);
   12909 	uint16_t page;
   12910 	int offset, rv;
   12911 
   12912 	/* Acquire semaphore */
   12913 	rv = sc->phy.acquire(sc);
   12914 	if (rv != 0) {
   12915 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12916 		return rv;
   12917 	}
   12918 
   12919 	/* Page select */
   12920 	page = reg >> GS40G_PAGE_SHIFT;
   12921 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12922 	if (rv != 0)
   12923 		goto release;
   12924 
   12925 	/* Write reg */
   12926 	offset = reg & GS40G_OFFSET_MASK;
   12927 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12928 
   12929 release:
   12930 	/* Release semaphore */
   12931 	sc->phy.release(sc);
   12932 	return rv;
   12933 }
   12934 
   12935 /*
   12936  * wm_gmii_statchg:	[mii interface function]
   12937  *
   12938  *	Callback from MII layer when media changes.
   12939  */
   12940 static void
   12941 wm_gmii_statchg(struct ifnet *ifp)
   12942 {
   12943 	struct wm_softc *sc = ifp->if_softc;
   12944 	struct mii_data *mii = &sc->sc_mii;
   12945 
   12946 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12947 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12948 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12949 
   12950 	/* Get flow control negotiation result. */
   12951 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12952 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12953 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12954 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12955 	}
   12956 
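          	/* Program the negotiated pause settings into CTRL and FCRTL. */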
   12957 	if (sc->sc_flowflags & IFM_FLOW) {
   12958 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12959 			sc->sc_ctrl |= CTRL_TFCE;
   12960 			sc->sc_fcrtl |= FCRTL_XONE;
   12961 		}
   12962 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12963 			sc->sc_ctrl |= CTRL_RFCE;
   12964 	}
   12965 
   12966 	if (mii->mii_media_active & IFM_FDX) {
   12967 		DPRINTF(sc, WM_DEBUG_LINK,
   12968 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12969 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12970 	} else {
   12971 		DPRINTF(sc, WM_DEBUG_LINK,
   12972 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12973 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12974 	}
   12975 
   12976 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12977 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12978 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12979 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12980 	if (sc->sc_type == WM_T_80003) {
   12981 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12982 		case IFM_1000_T:
   12983 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12984 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12985 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12986 			break;
   12987 		default:
   12988 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12989 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12990 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12991 			break;
   12992 		}
   12993 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12994 	}
   12995 }
   12996 
   12997 /* kumeran related (80003, ICH* and PCH*) */
   12998 
   12999 /*
   13000  * wm_kmrn_readreg:
   13001  *
   13002  *	Read a kumeran register
   13003  */
   13004 static int
   13005 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13006 {
   13007 	int rv;
   13008 
   13009 	if (sc->sc_type == WM_T_80003)
   13010 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13011 	else
   13012 		rv = sc->phy.acquire(sc);
   13013 	if (rv != 0) {
   13014 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13015 		    __func__);
   13016 		return rv;
   13017 	}
   13018 
   13019 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13020 
   13021 	if (sc->sc_type == WM_T_80003)
   13022 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13023 	else
   13024 		sc->phy.release(sc);
   13025 
   13026 	return rv;
   13027 }
   13028 
   13029 static int
   13030 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13031 {
   13032 
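          	/*
          	 * Set the register offset and the Read ENable bit, then give
          	 * the hardware a moment to latch the result into the data
          	 * field of the same register.
          	 */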
   13033 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13034 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13035 	    KUMCTRLSTA_REN);
   13036 	CSR_WRITE_FLUSH(sc);
   13037 	delay(2);
   13038 
   13039 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13040 
   13041 	return 0;
   13042 }
   13043 
   13044 /*
   13045  * wm_kmrn_writereg:
   13046  *
   13047  *	Write a kumeran register
   13048  */
   13049 static int
   13050 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13051 {
   13052 	int rv;
   13053 
   13054 	if (sc->sc_type == WM_T_80003)
   13055 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13056 	else
   13057 		rv = sc->phy.acquire(sc);
   13058 	if (rv != 0) {
   13059 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13060 		    __func__);
   13061 		return rv;
   13062 	}
   13063 
   13064 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13065 
   13066 	if (sc->sc_type == WM_T_80003)
   13067 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13068 	else
   13069 		sc->phy.release(sc);
   13070 
   13071 	return rv;
   13072 }
   13073 
   13074 static int
   13075 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13076 {
   13077 
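          	/* A write is a single access: the offset plus the 16-bit data. */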
   13078 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13079 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13080 
   13081 	return 0;
   13082 }
   13083 
   13084 /*
    13085  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   13086  * This access method is different from IEEE MMD.
   13087  */
   13088 static int
   13089 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13090 {
   13091 	struct wm_softc *sc = device_private(dev);
   13092 	int rv;
   13093 
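          	/*
          	 * Indirect access: latch the target register in EMI_ADDR,
          	 * then move the data through EMI_DATA.
          	 */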
   13094 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13095 	if (rv != 0)
   13096 		return rv;
   13097 
   13098 	if (rd)
   13099 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13100 	else
   13101 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13102 	return rv;
   13103 }
   13104 
   13105 static int
   13106 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13107 {
   13108 
   13109 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13110 }
   13111 
   13112 static int
   13113 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13114 {
   13115 
   13116 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13117 }
   13118 
   13119 /* SGMII related */
   13120 
   13121 /*
   13122  * wm_sgmii_uses_mdio
   13123  *
   13124  * Check whether the transaction is to the internal PHY or the external
   13125  * MDIO interface. Return true if it's MDIO.
   13126  */
   13127 static bool
   13128 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13129 {
   13130 	uint32_t reg;
   13131 	bool ismdio = false;
   13132 
   13133 	switch (sc->sc_type) {
   13134 	case WM_T_82575:
   13135 	case WM_T_82576:
   13136 		reg = CSR_READ(sc, WMREG_MDIC);
   13137 		ismdio = ((reg & MDIC_DEST) != 0);
   13138 		break;
   13139 	case WM_T_82580:
   13140 	case WM_T_I350:
   13141 	case WM_T_I354:
   13142 	case WM_T_I210:
   13143 	case WM_T_I211:
   13144 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13145 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13146 		break;
   13147 	default:
   13148 		break;
   13149 	}
   13150 
   13151 	return ismdio;
   13152 }
   13153 
    13154 /* Set up the internal SGMII PHY for SFP */
   13155 static void
   13156 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13157 {
   13158 	uint16_t id1, id2, phyreg;
   13159 	int i, rv;
   13160 
   13161 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13162 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13163 		return;
   13164 
   13165 	for (i = 0; i < MII_NPHY; i++) {
   13166 		sc->phy.no_errprint = true;
   13167 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13168 		if (rv != 0)
   13169 			continue;
   13170 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13171 		if (rv != 0)
   13172 			continue;
   13173 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13174 			continue;
   13175 		sc->phy.no_errprint = false;
   13176 
   13177 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13178 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13179 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13180 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13181 		break;
   13182 	}
    13184 }
   13185 
   13186 /*
   13187  * wm_sgmii_readreg:	[mii interface function]
   13188  *
   13189  *	Read a PHY register on the SGMII
   13190  * This could be handled by the PHY layer if we didn't have to lock the
   13191  * resource ...
   13192  */
   13193 static int
   13194 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13195 {
   13196 	struct wm_softc *sc = device_private(dev);
   13197 	int rv;
   13198 
   13199 	rv = sc->phy.acquire(sc);
   13200 	if (rv != 0) {
   13201 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13202 		return rv;
   13203 	}
   13204 
   13205 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13206 
   13207 	sc->phy.release(sc);
   13208 	return rv;
   13209 }
   13210 
   13211 static int
   13212 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13213 {
   13214 	struct wm_softc *sc = device_private(dev);
   13215 	uint32_t i2ccmd;
   13216 	int i, rv = 0;
   13217 
   13218 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13219 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13220 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13221 
   13222 	/* Poll the ready bit */
   13223 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13224 		delay(50);
   13225 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13226 		if (i2ccmd & I2CCMD_READY)
   13227 			break;
   13228 	}
   13229 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13230 		device_printf(dev, "I2CCMD Read did not complete\n");
   13231 		rv = ETIMEDOUT;
   13232 	}
   13233 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13234 		if (!sc->phy.no_errprint)
   13235 			device_printf(dev, "I2CCMD Error bit set\n");
   13236 		rv = EIO;
   13237 	}
   13238 
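          	/* The I2C interface returns the two data bytes swapped; undo that. */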
   13239 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13240 
   13241 	return rv;
   13242 }
   13243 
   13244 /*
   13245  * wm_sgmii_writereg:	[mii interface function]
   13246  *
   13247  *	Write a PHY register on the SGMII.
   13248  * This could be handled by the PHY layer if we didn't have to lock the
   13249  * resource ...
   13250  */
   13251 static int
   13252 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13253 {
   13254 	struct wm_softc *sc = device_private(dev);
   13255 	int rv;
   13256 
   13257 	rv = sc->phy.acquire(sc);
   13258 	if (rv != 0) {
   13259 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13260 		return rv;
   13261 	}
   13262 
   13263 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13264 
   13265 	sc->phy.release(sc);
   13266 
   13267 	return rv;
   13268 }
   13269 
   13270 static int
   13271 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13272 {
   13273 	struct wm_softc *sc = device_private(dev);
   13274 	uint32_t i2ccmd;
   13275 	uint16_t swapdata;
   13276 	int rv = 0;
   13277 	int i;
   13278 
   13279 	/* Swap the data bytes for the I2C interface */
   13280 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13281 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13282 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13283 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13284 
   13285 	/* Poll the ready bit */
   13286 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13287 		delay(50);
   13288 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13289 		if (i2ccmd & I2CCMD_READY)
   13290 			break;
   13291 	}
   13292 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13293 		device_printf(dev, "I2CCMD Write did not complete\n");
   13294 		rv = ETIMEDOUT;
   13295 	}
   13296 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13297 		device_printf(dev, "I2CCMD Error bit set\n");
   13298 		rv = EIO;
   13299 	}
   13300 
   13301 	return rv;
   13302 }
   13303 
   13304 /* TBI related */
   13305 
   13306 static bool
   13307 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13308 {
   13309 	bool sig;
   13310 
   13311 	sig = ctrl & CTRL_SWDPIN(1);
   13312 
   13313 	/*
   13314 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13315 	 * detect a signal, 1 if they don't.
   13316 	 */
   13317 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13318 		sig = !sig;
   13319 
   13320 	return sig;
   13321 }
   13322 
   13323 /*
   13324  * wm_tbi_mediainit:
   13325  *
   13326  *	Initialize media for use on 1000BASE-X devices.
   13327  */
   13328 static void
   13329 wm_tbi_mediainit(struct wm_softc *sc)
   13330 {
   13331 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13332 	const char *sep = "";
   13333 
   13334 	if (sc->sc_type < WM_T_82543)
   13335 		sc->sc_tipg = TIPG_WM_DFLT;
   13336 	else
   13337 		sc->sc_tipg = TIPG_LG_DFLT;
   13338 
   13339 	sc->sc_tbi_serdes_anegticks = 5;
   13340 
   13341 	/* Initialize our media structures */
   13342 	sc->sc_mii.mii_ifp = ifp;
   13343 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13344 
   13345 	ifp->if_baudrate = IF_Gbps(1);
   13346 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13347 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13348 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13349 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13350 		    sc->sc_core_lock);
   13351 	} else {
   13352 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13353 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13354 	}
   13355 
   13356 	/*
   13357 	 * SWD Pins:
   13358 	 *
   13359 	 *	0 = Link LED (output)
   13360 	 *	1 = Loss Of Signal (input)
   13361 	 */
   13362 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13363 
   13364 	/* XXX Perhaps this is only for TBI */
   13365 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13366 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13367 
   13368 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13369 		sc->sc_ctrl &= ~CTRL_LRST;
   13370 
   13371 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13372 
   13373 #define	ADD(ss, mm, dd)							  \
   13374 do {									  \
   13375 	aprint_normal("%s%s", sep, ss);					  \
   13376 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13377 	sep = ", ";							  \
   13378 } while (/*CONSTCOND*/0)
   13379 
   13380 	aprint_normal_dev(sc->sc_dev, "");
   13381 
   13382 	if (sc->sc_type == WM_T_I354) {
   13383 		uint32_t status;
   13384 
   13385 		status = CSR_READ(sc, WMREG_STATUS);
   13386 		if (((status & STATUS_2P5_SKU) != 0)
   13387 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13388 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   13389 		} else
   13390 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   13391 	} else if (sc->sc_type == WM_T_82545) {
   13392 		/* Only 82545 is LX (XXX except SFP) */
   13393 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13394 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13395 	} else if (sc->sc_sfptype != 0) {
   13396 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13397 		switch (sc->sc_sfptype) {
   13398 		default:
   13399 		case SFF_SFP_ETH_FLAGS_1000SX:
   13400 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13401 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13402 			break;
   13403 		case SFF_SFP_ETH_FLAGS_1000LX:
   13404 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13405 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13406 			break;
   13407 		case SFF_SFP_ETH_FLAGS_1000CX:
   13408 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13409 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13410 			break;
   13411 		case SFF_SFP_ETH_FLAGS_1000T:
   13412 			ADD("1000baseT", IFM_1000_T, 0);
   13413 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13414 			break;
   13415 		case SFF_SFP_ETH_FLAGS_100FX:
   13416 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13417 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13418 			break;
   13419 		}
   13420 	} else {
   13421 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13422 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13423 	}
   13424 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13425 	aprint_normal("\n");
   13426 
   13427 #undef ADD
   13428 
   13429 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13430 }
   13431 
   13432 /*
   13433  * wm_tbi_mediachange:	[ifmedia interface function]
   13434  *
   13435  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13436  */
   13437 static int
   13438 wm_tbi_mediachange(struct ifnet *ifp)
   13439 {
   13440 	struct wm_softc *sc = ifp->if_softc;
   13441 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13442 	uint32_t status, ctrl;
   13443 	bool signal;
   13444 	int i;
   13445 
   13446 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13447 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13448 		/* XXX need some work for >= 82571 and < 82575 */
   13449 		if (sc->sc_type < WM_T_82575)
   13450 			return 0;
   13451 	}
   13452 
   13453 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13454 	    || (sc->sc_type >= WM_T_82575))
   13455 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13456 
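          	/* Take the link out of reset and advertise our abilities in TXCW. */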
   13457 	sc->sc_ctrl &= ~CTRL_LRST;
   13458 	sc->sc_txcw = TXCW_ANE;
   13459 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13460 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13461 	else if (ife->ifm_media & IFM_FDX)
   13462 		sc->sc_txcw |= TXCW_FD;
   13463 	else
   13464 		sc->sc_txcw |= TXCW_HD;
   13465 
   13466 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13467 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13468 
    13469 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13470 		device_xname(sc->sc_dev), sc->sc_txcw));
   13471 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13472 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13473 	CSR_WRITE_FLUSH(sc);
   13474 	delay(1000);
   13475 
   13476 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13477 	signal = wm_tbi_havesignal(sc, ctrl);
   13478 
   13479 	DPRINTF(sc, WM_DEBUG_LINK,
   13480 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13481 
   13482 	if (signal) {
   13483 		/* Have signal; wait for the link to come up. */
   13484 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13485 			delay(10000);
   13486 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13487 				break;
   13488 		}
   13489 
   13490 		DPRINTF(sc, WM_DEBUG_LINK,
   13491 		    ("%s: i = %d after waiting for link\n",
   13492 			device_xname(sc->sc_dev), i));
   13493 
   13494 		status = CSR_READ(sc, WMREG_STATUS);
   13495 		DPRINTF(sc, WM_DEBUG_LINK,
   13496 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13497 			__PRIxBIT "\n",
   13498 			device_xname(sc->sc_dev), status, STATUS_LU));
   13499 		if (status & STATUS_LU) {
   13500 			/* Link is up. */
   13501 			DPRINTF(sc, WM_DEBUG_LINK,
   13502 			    ("%s: LINK: set media -> link up %s\n",
   13503 				device_xname(sc->sc_dev),
   13504 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13505 
   13506 			/*
    13507 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13508 			 * automatically, so refresh sc->sc_ctrl from it.
   13509 			 */
   13510 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13511 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13512 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13513 			if (status & STATUS_FD)
   13514 				sc->sc_tctl |=
   13515 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13516 			else
   13517 				sc->sc_tctl |=
   13518 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13519 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13520 				sc->sc_fcrtl |= FCRTL_XONE;
   13521 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13522 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13523 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13524 			sc->sc_tbi_linkup = 1;
   13525 		} else {
   13526 			if (i == WM_LINKUP_TIMEOUT)
   13527 				wm_check_for_link(sc);
   13528 			/* Link is down. */
   13529 			DPRINTF(sc, WM_DEBUG_LINK,
   13530 			    ("%s: LINK: set media -> link down\n",
   13531 				device_xname(sc->sc_dev)));
   13532 			sc->sc_tbi_linkup = 0;
   13533 		}
   13534 	} else {
   13535 		DPRINTF(sc, WM_DEBUG_LINK,
   13536 		    ("%s: LINK: set media -> no signal\n",
   13537 			device_xname(sc->sc_dev)));
   13538 		sc->sc_tbi_linkup = 0;
   13539 	}
   13540 
   13541 	wm_tbi_serdes_set_linkled(sc);
   13542 
   13543 	return 0;
   13544 }
   13545 
   13546 /*
   13547  * wm_tbi_mediastatus:	[ifmedia interface function]
   13548  *
   13549  *	Get the current interface media status on a 1000BASE-X device.
   13550  */
   13551 static void
   13552 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13553 {
   13554 	struct wm_softc *sc = ifp->if_softc;
   13555 	uint32_t ctrl, status;
   13556 
   13557 	ifmr->ifm_status = IFM_AVALID;
   13558 	ifmr->ifm_active = IFM_ETHER;
   13559 
   13560 	status = CSR_READ(sc, WMREG_STATUS);
   13561 	if ((status & STATUS_LU) == 0) {
   13562 		ifmr->ifm_active |= IFM_NONE;
   13563 		return;
   13564 	}
   13565 
   13566 	ifmr->ifm_status |= IFM_ACTIVE;
   13567 	/* Only 82545 is LX */
   13568 	if (sc->sc_type == WM_T_82545)
   13569 		ifmr->ifm_active |= IFM_1000_LX;
   13570 	else
   13571 		ifmr->ifm_active |= IFM_1000_SX;
   13572 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13573 		ifmr->ifm_active |= IFM_FDX;
   13574 	else
   13575 		ifmr->ifm_active |= IFM_HDX;
   13576 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13577 	if (ctrl & CTRL_RFCE)
   13578 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13579 	if (ctrl & CTRL_TFCE)
   13580 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13581 }
   13582 
   13583 /* XXX TBI only */
   13584 static int
   13585 wm_check_for_link(struct wm_softc *sc)
   13586 {
   13587 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13588 	uint32_t rxcw;
   13589 	uint32_t ctrl;
   13590 	uint32_t status;
   13591 	bool signal;
   13592 
   13593 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13594 		device_xname(sc->sc_dev), __func__));
   13595 
   13596 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13597 		/* XXX need some work for >= 82571 */
   13598 		if (sc->sc_type >= WM_T_82571) {
   13599 			sc->sc_tbi_linkup = 1;
   13600 			return 0;
   13601 		}
   13602 	}
   13603 
   13604 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13605 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13606 	status = CSR_READ(sc, WMREG_STATUS);
   13607 	signal = wm_tbi_havesignal(sc, ctrl);
   13608 
   13609 	DPRINTF(sc, WM_DEBUG_LINK,
   13610 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13611 		device_xname(sc->sc_dev), __func__, signal,
   13612 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13613 
   13614 	/*
   13615 	 * SWDPIN   LU RXCW
   13616 	 *	0    0	  0
   13617 	 *	0    0	  1	(should not happen)
   13618 	 *	0    1	  0	(should not happen)
   13619 	 *	0    1	  1	(should not happen)
   13620 	 *	1    0	  0	Disable autonego and force linkup
   13621 	 *	1    0	  1	got /C/ but not linkup yet
   13622 	 *	1    1	  0	(linkup)
   13623 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13624 	 *
   13625 	 */
   13626 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13627 		DPRINTF(sc, WM_DEBUG_LINK,
   13628 		    ("%s: %s: force linkup and fullduplex\n",
   13629 			device_xname(sc->sc_dev), __func__));
   13630 		sc->sc_tbi_linkup = 0;
   13631 		/* Disable auto-negotiation in the TXCW register */
   13632 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13633 
   13634 		/*
   13635 		 * Force link-up and also force full-duplex.
   13636 		 *
    13637 		 * NOTE: The hardware updated TFCE and RFCE in CTRL,
    13638 		 * so rebuild sc->sc_ctrl from the value read above.
   13639 		 */
   13640 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13641 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13642 	} else if (((status & STATUS_LU) != 0)
   13643 	    && ((rxcw & RXCW_C) != 0)
   13644 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13645 		sc->sc_tbi_linkup = 1;
   13646 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13647 			device_xname(sc->sc_dev), __func__));
   13648 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13649 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13650 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13651 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13652 			device_xname(sc->sc_dev), __func__));
   13653 	} else {
   13654 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13655 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13656 			status));
   13657 	}
   13658 
   13659 	return 0;
   13660 }
   13661 
   13662 /*
   13663  * wm_tbi_tick:
   13664  *
   13665  *	Check the link on TBI devices.
   13666  *	This function acts as mii_tick().
   13667  */
   13668 static void
   13669 wm_tbi_tick(struct wm_softc *sc)
   13670 {
   13671 	struct mii_data *mii = &sc->sc_mii;
   13672 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13673 	uint32_t status;
   13674 
   13675 	KASSERT(mutex_owned(sc->sc_core_lock));
   13676 
   13677 	status = CSR_READ(sc, WMREG_STATUS);
   13678 
   13679 	/* XXX is this needed? */
   13680 	(void)CSR_READ(sc, WMREG_RXCW);
   13681 	(void)CSR_READ(sc, WMREG_CTRL);
   13682 
   13683 	/* set link status */
   13684 	if ((status & STATUS_LU) == 0) {
   13685 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13686 			device_xname(sc->sc_dev)));
   13687 		sc->sc_tbi_linkup = 0;
   13688 	} else if (sc->sc_tbi_linkup == 0) {
   13689 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13690 			device_xname(sc->sc_dev),
   13691 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13692 		sc->sc_tbi_linkup = 1;
   13693 		sc->sc_tbi_serdes_ticks = 0;
   13694 	}
   13695 
   13696 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13697 		goto setled;
   13698 
   13699 	if ((status & STATUS_LU) == 0) {
   13700 		sc->sc_tbi_linkup = 0;
   13701 		/* If the timer expired, retry autonegotiation */
   13702 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13703 		    && (++sc->sc_tbi_serdes_ticks
   13704 			>= sc->sc_tbi_serdes_anegticks)) {
   13705 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13706 				device_xname(sc->sc_dev), __func__));
   13707 			sc->sc_tbi_serdes_ticks = 0;
   13708 			/*
   13709 			 * Reset the link, and let autonegotiation do
   13710 			 * its thing
   13711 			 */
   13712 			sc->sc_ctrl |= CTRL_LRST;
   13713 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13714 			CSR_WRITE_FLUSH(sc);
   13715 			delay(1000);
   13716 			sc->sc_ctrl &= ~CTRL_LRST;
   13717 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13718 			CSR_WRITE_FLUSH(sc);
   13719 			delay(1000);
   13720 			CSR_WRITE(sc, WMREG_TXCW,
   13721 			    sc->sc_txcw & ~TXCW_ANE);
   13722 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13723 		}
   13724 	}
   13725 
   13726 setled:
   13727 	wm_tbi_serdes_set_linkled(sc);
   13728 }
   13729 
   13730 /* SERDES related */
   13731 static void
   13732 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13733 {
   13734 	uint32_t reg;
   13735 
   13736 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13737 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13738 		return;
   13739 
   13740 	/* Enable PCS to turn on link */
   13741 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13742 	reg |= PCS_CFG_PCS_EN;
   13743 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13744 
   13745 	/* Power up the laser */
   13746 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13747 	reg &= ~CTRL_EXT_SWDPIN(3);
   13748 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13749 
   13750 	/* Flush the write to verify completion */
   13751 	CSR_WRITE_FLUSH(sc);
   13752 	delay(1000);
   13753 }
   13754 
   13755 static int
   13756 wm_serdes_mediachange(struct ifnet *ifp)
   13757 {
   13758 	struct wm_softc *sc = ifp->if_softc;
   13759 	bool pcs_autoneg = true; /* XXX */
   13760 	uint32_t ctrl_ext, pcs_lctl, reg;
   13761 
   13762 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13763 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13764 		return 0;
   13765 
   13766 	/* XXX Currently, this function is not called on 8257[12] */
   13767 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13768 	    || (sc->sc_type >= WM_T_82575))
   13769 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13770 
   13771 	/* Power on the sfp cage if present */
   13772 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13773 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13774 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13775 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13776 
   13777 	sc->sc_ctrl |= CTRL_SLU;
   13778 
   13779 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13780 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13781 
   13782 		reg = CSR_READ(sc, WMREG_CONNSW);
   13783 		reg |= CONNSW_ENRGSRC;
   13784 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13785 	}
   13786 
   13787 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13788 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13789 	case CTRL_EXT_LINK_MODE_SGMII:
   13790 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13791 		pcs_autoneg = true;
   13792 		/* Autoneg time out should be disabled for SGMII mode */
   13793 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13794 		break;
   13795 	case CTRL_EXT_LINK_MODE_1000KX:
   13796 		pcs_autoneg = false;
   13797 		/* FALLTHROUGH */
   13798 	default:
   13799 		if ((sc->sc_type == WM_T_82575)
   13800 		    || (sc->sc_type == WM_T_82576)) {
   13801 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13802 				pcs_autoneg = false;
   13803 		}
   13804 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13805 		    | CTRL_FRCFDX;
   13806 
   13807 		/* Set speed of 1000/Full if speed/duplex is forced */
   13808 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13809 	}
   13810 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13811 
   13812 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13813 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13814 
   13815 	if (pcs_autoneg) {
   13816 		/* Set PCS register for autoneg */
   13817 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13818 
   13819 		/* Disable force flow control for autoneg */
   13820 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13821 
   13822 		/* Configure flow control advertisement for autoneg */
   13823 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13824 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13825 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13826 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13827 	} else
   13828 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13829 
   13830 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13831 
   13832 	return 0;
   13833 }
   13834 
   13835 static void
   13836 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13837 {
   13838 	struct wm_softc *sc = ifp->if_softc;
   13839 	struct mii_data *mii = &sc->sc_mii;
   13840 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13841 	uint32_t pcs_adv, pcs_lpab, reg;
   13842 
   13843 	ifmr->ifm_status = IFM_AVALID;
   13844 	ifmr->ifm_active = IFM_ETHER;
   13845 
   13846 	/* Check PCS */
   13847 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13848 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13849 		ifmr->ifm_active |= IFM_NONE;
   13850 		sc->sc_tbi_linkup = 0;
   13851 		goto setled;
   13852 	}
   13853 
   13854 	sc->sc_tbi_linkup = 1;
   13855 	ifmr->ifm_status |= IFM_ACTIVE;
   13856 	if (sc->sc_type == WM_T_I354) {
   13857 		uint32_t status;
   13858 
   13859 		status = CSR_READ(sc, WMREG_STATUS);
   13860 		if (((status & STATUS_2P5_SKU) != 0)
   13861 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13862 			ifmr->ifm_active |= IFM_2500_KX;
   13863 		} else
   13864 			ifmr->ifm_active |= IFM_1000_KX;
   13865 	} else {
   13866 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13867 		case PCS_LSTS_SPEED_10:
   13868 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13869 			break;
   13870 		case PCS_LSTS_SPEED_100:
   13871 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13872 			break;
   13873 		case PCS_LSTS_SPEED_1000:
   13874 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13875 			break;
   13876 		default:
   13877 			device_printf(sc->sc_dev, "Unknown speed\n");
   13878 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13879 			break;
   13880 		}
   13881 	}
   13882 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13883 	if ((reg & PCS_LSTS_FDX) != 0)
   13884 		ifmr->ifm_active |= IFM_FDX;
   13885 	else
   13886 		ifmr->ifm_active |= IFM_HDX;
   13887 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13888 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13889 		/* Check flow */
   13890 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13891 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13892 			DPRINTF(sc, WM_DEBUG_LINK,
   13893 			    ("XXX LINKOK but not ACOMP\n"));
   13894 			goto setled;
   13895 		}
   13896 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13897 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13898 		DPRINTF(sc, WM_DEBUG_LINK,
   13899 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
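          		/*
          		 * Resolve pause per IEEE 802.3 Annex 28B: symmetric
          		 * pause on both sides enables flow control in both
          		 * directions; otherwise the asymmetric-pause bits can
          		 * enable pause in one direction only.
          		 */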
   13900 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13901 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13902 			mii->mii_media_active |= IFM_FLOW
   13903 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13904 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13905 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13906 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13907 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13908 			mii->mii_media_active |= IFM_FLOW
   13909 			    | IFM_ETH_TXPAUSE;
   13910 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13911 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13912 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13913 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13914 			mii->mii_media_active |= IFM_FLOW
   13915 			    | IFM_ETH_RXPAUSE;
   13916 		}
   13917 	}
   13918 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13919 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13920 setled:
   13921 	wm_tbi_serdes_set_linkled(sc);
   13922 }
   13923 
   13924 /*
   13925  * wm_serdes_tick:
   13926  *
   13927  *	Check the link on serdes devices.
   13928  */
   13929 static void
   13930 wm_serdes_tick(struct wm_softc *sc)
   13931 {
   13932 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13933 	struct mii_data *mii = &sc->sc_mii;
   13934 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13935 	uint32_t reg;
   13936 
   13937 	KASSERT(mutex_owned(sc->sc_core_lock));
   13938 
   13939 	mii->mii_media_status = IFM_AVALID;
   13940 	mii->mii_media_active = IFM_ETHER;
   13941 
   13942 	/* Check PCS */
   13943 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13944 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13945 		mii->mii_media_status |= IFM_ACTIVE;
   13946 		sc->sc_tbi_linkup = 1;
   13947 		sc->sc_tbi_serdes_ticks = 0;
   13948 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13949 		if ((reg & PCS_LSTS_FDX) != 0)
   13950 			mii->mii_media_active |= IFM_FDX;
   13951 		else
   13952 			mii->mii_media_active |= IFM_HDX;
   13953 	} else {
    13954 		mii->mii_media_active |= IFM_NONE;
   13955 		sc->sc_tbi_linkup = 0;
   13956 		/* If the timer expired, retry autonegotiation */
   13957 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13958 		    && (++sc->sc_tbi_serdes_ticks
   13959 			>= sc->sc_tbi_serdes_anegticks)) {
   13960 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13961 				device_xname(sc->sc_dev), __func__));
   13962 			sc->sc_tbi_serdes_ticks = 0;
   13963 			/* XXX */
   13964 			wm_serdes_mediachange(ifp);
   13965 		}
   13966 	}
   13967 
   13968 	wm_tbi_serdes_set_linkled(sc);
   13969 }
   13970 
   13971 /* SFP related */
   13972 
   13973 static int
   13974 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13975 {
   13976 	uint32_t i2ccmd;
   13977 	int i;
   13978 
   13979 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13980 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13981 
   13982 	/* Poll the ready bit */
   13983 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13984 		delay(50);
   13985 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13986 		if (i2ccmd & I2CCMD_READY)
   13987 			break;
   13988 	}
   13989 	if ((i2ccmd & I2CCMD_READY) == 0)
   13990 		return -1;
   13991 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13992 		return -1;
   13993 
   13994 	*data = i2ccmd & 0x00ff;
   13995 
   13996 	return 0;
   13997 }
   13998 
   13999 static uint32_t
   14000 wm_sfp_get_media_type(struct wm_softc *sc)
   14001 {
   14002 	uint32_t ctrl_ext;
   14003 	uint8_t val = 0;
   14004 	int timeout = 3;
   14005 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14006 	int rv = -1;
   14007 
   14008 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14009 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14010 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14011 	CSR_WRITE_FLUSH(sc);
   14012 
   14013 	/* Read SFP module data */
   14014 	while (timeout) {
   14015 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14016 		if (rv == 0)
   14017 			break;
   14018 		delay(100*1000); /* XXX too big */
   14019 		timeout--;
   14020 	}
   14021 	if (rv != 0)
   14022 		goto out;
   14023 
   14024 	switch (val) {
   14025 	case SFF_SFP_ID_SFF:
   14026 		aprint_normal_dev(sc->sc_dev,
   14027 		    "Module/Connector soldered to board\n");
   14028 		break;
   14029 	case SFF_SFP_ID_SFP:
   14030 		sc->sc_flags |= WM_F_SFP;
   14031 		break;
   14032 	case SFF_SFP_ID_UNKNOWN:
   14033 		goto out;
   14034 	default:
   14035 		break;
   14036 	}
   14037 
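          	/*
          	 * The Ethernet compliance codes in the SFP ROM tell us which
          	 * media the module supports.
          	 */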
   14038 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14039 	if (rv != 0)
   14040 		goto out;
   14041 
   14042 	sc->sc_sfptype = val;
   14043 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14044 		mediatype = WM_MEDIATYPE_SERDES;
   14045 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14046 		sc->sc_flags |= WM_F_SGMII;
   14047 		mediatype = WM_MEDIATYPE_COPPER;
   14048 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14049 		sc->sc_flags |= WM_F_SGMII;
   14050 		mediatype = WM_MEDIATYPE_SERDES;
   14051 	} else {
   14052 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14053 		    __func__, sc->sc_sfptype);
   14054 		sc->sc_sfptype = 0; /* XXX unknown */
   14055 	}
   14056 
   14057 out:
   14058 	/* Restore I2C interface setting */
   14059 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14060 
   14061 	return mediatype;
   14062 }
   14063 
   14064 /*
   14065  * NVM related.
    14066  * Microwire, SPI (with or without EERD), and Flash.
   14067  */
   14068 
   14069 /* Both spi and uwire */
   14070 
   14071 /*
   14072  * wm_eeprom_sendbits:
   14073  *
   14074  *	Send a series of bits to the EEPROM.
   14075  */
   14076 static void
   14077 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14078 {
   14079 	uint32_t reg;
   14080 	int x;
   14081 
   14082 	reg = CSR_READ(sc, WMREG_EECD);
   14083 
   14084 	for (x = nbits; x > 0; x--) {
   14085 		if (bits & (1U << (x - 1)))
   14086 			reg |= EECD_DI;
   14087 		else
   14088 			reg &= ~EECD_DI;
   14089 		CSR_WRITE(sc, WMREG_EECD, reg);
   14090 		CSR_WRITE_FLUSH(sc);
   14091 		delay(2);
   14092 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14093 		CSR_WRITE_FLUSH(sc);
   14094 		delay(2);
   14095 		CSR_WRITE(sc, WMREG_EECD, reg);
   14096 		CSR_WRITE_FLUSH(sc);
   14097 		delay(2);
   14098 	}
   14099 }
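
/*
 * Timing sketch (derived from the 2us delays above, not a measured
 * figure): each bit costs three 2us phases (data setup, SK high, SK
 * low), i.e. about 6us per bit, so the bit-banged clock runs at
 * roughly 166kHz, well below the clock limits of these serial EEPROMs.
 */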
   14100 
   14101 /*
   14102  * wm_eeprom_recvbits:
   14103  *
   14104  *	Receive a series of bits from the EEPROM.
   14105  */
   14106 static void
   14107 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14108 {
   14109 	uint32_t reg, val;
   14110 	int x;
   14111 
   14112 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14113 
   14114 	val = 0;
   14115 	for (x = nbits; x > 0; x--) {
   14116 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14117 		CSR_WRITE_FLUSH(sc);
   14118 		delay(2);
   14119 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14120 			val |= (1U << (x - 1));
   14121 		CSR_WRITE(sc, WMREG_EECD, reg);
   14122 		CSR_WRITE_FLUSH(sc);
   14123 		delay(2);
   14124 	}
   14125 	*valp = val;
   14126 }
   14127 
   14128 /* Microwire */
   14129 
   14130 /*
   14131  * wm_nvm_read_uwire:
   14132  *
   14133  *	Read a word from the EEPROM using the MicroWire protocol.
   14134  */
   14135 static int
   14136 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14137 {
   14138 	uint32_t reg, val;
   14139 	int i, rv;
   14140 
   14141 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14142 		device_xname(sc->sc_dev), __func__));
   14143 
   14144 	rv = sc->nvm.acquire(sc);
   14145 	if (rv != 0)
   14146 		return rv;
   14147 
   14148 	for (i = 0; i < wordcnt; i++) {
   14149 		/* Clear SK and DI. */
   14150 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14151 		CSR_WRITE(sc, WMREG_EECD, reg);
   14152 
   14153 		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior,
		 * and in Xen.
   14156 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
   14159 		 */
   14160 		if (sc->sc_type == WM_T_82540) {
   14161 			reg |= EECD_SK;
   14162 			CSR_WRITE(sc, WMREG_EECD, reg);
   14163 			reg &= ~EECD_SK;
   14164 			CSR_WRITE(sc, WMREG_EECD, reg);
   14165 			CSR_WRITE_FLUSH(sc);
   14166 			delay(2);
   14167 		}
   14168 		/* XXX: end of workaround */
   14169 
   14170 		/* Set CHIP SELECT. */
   14171 		reg |= EECD_CS;
   14172 		CSR_WRITE(sc, WMREG_EECD, reg);
   14173 		CSR_WRITE_FLUSH(sc);
   14174 		delay(2);
   14175 
   14176 		/* Shift in the READ command. */
   14177 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14178 
   14179 		/* Shift in address. */
   14180 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14181 
   14182 		/* Shift out the data. */
   14183 		wm_eeprom_recvbits(sc, &val, 16);
   14184 		data[i] = val & 0xffff;
   14185 
   14186 		/* Clear CHIP SELECT. */
   14187 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14188 		CSR_WRITE(sc, WMREG_EECD, reg);
   14189 		CSR_WRITE_FLUSH(sc);
   14190 		delay(2);
   14191 	}
   14192 
   14193 	sc->nvm.release(sc);
   14194 	return 0;
   14195 }
   14196 
   14197 /* SPI */
   14198 
   14199 /*
   14200  * Set SPI and FLASH related information from the EECD register.
   14201  * For 82541 and 82547, the word size is taken from EEPROM.
   14202  */
   14203 static int
   14204 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14205 {
   14206 	int size;
   14207 	uint32_t reg;
   14208 	uint16_t data;
   14209 
   14210 	reg = CSR_READ(sc, WMREG_EECD);
   14211 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14212 
   14213 	/* Read the size of NVM from EECD by default */
   14214 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14215 	switch (sc->sc_type) {
   14216 	case WM_T_82541:
   14217 	case WM_T_82541_2:
   14218 	case WM_T_82547:
   14219 	case WM_T_82547_2:
   14220 		/* Set dummy value to access EEPROM */
   14221 		sc->sc_nvm_wordsize = 64;
   14222 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14223 			aprint_error_dev(sc->sc_dev,
   14224 			    "%s: failed to read EEPROM size\n", __func__);
   14225 		}
   14226 		reg = data;
   14227 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14228 		if (size == 0)
			size = 6; /* 1 << 6 = 64 words */
   14230 		else
   14231 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14232 		break;
   14233 	case WM_T_80003:
   14234 	case WM_T_82571:
   14235 	case WM_T_82572:
   14236 	case WM_T_82573: /* SPI case */
   14237 	case WM_T_82574: /* SPI case */
   14238 	case WM_T_82583: /* SPI case */
   14239 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14240 		if (size > 14)
   14241 			size = 14;
   14242 		break;
   14243 	case WM_T_82575:
   14244 	case WM_T_82576:
   14245 	case WM_T_82580:
   14246 	case WM_T_I350:
   14247 	case WM_T_I354:
   14248 	case WM_T_I210:
   14249 	case WM_T_I211:
   14250 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14251 		if (size > 15)
   14252 			size = 15;
   14253 		break;
   14254 	default:
   14255 		aprint_error_dev(sc->sc_dev,
   14256 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   14259 	}
   14260 
   14261 	sc->sc_nvm_wordsize = 1 << size;
   14262 
   14263 	return 0;
   14264 }
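
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * Intel shared code): an 82571 whose EECD size field reads 2 yields
 * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.  The
 * clamps above cap SPI devices at 1 << 14 words and 82575 and newer
 * at 1 << 15 words.
 */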
   14265 
   14266 /*
   14267  * wm_nvm_ready_spi:
   14268  *
   14269  *	Wait for a SPI EEPROM to be ready for commands.
   14270  */
   14271 static int
   14272 wm_nvm_ready_spi(struct wm_softc *sc)
   14273 {
   14274 	uint32_t val;
   14275 	int usec;
   14276 
   14277 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14278 		device_xname(sc->sc_dev), __func__));
   14279 
   14280 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14281 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14282 		wm_eeprom_recvbits(sc, &val, 8);
   14283 		if ((val & SPI_SR_RDY) == 0)
   14284 			break;
   14285 	}
   14286 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   14288 		return -1;
   14289 	}
   14290 	return 0;
   14291 }
   14292 
   14293 /*
   14294  * wm_nvm_read_spi:
   14295  *
 *	Read a word from the EEPROM using the SPI protocol.
   14297  */
   14298 static int
   14299 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14300 {
   14301 	uint32_t reg, val;
   14302 	int i;
   14303 	uint8_t opc;
   14304 	int rv;
   14305 
   14306 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14307 		device_xname(sc->sc_dev), __func__));
   14308 
   14309 	rv = sc->nvm.acquire(sc);
   14310 	if (rv != 0)
   14311 		return rv;
   14312 
   14313 	/* Clear SK and CS. */
   14314 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14315 	CSR_WRITE(sc, WMREG_EECD, reg);
   14316 	CSR_WRITE_FLUSH(sc);
   14317 	delay(2);
   14318 
   14319 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14320 		goto out;
   14321 
   14322 	/* Toggle CS to flush commands. */
   14323 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14324 	CSR_WRITE_FLUSH(sc);
   14325 	delay(2);
   14326 	CSR_WRITE(sc, WMREG_EECD, reg);
   14327 	CSR_WRITE_FLUSH(sc);
   14328 	delay(2);
   14329 
   14330 	opc = SPI_OPC_READ;
   14331 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14332 		opc |= SPI_OPC_A8;
   14333 
   14334 	wm_eeprom_sendbits(sc, opc, 8);
   14335 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14336 
   14337 	for (i = 0; i < wordcnt; i++) {
   14338 		wm_eeprom_recvbits(sc, &val, 16);
   14339 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14340 	}
   14341 
   14342 	/* Raise CS and clear SK. */
   14343 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14344 	CSR_WRITE(sc, WMREG_EECD, reg);
   14345 	CSR_WRITE_FLUSH(sc);
   14346 	delay(2);
   14347 
   14348 out:
   14349 	sc->nvm.release(sc);
   14350 	return rv;
   14351 }
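
/*
 * Note on the receive loop above: the SPI EEPROM shifts each 16-bit
 * word out most significant byte first, so the two bytes are swapped
 * on receive to rebuild the word in the same layout the Microwire
 * path produces.  The address is sent as "word << 1" because SPI
 * parts are byte addressed and each NVM word is two bytes.
 */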
   14352 
/* Reading with EERD */
   14354 
   14355 static int
   14356 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14357 {
   14358 	uint32_t attempts = 100000;
   14359 	uint32_t i, reg = 0;
   14360 	int32_t done = -1;
   14361 
   14362 	for (i = 0; i < attempts; i++) {
   14363 		reg = CSR_READ(sc, rw);
   14364 
   14365 		if (reg & EERD_DONE) {
   14366 			done = 0;
   14367 			break;
   14368 		}
   14369 		delay(5);
   14370 	}
   14371 
   14372 	return done;
   14373 }
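
/*
 * Polling budget: 100000 attempts with a 5us delay each bound the
 * wait for EERD/EEWR completion at roughly half a second before -1
 * is returned.
 */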
   14374 
   14375 static int
   14376 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14377 {
   14378 	int i, eerd = 0;
   14379 	int rv;
   14380 
   14381 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14382 		device_xname(sc->sc_dev), __func__));
   14383 
   14384 	rv = sc->nvm.acquire(sc);
   14385 	if (rv != 0)
   14386 		return rv;
   14387 
   14388 	for (i = 0; i < wordcnt; i++) {
   14389 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14390 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14391 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14392 		if (rv != 0) {
   14393 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   14395 			break;
   14396 		}
   14397 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14398 	}
   14399 
   14400 	sc->nvm.release(sc);
   14401 	return rv;
   14402 }
   14403 
   14404 /* Flash */
   14405 
   14406 static int
   14407 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14408 {
   14409 	uint32_t eecd;
   14410 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14411 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14412 	uint32_t nvm_dword = 0;
   14413 	uint8_t sig_byte = 0;
   14414 	int rv;
   14415 
   14416 	switch (sc->sc_type) {
   14417 	case WM_T_PCH_SPT:
   14418 	case WM_T_PCH_CNP:
   14419 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14420 		act_offset = ICH_NVM_SIG_WORD * 2;
   14421 
   14422 		/* Set bank to 0 in case flash read fails. */
   14423 		*bank = 0;
   14424 
   14425 		/* Check bank 0 */
   14426 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14427 		if (rv != 0)
   14428 			return rv;
   14429 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14430 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14431 			*bank = 0;
   14432 			return 0;
   14433 		}
   14434 
   14435 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
   14438 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14439 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14440 			*bank = 1;
   14441 			return 0;
   14442 		}
   14443 		aprint_error_dev(sc->sc_dev,
   14444 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14445 		return -1;
   14446 	case WM_T_ICH8:
   14447 	case WM_T_ICH9:
   14448 		eecd = CSR_READ(sc, WMREG_EECD);
   14449 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14450 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14451 			return 0;
   14452 		}
   14453 		/* FALLTHROUGH */
   14454 	default:
   14455 		/* Default to 0 */
   14456 		*bank = 0;
   14457 
   14458 		/* Check bank 0 */
   14459 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14460 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14461 			*bank = 0;
   14462 			return 0;
   14463 		}
   14464 
   14465 		/* Check bank 1 */
   14466 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14467 		    &sig_byte);
   14468 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14469 			*bank = 1;
   14470 			return 0;
   14471 		}
   14472 	}
   14473 
   14474 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14475 		device_xname(sc->sc_dev)));
   14476 	return -1;
   14477 }
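
/*
 * Layout sketch (illustrative numbers): with an sc_ich8_flash_bank_size
 * of 0x800 words, bank 1 starts 0x1000 bytes after bank 0, and the
 * signature probed above lives in word ICH_NVM_SIG_WORD of each bank;
 * whichever bank carries ICH_NVM_SIG_VALUE holds the currently valid
 * NVM image.
 */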
   14478 
   14479 /******************************************************************************
   14480  * This function does initial flash setup so that a new read/write/erase cycle
   14481  * can be started.
   14482  *
   14483  * sc - The pointer to the hw structure
   14484  ****************************************************************************/
   14485 static int32_t
   14486 wm_ich8_cycle_init(struct wm_softc *sc)
   14487 {
   14488 	uint16_t hsfsts;
   14489 	int32_t error = 1;
   14490 	int32_t i     = 0;
   14491 
   14492 	if (sc->sc_type >= WM_T_PCH_SPT)
   14493 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14494 	else
   14495 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14496 
	/* Check the Flash Descriptor Valid bit in the HW status register */
   14498 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14499 		return error;
   14500 
	/* Clear FCERR and DAEL in HW status by writing 1 to each */
   14503 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14504 
   14505 	if (sc->sc_type >= WM_T_PCH_SPT)
   14506 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14507 	else
   14508 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14509 
	/*
	 * Ideally the hardware would provide either a cycle-in-progress
	 * bit to check before starting a new cycle, or an FDONE bit that
	 * reads as 1 after hardware reset so it could indicate whether a
	 * cycle is in progress or has completed.  There should also be a
	 * software semaphore guarding FDONE or the cycle-in-progress bit,
	 * so that two threads' accesses to those bits are serialized and
	 * they cannot start a cycle at the same time.
	 */
   14520 
   14521 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14522 		/*
   14523 		 * There is no cycle running at present, so we can start a
   14524 		 * cycle
   14525 		 */
   14526 
   14527 		/* Begin by setting Flash Cycle Done. */
   14528 		hsfsts |= HSFSTS_DONE;
   14529 		if (sc->sc_type >= WM_T_PCH_SPT)
   14530 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14531 			    hsfsts & 0xffffUL);
   14532 		else
   14533 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14534 		error = 0;
   14535 	} else {
   14536 		/*
   14537 		 * Otherwise poll for sometime so the current cycle has a
   14538 		 * chance to end before giving up.
   14539 		 */
   14540 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14541 			if (sc->sc_type >= WM_T_PCH_SPT)
   14542 				hsfsts = ICH8_FLASH_READ32(sc,
   14543 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14544 			else
   14545 				hsfsts = ICH8_FLASH_READ16(sc,
   14546 				    ICH_FLASH_HSFSTS);
   14547 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14548 				error = 0;
   14549 				break;
   14550 			}
   14551 			delay(1);
   14552 		}
   14553 		if (error == 0) {
			/*
			 * The previous cycle completed before we gave up;
			 * now set the Flash Cycle Done.
			 */
   14558 			hsfsts |= HSFSTS_DONE;
   14559 			if (sc->sc_type >= WM_T_PCH_SPT)
   14560 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14561 				    hsfsts & 0xffffUL);
   14562 			else
   14563 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14564 				    hsfsts);
   14565 		}
   14566 	}
   14567 	return error;
   14568 }
   14569 
   14570 /******************************************************************************
   14571  * This function starts a flash cycle and waits for its completion
   14572  *
   14573  * sc - The pointer to the hw structure
   14574  ****************************************************************************/
   14575 static int32_t
   14576 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14577 {
   14578 	uint16_t hsflctl;
   14579 	uint16_t hsfsts;
   14580 	int32_t error = 1;
   14581 	uint32_t i = 0;
   14582 
   14583 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14584 	if (sc->sc_type >= WM_T_PCH_SPT)
   14585 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14586 	else
   14587 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14588 	hsflctl |= HSFCTL_GO;
   14589 	if (sc->sc_type >= WM_T_PCH_SPT)
   14590 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14591 		    (uint32_t)hsflctl << 16);
   14592 	else
   14593 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14594 
   14595 	/* Wait till FDONE bit is set to 1 */
   14596 	do {
   14597 		if (sc->sc_type >= WM_T_PCH_SPT)
   14598 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14599 			    & 0xffffUL;
   14600 		else
   14601 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14602 		if (hsfsts & HSFSTS_DONE)
   14603 			break;
   14604 		delay(1);
   14605 		i++;
   14606 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14608 		error = 0;
   14609 
   14610 	return error;
   14611 }
   14612 
   14613 /******************************************************************************
   14614  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14615  *
   14616  * sc - The pointer to the hw structure
   14617  * index - The index of the byte or word to read.
   14618  * size - Size of data to read, 1=byte 2=word, 4=dword
   14619  * data - Pointer to the word to store the value read.
   14620  *****************************************************************************/
   14621 static int32_t
   14622 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14623     uint32_t size, uint32_t *data)
   14624 {
   14625 	uint16_t hsfsts;
   14626 	uint16_t hsflctl;
   14627 	uint32_t flash_linear_address;
   14628 	uint32_t flash_data = 0;
   14629 	int32_t error = 1;
   14630 	int32_t count = 0;
   14631 
	if (size < 1 || size > 4 || data == NULL ||
   14633 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14634 		return error;
   14635 
   14636 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14637 	    sc->sc_ich8_flash_base;
   14638 
   14639 	do {
   14640 		delay(1);
   14641 		/* Steps */
   14642 		error = wm_ich8_cycle_init(sc);
   14643 		if (error)
   14644 			break;
   14645 
   14646 		if (sc->sc_type >= WM_T_PCH_SPT)
   14647 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14648 			    >> 16;
   14649 		else
   14650 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/*
		 * BCOUNT is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes.
		 */
   14652 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14653 		    & HSFCTL_BCOUNT_MASK;
   14654 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14655 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14656 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is supported.
   14659 			 */
   14660 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14661 			    (uint32_t)hsflctl << 16);
   14662 		} else
   14663 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14664 
   14665 		/*
   14666 		 * Write the last 24 bits of index into Flash Linear address
   14667 		 * field in Flash Address
   14668 		 */
   14669 		/* TODO: TBD maybe check the index against the size of flash */
   14670 
   14671 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14672 
   14673 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14674 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read (shift in) the Flash Data0
		 * register, least significant byte first.
		 */
   14681 		if (error == 0) {
   14682 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14683 			if (size == 1)
   14684 				*data = (uint8_t)(flash_data & 0x000000FF);
   14685 			else if (size == 2)
   14686 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14687 			else if (size == 4)
   14688 				*data = (uint32_t)flash_data;
   14689 			break;
   14690 		} else {
   14691 			/*
   14692 			 * If we've gotten here, then things are probably
   14693 			 * completely hosed, but if the error condition is
   14694 			 * detected, it won't hurt to give it another try...
   14695 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14696 			 */
   14697 			if (sc->sc_type >= WM_T_PCH_SPT)
   14698 				hsfsts = ICH8_FLASH_READ32(sc,
   14699 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14700 			else
   14701 				hsfsts = ICH8_FLASH_READ16(sc,
   14702 				    ICH_FLASH_HSFSTS);
   14703 
   14704 			if (hsfsts & HSFSTS_ERR) {
   14705 				/* Repeat for some time before giving up. */
   14706 				continue;
   14707 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14708 				break;
   14709 		}
   14710 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14711 
   14712 	return error;
   14713 }
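
/*
 * On PCH_SPT and newer, HSFSTS occupies the low 16 bits and HSFCTL
 * the high 16 bits of one 32-bit register in LAN memory space, which
 * is why the code above reads HSFCTL with ">> 16" and writes it back
 * with "<< 16" instead of doing 16-bit flash-space accesses.
 */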
   14714 
   14715 /******************************************************************************
   14716  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14717  *
   14718  * sc - pointer to wm_hw structure
   14719  * index - The index of the byte to read.
   14720  * data - Pointer to a byte to store the value read.
   14721  *****************************************************************************/
   14722 static int32_t
   14723 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14724 {
   14725 	int32_t status;
   14726 	uint32_t word = 0;
   14727 
   14728 	status = wm_read_ich8_data(sc, index, 1, &word);
   14729 	if (status == 0)
   14730 		*data = (uint8_t)word;
   14731 	else
   14732 		*data = 0;
   14733 
   14734 	return status;
   14735 }
   14736 
   14737 /******************************************************************************
   14738  * Reads a word from the NVM using the ICH8 flash access registers.
   14739  *
   14740  * sc - pointer to wm_hw structure
   14741  * index - The starting byte index of the word to read.
   14742  * data - Pointer to a word to store the value read.
   14743  *****************************************************************************/
   14744 static int32_t
   14745 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14746 {
   14747 	int32_t status;
   14748 	uint32_t word = 0;
   14749 
   14750 	status = wm_read_ich8_data(sc, index, 2, &word);
   14751 	if (status == 0)
   14752 		*data = (uint16_t)word;
   14753 	else
   14754 		*data = 0;
   14755 
   14756 	return status;
   14757 }
   14758 
   14759 /******************************************************************************
   14760  * Reads a dword from the NVM using the ICH8 flash access registers.
   14761  *
   14762  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   14765  *****************************************************************************/
   14766 static int32_t
   14767 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14768 {
   14769 	int32_t status;
   14770 
   14771 	status = wm_read_ich8_data(sc, index, 4, data);
   14772 	return status;
   14773 }
   14774 
   14775 /******************************************************************************
   14776  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14777  * register.
   14778  *
   14779  * sc - Struct containing variables accessed by shared code
   14780  * offset - offset of word in the EEPROM to read
   14781  * data - word read from the EEPROM
   14782  * words - number of words to read
   14783  *****************************************************************************/
   14784 static int
   14785 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14786 {
   14787 	int rv;
   14788 	uint32_t flash_bank = 0;
   14789 	uint32_t act_offset = 0;
   14790 	uint32_t bank_offset = 0;
   14791 	uint16_t word = 0;
   14792 	uint16_t i = 0;
   14793 
   14794 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14795 		device_xname(sc->sc_dev), __func__));
   14796 
   14797 	rv = sc->nvm.acquire(sc);
   14798 	if (rv != 0)
   14799 		return rv;
   14800 
   14801 	/*
   14802 	 * We need to know which is the valid flash bank.  In the event
   14803 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14804 	 * managing flash_bank. So it cannot be trusted and needs
   14805 	 * to be updated with each read.
   14806 	 */
   14807 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14808 	if (rv) {
   14809 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14810 			device_xname(sc->sc_dev)));
   14811 		flash_bank = 0;
   14812 	}
   14813 
   14814 	/*
   14815 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14816 	 * size
   14817 	 */
   14818 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14819 
   14820 	for (i = 0; i < words; i++) {
   14821 		/* The NVM part needs a byte offset, hence * 2 */
   14822 		act_offset = bank_offset + ((offset + i) * 2);
   14823 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14824 		if (rv) {
   14825 			aprint_error_dev(sc->sc_dev,
   14826 			    "%s: failed to read NVM\n", __func__);
   14827 			break;
   14828 		}
   14829 		data[i] = word;
   14830 	}
   14831 
   14832 	sc->nvm.release(sc);
   14833 	return rv;
   14834 }
   14835 
   14836 /******************************************************************************
   14837  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14838  * register.
   14839  *
   14840  * sc - Struct containing variables accessed by shared code
   14841  * offset - offset of word in the EEPROM to read
   14842  * data - word read from the EEPROM
   14843  * words - number of words to read
   14844  *****************************************************************************/
   14845 static int
   14846 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14847 {
   14848 	int	 rv;
   14849 	uint32_t flash_bank = 0;
   14850 	uint32_t act_offset = 0;
   14851 	uint32_t bank_offset = 0;
   14852 	uint32_t dword = 0;
   14853 	uint16_t i = 0;
   14854 
   14855 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14856 		device_xname(sc->sc_dev), __func__));
   14857 
   14858 	rv = sc->nvm.acquire(sc);
   14859 	if (rv != 0)
   14860 		return rv;
   14861 
   14862 	/*
   14863 	 * We need to know which is the valid flash bank.  In the event
   14864 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14865 	 * managing flash_bank. So it cannot be trusted and needs
   14866 	 * to be updated with each read.
   14867 	 */
   14868 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14869 	if (rv) {
   14870 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14871 			device_xname(sc->sc_dev)));
   14872 		flash_bank = 0;
   14873 	}
   14874 
   14875 	/*
   14876 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14877 	 * size
   14878 	 */
   14879 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14880 
   14881 	for (i = 0; i < words; i++) {
   14882 		/* The NVM part needs a byte offset, hence * 2 */
   14883 		act_offset = bank_offset + ((offset + i) * 2);
   14884 		/* but we must read dword aligned, so mask ... */
   14885 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14886 		if (rv) {
   14887 			aprint_error_dev(sc->sc_dev,
   14888 			    "%s: failed to read NVM\n", __func__);
   14889 			break;
   14890 		}
   14891 		/* ... and pick out low or high word */
   14892 		if ((act_offset & 0x2) == 0)
   14893 			data[i] = (uint16_t)(dword & 0xFFFF);
   14894 		else
   14895 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14896 	}
   14897 
   14898 	sc->nvm.release(sc);
   14899 	return rv;
   14900 }
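
/*
 * Worked example: reading NVM word 3 from bank 0 gives act_offset 6;
 * the dword at byte offset 4 (6 & ~0x3) is fetched and, because
 * (6 & 0x2) != 0, its upper 16 bits are returned as the word value.
 */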
   14901 
   14902 /* iNVM */
   14903 
   14904 static int
   14905 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14906 {
	int32_t	 rv = -1;	/* not found unless a matching record is seen */
   14908 	uint32_t invm_dword;
   14909 	uint16_t i;
   14910 	uint8_t record_type, word_address;
   14911 
   14912 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14913 		device_xname(sc->sc_dev), __func__));
   14914 
   14915 	for (i = 0; i < INVM_SIZE; i++) {
   14916 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14917 		/* Get record type */
   14918 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14919 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14920 			break;
   14921 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14922 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14923 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14924 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14925 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14926 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14927 			if (word_address == address) {
   14928 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14929 				rv = 0;
   14930 				break;
   14931 			}
   14932 		}
   14933 	}
   14934 
   14935 	return rv;
   14936 }
   14937 
   14938 static int
   14939 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14940 {
   14941 	int i, rv;
   14942 
   14943 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14944 		device_xname(sc->sc_dev), __func__));
   14945 
   14946 	rv = sc->nvm.acquire(sc);
   14947 	if (rv != 0)
   14948 		return rv;
   14949 
   14950 	for (i = 0; i < words; i++) {
   14951 		switch (offset + i) {
   14952 		case NVM_OFF_MACADDR:
   14953 		case NVM_OFF_MACADDR1:
   14954 		case NVM_OFF_MACADDR2:
   14955 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14956 			if (rv != 0) {
   14957 				data[i] = 0xffff;
   14958 				rv = -1;
   14959 			}
   14960 			break;
   14961 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14962 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14963 			if (rv != 0) {
   14964 				*data = INVM_DEFAULT_AL;
   14965 				rv = 0;
   14966 			}
   14967 			break;
   14968 		case NVM_OFF_CFG2:
   14969 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14970 			if (rv != 0) {
   14971 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14972 				rv = 0;
   14973 			}
   14974 			break;
   14975 		case NVM_OFF_CFG4:
   14976 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14977 			if (rv != 0) {
   14978 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14979 				rv = 0;
   14980 			}
   14981 			break;
   14982 		case NVM_OFF_LED_1_CFG:
   14983 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14984 			if (rv != 0) {
   14985 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14986 				rv = 0;
   14987 			}
   14988 			break;
   14989 		case NVM_OFF_LED_0_2_CFG:
   14990 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14991 			if (rv != 0) {
   14992 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14993 				rv = 0;
   14994 			}
   14995 			break;
   14996 		case NVM_OFF_ID_LED_SETTINGS:
   14997 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14998 			if (rv != 0) {
   14999 				*data = ID_LED_RESERVED_FFFF;
   15000 				rv = 0;
   15001 			}
   15002 			break;
   15003 		default:
   15004 			DPRINTF(sc, WM_DEBUG_NVM,
   15005 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15006 			*data = NVM_RESERVED_WORD;
   15007 			break;
   15008 		}
   15009 	}
   15010 
   15011 	sc->nvm.release(sc);
   15012 	return rv;
   15013 }
   15014 
/* Locking, NVM type detection, checksum validation, version and read */
   15016 
   15017 static int
   15018 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15019 {
   15020 	uint32_t eecd = 0;
   15021 
   15022 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15023 	    || sc->sc_type == WM_T_82583) {
   15024 		eecd = CSR_READ(sc, WMREG_EECD);
   15025 
   15026 		/* Isolate bits 15 & 16 */
   15027 		eecd = ((eecd >> 15) & 0x03);
   15028 
   15029 		/* If both bits are set, device is Flash type */
   15030 		if (eecd == 0x03)
   15031 			return 0;
   15032 	}
   15033 	return 1;
   15034 }
   15035 
   15036 static int
   15037 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15038 {
   15039 	uint32_t eec;
   15040 
   15041 	eec = CSR_READ(sc, WMREG_EEC);
   15042 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15043 		return 1;
   15044 
   15045 	return 0;
   15046 }
   15047 
   15048 /*
   15049  * wm_nvm_validate_checksum
   15050  *
 * The checksum word is set so that the 16-bit sum of the first 64
 * words, including the checksum word itself, equals NVM_CHECKSUM.
   15052  */
   15053 static int
   15054 wm_nvm_validate_checksum(struct wm_softc *sc)
   15055 {
   15056 	uint16_t checksum;
   15057 	uint16_t eeprom_data;
   15058 #ifdef WM_DEBUG
   15059 	uint16_t csum_wordaddr, valid_checksum;
   15060 #endif
   15061 	int i;
   15062 
   15063 	checksum = 0;
   15064 
   15065 	/* Don't check for I211 */
   15066 	if (sc->sc_type == WM_T_I211)
   15067 		return 0;
   15068 
   15069 #ifdef WM_DEBUG
   15070 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   15071 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   15072 		csum_wordaddr = NVM_OFF_COMPAT;
   15073 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15074 	} else {
   15075 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15076 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15077 	}
   15078 
   15079 	/* Dump EEPROM image for debug */
   15080 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15081 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15082 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15083 		/* XXX PCH_SPT? */
   15084 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15085 		if ((eeprom_data & valid_checksum) == 0)
   15086 			DPRINTF(sc, WM_DEBUG_NVM,
   15087 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15088 				device_xname(sc->sc_dev), eeprom_data,
   15089 				valid_checksum));
   15090 	}
   15091 
   15092 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15093 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15094 		for (i = 0; i < NVM_SIZE; i++) {
   15095 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15096 				printf("XXXX ");
   15097 			else
   15098 				printf("%04hx ", eeprom_data);
   15099 			if (i % 8 == 7)
   15100 				printf("\n");
   15101 		}
   15102 	}
   15103 
   15104 #endif /* WM_DEBUG */
   15105 
   15106 	for (i = 0; i < NVM_SIZE; i++) {
   15107 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15108 			return -1;
   15109 		checksum += eeprom_data;
   15110 	}
   15111 
   15112 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15113 #ifdef WM_DEBUG
   15114 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15115 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15116 #endif
   15117 	}
   15118 
   15119 	return 0;
   15120 }
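
/*
 * Example: the checksum word is chosen at manufacturing time so that
 * the 16-bit sum of the whole image comes out to NVM_CHECKSUM (0xbaba);
 * an image summing to 0xbab9 would indicate corruption somewhere.
 * Note that the function above only reports a mismatch under WM_DEBUG
 * and still returns 0, so a bad checksum is not treated as fatal.
 */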
   15121 
   15122 static void
   15123 wm_nvm_version_invm(struct wm_softc *sc)
   15124 {
   15125 	uint32_t dword;
   15126 
   15127 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document says.
   15130 	 * Perhaps it's not perfect though...
   15131 	 *
   15132 	 * Example:
   15133 	 *
   15134 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15135 	 */
   15136 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15137 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15138 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15139 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15140 }
   15141 
   15142 static void
   15143 wm_nvm_version(struct wm_softc *sc)
   15144 {
   15145 	uint16_t major, minor, build, patch;
   15146 	uint16_t uid0, uid1;
   15147 	uint16_t nvm_data;
   15148 	uint16_t off;
   15149 	bool check_version = false;
   15150 	bool check_optionrom = false;
   15151 	bool have_build = false;
   15152 	bool have_uid = true;
   15153 
   15154 	/*
   15155 	 * Version format:
   15156 	 *
   15157 	 * XYYZ
   15158 	 * X0YZ
   15159 	 * X0YY
   15160 	 *
   15161 	 * Example:
   15162 	 *
   15163 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15164 	 *	82571	0x50a6	5.10.6?
   15165 	 *	82572	0x506a	5.6.10?
   15166 	 *	82572EI	0x5069	5.6.9?
   15167 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15168 	 *		0x2013	2.1.3?
   15169 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15170 	 * ICH8+82567	0x0040	0.4.0?
   15171 	 * ICH9+82566	0x1040	1.4.0?
   15172 	 *ICH10+82567	0x0043	0.4.3?
   15173 	 *  PCH+82577	0x00c1	0.12.1?
   15174 	 * PCH2+82579	0x00d3	0.13.3?
   15175 	 *		0x00d4	0.13.4?
   15176 	 *  LPT+I218	0x0023	0.2.3?
   15177 	 *  SPT+I219	0x0084	0.8.4?
   15178 	 *  CNP+I219	0x0054	0.5.4?
   15179 	 */
   15180 
   15181 	/*
   15182 	 * XXX
	 * The SPI ROM of qemu's e1000e emulation (82574L) has only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   15185 	 */
   15186 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15187 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15188 		have_uid = false;
   15189 
   15190 	switch (sc->sc_type) {
   15191 	case WM_T_82571:
   15192 	case WM_T_82572:
   15193 	case WM_T_82574:
   15194 	case WM_T_82583:
   15195 		check_version = true;
   15196 		check_optionrom = true;
   15197 		have_build = true;
   15198 		break;
   15199 	case WM_T_ICH8:
   15200 	case WM_T_ICH9:
   15201 	case WM_T_ICH10:
   15202 	case WM_T_PCH:
   15203 	case WM_T_PCH2:
   15204 	case WM_T_PCH_LPT:
   15205 	case WM_T_PCH_SPT:
   15206 	case WM_T_PCH_CNP:
   15207 		check_version = true;
   15208 		have_build = true;
   15209 		have_uid = false;
   15210 		break;
   15211 	case WM_T_82575:
   15212 	case WM_T_82576:
   15213 	case WM_T_82580:
   15214 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15215 			check_version = true;
   15216 		break;
   15217 	case WM_T_I211:
   15218 		wm_nvm_version_invm(sc);
   15219 		have_uid = false;
   15220 		goto printver;
   15221 	case WM_T_I210:
   15222 		if (!wm_nvm_flash_presence_i210(sc)) {
   15223 			wm_nvm_version_invm(sc);
   15224 			have_uid = false;
   15225 			goto printver;
   15226 		}
   15227 		/* FALLTHROUGH */
   15228 	case WM_T_I350:
   15229 	case WM_T_I354:
   15230 		check_version = true;
   15231 		check_optionrom = true;
   15232 		break;
   15233 	default:
   15234 		return;
   15235 	}
   15236 	if (check_version
   15237 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15238 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15239 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15240 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15241 			build = nvm_data & NVM_BUILD_MASK;
   15242 			have_build = true;
   15243 		} else
   15244 			minor = nvm_data & 0x00ff;
   15245 
   15246 		/* Decimal */
   15247 		minor = (minor / 16) * 10 + (minor % 16);
   15248 		sc->sc_nvm_ver_major = major;
   15249 		sc->sc_nvm_ver_minor = minor;
   15250 
   15251 printver:
   15252 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15253 		    sc->sc_nvm_ver_minor);
   15254 		if (have_build) {
   15255 			sc->sc_nvm_ver_build = build;
   15256 			aprint_verbose(".%d", build);
   15257 		}
   15258 	}
   15259 
	/* Assume the Option ROM area is above NVM_SIZE */
   15261 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15262 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15263 		/* Option ROM Version */
   15264 		if ((off != 0x0000) && (off != 0xffff)) {
   15265 			int rv;
   15266 
   15267 			off += NVM_COMBO_VER_OFF;
   15268 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15269 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15270 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15271 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15272 				/* 16bits */
   15273 				major = uid0 >> 8;
   15274 				build = (uid0 << 8) | (uid1 >> 8);
   15275 				patch = uid1 & 0x00ff;
   15276 				aprint_verbose(", option ROM Version %d.%d.%d",
   15277 				    major, build, patch);
   15278 			}
   15279 		}
   15280 	}
   15281 
   15282 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15283 		aprint_verbose(", Image Unique ID %08x",
   15284 		    ((uint32_t)uid1 << 16) | uid0);
   15285 }
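
/*
 * Decode example (using the 4-bit major / 8-bit minor / 4-bit build
 * split implied by the table above): 82571 word 0x50a2 splits into
 * major 0x5, minor 0x0a and build 0x2; the BCD-style conversion maps
 * minor 0x0a to decimal 10, so the version prints as 5.10.2.
 */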
   15286 
   15287 /*
   15288  * wm_nvm_read:
   15289  *
   15290  *	Read data from the serial EEPROM.
   15291  */
   15292 static int
   15293 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15294 {
   15295 	int rv;
   15296 
   15297 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15298 		device_xname(sc->sc_dev), __func__));
   15299 
   15300 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15301 		return -1;
   15302 
   15303 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15304 
   15305 	return rv;
   15306 }
   15307 
   15308 /*
   15309  * Hardware semaphores.
 * Very complex...
   15311  */
   15312 
   15313 static int
   15314 wm_get_null(struct wm_softc *sc)
   15315 {
   15316 
   15317 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15318 		device_xname(sc->sc_dev), __func__));
   15319 	return 0;
   15320 }
   15321 
   15322 static void
   15323 wm_put_null(struct wm_softc *sc)
   15324 {
   15325 
   15326 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15327 		device_xname(sc->sc_dev), __func__));
   15328 	return;
   15329 }
   15330 
   15331 static int
   15332 wm_get_eecd(struct wm_softc *sc)
   15333 {
   15334 	uint32_t reg;
   15335 	int x;
   15336 
   15337 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15338 		device_xname(sc->sc_dev), __func__));
   15339 
   15340 	reg = CSR_READ(sc, WMREG_EECD);
   15341 
   15342 	/* Request EEPROM access. */
   15343 	reg |= EECD_EE_REQ;
   15344 	CSR_WRITE(sc, WMREG_EECD, reg);
   15345 
   15346 	/* ..and wait for it to be granted. */
   15347 	for (x = 0; x < 1000; x++) {
   15348 		reg = CSR_READ(sc, WMREG_EECD);
   15349 		if (reg & EECD_EE_GNT)
   15350 			break;
   15351 		delay(5);
   15352 	}
   15353 	if ((reg & EECD_EE_GNT) == 0) {
   15354 		aprint_error_dev(sc->sc_dev,
   15355 		    "could not acquire EEPROM GNT\n");
   15356 		reg &= ~EECD_EE_REQ;
   15357 		CSR_WRITE(sc, WMREG_EECD, reg);
   15358 		return -1;
   15359 	}
   15360 
   15361 	return 0;
   15362 }
   15363 
   15364 static void
   15365 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15366 {
   15367 
   15368 	*eecd |= EECD_SK;
   15369 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15370 	CSR_WRITE_FLUSH(sc);
   15371 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15372 		delay(1);
   15373 	else
   15374 		delay(50);
   15375 }
   15376 
   15377 static void
   15378 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15379 {
   15380 
   15381 	*eecd &= ~EECD_SK;
   15382 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15383 	CSR_WRITE_FLUSH(sc);
   15384 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15385 		delay(1);
   15386 	else
   15387 		delay(50);
   15388 }
   15389 
   15390 static void
   15391 wm_put_eecd(struct wm_softc *sc)
   15392 {
   15393 	uint32_t reg;
   15394 
   15395 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15396 		device_xname(sc->sc_dev), __func__));
   15397 
   15398 	/* Stop nvm */
   15399 	reg = CSR_READ(sc, WMREG_EECD);
   15400 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15401 		/* Pull CS high */
   15402 		reg |= EECD_CS;
   15403 		wm_nvm_eec_clock_lower(sc, &reg);
   15404 	} else {
   15405 		/* CS on Microwire is active-high */
   15406 		reg &= ~(EECD_CS | EECD_DI);
   15407 		CSR_WRITE(sc, WMREG_EECD, reg);
   15408 		wm_nvm_eec_clock_raise(sc, &reg);
   15409 		wm_nvm_eec_clock_lower(sc, &reg);
   15410 	}
   15411 
   15412 	reg = CSR_READ(sc, WMREG_EECD);
   15413 	reg &= ~EECD_EE_REQ;
   15414 	CSR_WRITE(sc, WMREG_EECD, reg);
   15415 
   15416 	return;
   15417 }
   15418 
   15419 /*
   15420  * Get hardware semaphore.
   15421  * Same as e1000_get_hw_semaphore_generic()
   15422  */
   15423 static int
   15424 wm_get_swsm_semaphore(struct wm_softc *sc)
   15425 {
   15426 	int32_t timeout;
   15427 	uint32_t swsm;
   15428 
   15429 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15430 		device_xname(sc->sc_dev), __func__));
   15431 	KASSERT(sc->sc_nvm_wordsize > 0);
   15432 
   15433 retry:
   15434 	/* Get the SW semaphore. */
   15435 	timeout = sc->sc_nvm_wordsize + 1;
   15436 	while (timeout) {
   15437 		swsm = CSR_READ(sc, WMREG_SWSM);
   15438 
   15439 		if ((swsm & SWSM_SMBI) == 0)
   15440 			break;
   15441 
   15442 		delay(50);
   15443 		timeout--;
   15444 	}
   15445 
   15446 	if (timeout == 0) {
   15447 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15448 			/*
   15449 			 * In rare circumstances, the SW semaphore may already
   15450 			 * be held unintentionally. Clear the semaphore once
   15451 			 * before giving up.
   15452 			 */
   15453 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15454 			wm_put_swsm_semaphore(sc);
   15455 			goto retry;
   15456 		}
   15457 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15458 		return -1;
   15459 	}
   15460 
   15461 	/* Get the FW semaphore. */
   15462 	timeout = sc->sc_nvm_wordsize + 1;
   15463 	while (timeout) {
   15464 		swsm = CSR_READ(sc, WMREG_SWSM);
   15465 		swsm |= SWSM_SWESMBI;
   15466 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15467 		/* If we managed to set the bit we got the semaphore. */
   15468 		swsm = CSR_READ(sc, WMREG_SWSM);
   15469 		if (swsm & SWSM_SWESMBI)
   15470 			break;
   15471 
   15472 		delay(50);
   15473 		timeout--;
   15474 	}
   15475 
   15476 	if (timeout == 0) {
   15477 		aprint_error_dev(sc->sc_dev,
   15478 		    "could not acquire SWSM SWESMBI\n");
   15479 		/* Release semaphores */
   15480 		wm_put_swsm_semaphore(sc);
   15481 		return -1;
   15482 	}
   15483 	return 0;
   15484 }
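
/*
 * The acquisition above is a two-stage handshake: SMBI arbitrates
 * among software agents and SWESMBI between software and firmware.
 * Writing SWESMBI and reading it back confirms the write took effect,
 * since the bit stays clear while firmware owns the semaphore.  Each
 * stage polls up to sc_nvm_wordsize + 1 times at 50us per attempt.
 */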
   15485 
   15486 /*
   15487  * Put hardware semaphore.
   15488  * Same as e1000_put_hw_semaphore_generic()
   15489  */
   15490 static void
   15491 wm_put_swsm_semaphore(struct wm_softc *sc)
   15492 {
   15493 	uint32_t swsm;
   15494 
   15495 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15496 		device_xname(sc->sc_dev), __func__));
   15497 
   15498 	swsm = CSR_READ(sc, WMREG_SWSM);
   15499 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15500 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15501 }
   15502 
   15503 /*
   15504  * Get SW/FW semaphore.
   15505  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15506  */
   15507 static int
   15508 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15509 {
   15510 	uint32_t swfw_sync;
   15511 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15512 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15513 	int timeout;
   15514 
   15515 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15516 		device_xname(sc->sc_dev), __func__));
   15517 
   15518 	if (sc->sc_type == WM_T_80003)
   15519 		timeout = 50;
   15520 	else
   15521 		timeout = 200;
   15522 
   15523 	while (timeout) {
   15524 		if (wm_get_swsm_semaphore(sc)) {
   15525 			aprint_error_dev(sc->sc_dev,
   15526 			    "%s: failed to get semaphore\n",
   15527 			    __func__);
   15528 			return -1;
   15529 		}
   15530 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15531 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15532 			swfw_sync |= swmask;
   15533 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15534 			wm_put_swsm_semaphore(sc);
   15535 			return 0;
   15536 		}
   15537 		wm_put_swsm_semaphore(sc);
   15538 		delay(5000);
   15539 		timeout--;
   15540 	}
   15541 	device_printf(sc->sc_dev,
   15542 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15543 	    mask, swfw_sync);
   15544 	return -1;
   15545 }
   15546 
   15547 static void
   15548 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15549 {
   15550 	uint32_t swfw_sync;
   15551 
   15552 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15553 		device_xname(sc->sc_dev), __func__));
   15554 
   15555 	while (wm_get_swsm_semaphore(sc) != 0)
   15556 		continue;
   15557 
   15558 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15559 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15560 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15561 
   15562 	wm_put_swsm_semaphore(sc);
   15563 }
   15564 
   15565 static int
   15566 wm_get_nvm_80003(struct wm_softc *sc)
   15567 {
   15568 	int rv;
   15569 
   15570 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15571 		device_xname(sc->sc_dev), __func__));
   15572 
   15573 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15574 		aprint_error_dev(sc->sc_dev,
   15575 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15576 		return rv;
   15577 	}
   15578 
   15579 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15580 	    && (rv = wm_get_eecd(sc)) != 0) {
   15581 		aprint_error_dev(sc->sc_dev,
   15582 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15583 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15584 		return rv;
   15585 	}
   15586 
   15587 	return 0;
   15588 }
   15589 
   15590 static void
   15591 wm_put_nvm_80003(struct wm_softc *sc)
   15592 {
   15593 
   15594 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15595 		device_xname(sc->sc_dev), __func__));
   15596 
   15597 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15598 		wm_put_eecd(sc);
   15599 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15600 }
   15601 
   15602 static int
   15603 wm_get_nvm_82571(struct wm_softc *sc)
   15604 {
   15605 	int rv;
   15606 
   15607 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15608 		device_xname(sc->sc_dev), __func__));
   15609 
   15610 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15611 		return rv;
   15612 
   15613 	switch (sc->sc_type) {
   15614 	case WM_T_82573:
   15615 		break;
   15616 	default:
   15617 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15618 			rv = wm_get_eecd(sc);
   15619 		break;
   15620 	}
   15621 
   15622 	if (rv != 0) {
   15623 		aprint_error_dev(sc->sc_dev,
   15624 		    "%s: failed to get semaphore\n",
   15625 		    __func__);
   15626 		wm_put_swsm_semaphore(sc);
   15627 	}
   15628 
   15629 	return rv;
   15630 }
   15631 
   15632 static void
   15633 wm_put_nvm_82571(struct wm_softc *sc)
   15634 {
   15635 
   15636 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15637 		device_xname(sc->sc_dev), __func__));
   15638 
   15639 	switch (sc->sc_type) {
   15640 	case WM_T_82573:
   15641 		break;
   15642 	default:
   15643 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15644 			wm_put_eecd(sc);
   15645 		break;
   15646 	}
   15647 
   15648 	wm_put_swsm_semaphore(sc);
   15649 }
   15650 
   15651 static int
   15652 wm_get_phy_82575(struct wm_softc *sc)
   15653 {
   15654 
   15655 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15656 		device_xname(sc->sc_dev), __func__));
   15657 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15658 }
   15659 
   15660 static void
   15661 wm_put_phy_82575(struct wm_softc *sc)
   15662 {
   15663 
   15664 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15665 		device_xname(sc->sc_dev), __func__));
   15666 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15667 }
   15668 
   15669 static int
   15670 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15671 {
   15672 	uint32_t ext_ctrl;
   15673 	int timeout = 200;
	int timeout;
   15675 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15676 		device_xname(sc->sc_dev), __func__));
   15677 
   15678 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15679 	for (timeout = 0; timeout < 200; timeout++) {
   15680 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15681 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15682 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15683 
   15684 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15685 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15686 			return 0;
   15687 		delay(5000);
   15688 	}
   15689 	device_printf(sc->sc_dev,
   15690 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15691 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15692 	return -1;
   15693 }
   15694 
   15695 static void
   15696 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15697 {
   15698 	uint32_t ext_ctrl;
   15699 
   15700 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15701 		device_xname(sc->sc_dev), __func__));
   15702 
   15703 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15704 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15705 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15706 
   15707 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15708 }
   15709 
   15710 static int
   15711 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15712 {
   15713 	uint32_t ext_ctrl;
   15714 	int timeout;
   15715 
   15716 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15717 		device_xname(sc->sc_dev), __func__));
   15718 	mutex_enter(sc->sc_ich_phymtx);
   15719 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15720 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15721 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15722 			break;
   15723 		delay(1000);
   15724 	}
   15725 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15726 		device_printf(sc->sc_dev,
   15727 		    "SW has already locked the resource\n");
   15728 		goto out;
   15729 	}
   15730 
   15731 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15732 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15733 	for (timeout = 0; timeout < 1000; timeout++) {
   15734 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15735 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15736 			break;
   15737 		delay(1000);
   15738 	}
   15739 	if (timeout >= 1000) {
   15740 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15741 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15742 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15743 		goto out;
   15744 	}
   15745 	return 0;
   15746 
   15747 out:
   15748 	mutex_exit(sc->sc_ich_phymtx);
   15749 	return -1;
   15750 }
   15751 
   15752 static void
   15753 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15754 {
   15755 	uint32_t ext_ctrl;
   15756 
   15757 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15758 		device_xname(sc->sc_dev), __func__));
   15759 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15760 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15761 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15762 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15763 	} else
   15764 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15765 
   15766 	mutex_exit(sc->sc_ich_phymtx);
   15767 }
   15768 
   15769 static int
   15770 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15771 {
   15772 
   15773 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15774 		device_xname(sc->sc_dev), __func__));
   15775 	mutex_enter(sc->sc_ich_nvmmtx);
   15776 
   15777 	return 0;
   15778 }
   15779 
   15780 static void
   15781 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15782 {
   15783 
   15784 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15785 		device_xname(sc->sc_dev), __func__));
   15786 	mutex_exit(sc->sc_ich_nvmmtx);
   15787 }
   15788 
   15789 static int
   15790 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15791 {
   15792 	int i = 0;
   15793 	uint32_t reg;
   15794 
   15795 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15796 		device_xname(sc->sc_dev), __func__));
   15797 
   15798 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15799 	do {
   15800 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15801 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15802 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15803 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15804 			break;
   15805 		delay(2*1000);
   15806 		i++;
   15807 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15808 
   15809 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15810 		wm_put_hw_semaphore_82573(sc);
   15811 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15812 		    device_xname(sc->sc_dev));
   15813 		return -1;
   15814 	}
   15815 
   15816 	return 0;
   15817 }
   15818 
   15819 static void
   15820 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15821 {
   15822 	uint32_t reg;
   15823 
   15824 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15825 		device_xname(sc->sc_dev), __func__));
   15826 
   15827 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15828 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15829 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15830 }
   15831 
   15832 /*
   15833  * Management mode and power management related subroutines.
   15834  * BMC, AMT, suspend/resume and EEE.
   15835  */
   15836 
   15837 #ifdef WM_WOL
   15838 static int
   15839 wm_check_mng_mode(struct wm_softc *sc)
   15840 {
   15841 	int rv;
   15842 
   15843 	switch (sc->sc_type) {
   15844 	case WM_T_ICH8:
   15845 	case WM_T_ICH9:
   15846 	case WM_T_ICH10:
   15847 	case WM_T_PCH:
   15848 	case WM_T_PCH2:
   15849 	case WM_T_PCH_LPT:
   15850 	case WM_T_PCH_SPT:
   15851 	case WM_T_PCH_CNP:
   15852 		rv = wm_check_mng_mode_ich8lan(sc);
   15853 		break;
   15854 	case WM_T_82574:
   15855 	case WM_T_82583:
   15856 		rv = wm_check_mng_mode_82574(sc);
   15857 		break;
   15858 	case WM_T_82571:
   15859 	case WM_T_82572:
   15860 	case WM_T_82573:
   15861 	case WM_T_80003:
   15862 		rv = wm_check_mng_mode_generic(sc);
   15863 		break;
   15864 	default:
    15865 		/* Nothing to do */
   15866 		rv = 0;
   15867 		break;
   15868 	}
   15869 
   15870 	return rv;
   15871 }
   15872 
   15873 static int
   15874 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15875 {
   15876 	uint32_t fwsm;
   15877 
   15878 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15879 
   15880 	if (((fwsm & FWSM_FW_VALID) != 0)
   15881 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15882 		return 1;
   15883 
   15884 	return 0;
   15885 }
   15886 
   15887 static int
   15888 wm_check_mng_mode_82574(struct wm_softc *sc)
   15889 {
   15890 	uint16_t data;
   15891 
   15892 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15893 
   15894 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15895 		return 1;
   15896 
   15897 	return 0;
   15898 }
   15899 
   15900 static int
   15901 wm_check_mng_mode_generic(struct wm_softc *sc)
   15902 {
   15903 	uint32_t fwsm;
   15904 
   15905 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15906 
   15907 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15908 		return 1;
   15909 
   15910 	return 0;
   15911 }
   15912 #endif /* WM_WOL */
   15913 
   15914 static int
   15915 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15916 {
   15917 	uint32_t manc, fwsm, factps;
   15918 
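	/*
	 * Pass-through is reported only when management firmware is
	 * present, TCO receive is enabled, and the management mode (from
	 * FWSM or the NVM, depending on the family) is a pass-through mode.
	 */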
   15919 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15920 		return 0;
   15921 
   15922 	manc = CSR_READ(sc, WMREG_MANC);
   15923 
   15924 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15925 		device_xname(sc->sc_dev), manc));
   15926 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15927 		return 0;
   15928 
   15929 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15930 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15931 		factps = CSR_READ(sc, WMREG_FACTPS);
   15932 		if (((factps & FACTPS_MNGCG) == 0)
   15933 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15934 			return 1;
   15935 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15936 		uint16_t data;
   15937 
   15938 		factps = CSR_READ(sc, WMREG_FACTPS);
   15939 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15940 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15941 			device_xname(sc->sc_dev), factps, data));
   15942 		if (((factps & FACTPS_MNGCG) == 0)
   15943 		    && ((data & NVM_CFG2_MNGM_MASK)
   15944 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15945 			return 1;
   15946 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15947 	    && ((manc & MANC_ASF_EN) == 0))
   15948 		return 1;
   15949 
   15950 	return 0;
   15951 }
   15952 
   15953 static bool
   15954 wm_phy_resetisblocked(struct wm_softc *sc)
   15955 {
   15956 	bool blocked = false;
   15957 	uint32_t reg;
   15958 	int i = 0;
   15959 
   15960 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15961 		device_xname(sc->sc_dev), __func__));
   15962 
   15963 	switch (sc->sc_type) {
   15964 	case WM_T_ICH8:
   15965 	case WM_T_ICH9:
   15966 	case WM_T_ICH10:
   15967 	case WM_T_PCH:
   15968 	case WM_T_PCH2:
   15969 	case WM_T_PCH_LPT:
   15970 	case WM_T_PCH_SPT:
   15971 	case WM_T_PCH_CNP:
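		/*
		 * FWSM_RSPCIPHY is set when firmware permits a PHY reset;
		 * poll for up to ~300ms (30 x 10ms) before reporting the
		 * reset as blocked.
		 */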
   15972 		do {
   15973 			reg = CSR_READ(sc, WMREG_FWSM);
   15974 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15975 				blocked = true;
   15976 				delay(10*1000);
   15977 				continue;
   15978 			}
   15979 			blocked = false;
   15980 		} while (blocked && (i++ < 30));
   15981 		return blocked;
   15983 	case WM_T_82571:
   15984 	case WM_T_82572:
   15985 	case WM_T_82573:
   15986 	case WM_T_82574:
   15987 	case WM_T_82583:
   15988 	case WM_T_80003:
   15989 		reg = CSR_READ(sc, WMREG_MANC);
   15990 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15991 			return true;
   15992 		else
   15993 			return false;
   15995 	default:
   15996 		/* No problem */
   15997 		break;
   15998 	}
   15999 
   16000 	return false;
   16001 }
   16002 
   16003 static void
   16004 wm_get_hw_control(struct wm_softc *sc)
   16005 {
   16006 	uint32_t reg;
   16007 
   16008 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16009 		device_xname(sc->sc_dev), __func__));
   16010 
   16011 	if (sc->sc_type == WM_T_82573) {
   16012 		reg = CSR_READ(sc, WMREG_SWSM);
   16013 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16014 	} else if (sc->sc_type >= WM_T_82571) {
   16015 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16016 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16017 	}
   16018 }
   16019 
   16020 static void
   16021 wm_release_hw_control(struct wm_softc *sc)
   16022 {
   16023 	uint32_t reg;
   16024 
   16025 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16026 		device_xname(sc->sc_dev), __func__));
   16027 
   16028 	if (sc->sc_type == WM_T_82573) {
   16029 		reg = CSR_READ(sc, WMREG_SWSM);
   16030 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16031 	} else if (sc->sc_type >= WM_T_82571) {
   16032 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16033 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16034 	}
   16035 }
   16036 
   16037 static void
   16038 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16039 {
   16040 	uint32_t reg;
   16041 
   16042 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16043 		device_xname(sc->sc_dev), __func__));
   16044 
   16045 	if (sc->sc_type < WM_T_PCH2)
   16046 		return;
   16047 
   16048 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16049 
   16050 	if (gate)
   16051 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16052 	else
   16053 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16054 
   16055 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16056 }
   16057 
   16058 static int
   16059 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16060 {
   16061 	uint32_t fwsm, reg;
   16062 	int rv;
   16063 
   16064 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16065 		device_xname(sc->sc_dev), __func__));
   16066 
   16067 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16068 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16069 
   16070 	/* Disable ULP */
   16071 	wm_ulp_disable(sc);
   16072 
   16073 	/* Acquire PHY semaphore */
   16074 	rv = sc->phy.acquire(sc);
   16075 	if (rv != 0) {
   16076 		DPRINTF(sc, WM_DEBUG_INIT,
   16077 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16078 		return rv;
   16079 	}
   16080 
   16081 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16082 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16083 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16084 	 */
   16085 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16086 	switch (sc->sc_type) {
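	/*
	 * The cases below fall through from newer to older parts, applying
	 * progressively stronger recovery steps until the PHY responds.
	 */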
   16087 	case WM_T_PCH_LPT:
   16088 	case WM_T_PCH_SPT:
   16089 	case WM_T_PCH_CNP:
   16090 		if (wm_phy_is_accessible_pchlan(sc))
   16091 			break;
   16092 
   16093 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16094 		 * forcing MAC to SMBus mode first.
   16095 		 */
   16096 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16097 		reg |= CTRL_EXT_FORCE_SMBUS;
   16098 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16099 #if 0
   16100 		/* XXX Isn't this required??? */
   16101 		CSR_WRITE_FLUSH(sc);
   16102 #endif
   16103 		/* Wait 50 milliseconds for MAC to finish any retries
   16104 		 * that it might be trying to perform from previous
   16105 		 * attempts to acknowledge any phy read requests.
   16106 		 */
   16107 		delay(50 * 1000);
   16108 		/* FALLTHROUGH */
   16109 	case WM_T_PCH2:
   16110 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16111 			break;
   16112 		/* FALLTHROUGH */
   16113 	case WM_T_PCH:
   16114 		if (sc->sc_type == WM_T_PCH)
   16115 			if ((fwsm & FWSM_FW_VALID) != 0)
   16116 				break;
   16117 
   16118 		if (wm_phy_resetisblocked(sc) == true) {
   16119 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16120 			break;
   16121 		}
   16122 
   16123 		/* Toggle LANPHYPC Value bit */
   16124 		wm_toggle_lanphypc_pch_lpt(sc);
   16125 
   16126 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16127 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16128 				break;
   16129 
   16130 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16131 			 * so ensure that the MAC is also out of SMBus mode
   16132 			 */
   16133 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16134 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16135 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16136 
   16137 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16138 				break;
   16139 			rv = -1;
   16140 		}
   16141 		break;
   16142 	default:
   16143 		break;
   16144 	}
   16145 
   16146 	/* Release semaphore */
   16147 	sc->phy.release(sc);
   16148 
   16149 	if (rv == 0) {
    16150 		/* Check whether the PHY can be reset; print an error if not. */
   16151 		if (wm_phy_resetisblocked(sc)) {
   16152 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16153 			goto out;
   16154 		}
   16155 
    16156 		/* Reset the PHY before any access to it.  Doing so ensures
   16157 		 * that the PHY is in a known good state before we read/write
   16158 		 * PHY registers.  The generic reset is sufficient here,
   16159 		 * because we haven't determined the PHY type yet.
   16160 		 */
    16161 		if ((rv = wm_reset_phy(sc)) != 0)
    16162 			goto out;
   16163 
   16164 		/* On a successful reset, possibly need to wait for the PHY
   16165 		 * to quiesce to an accessible state before returning control
   16166 		 * to the calling function.  If the PHY does not quiesce, then
   16167 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    16168 		 * the PHY is in.
   16169 		 */
   16170 		if (wm_phy_resetisblocked(sc))
   16171 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16172 	}
   16173 
   16174 out:
   16175 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16176 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16177 		delay(10*1000);
   16178 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16179 	}
   16180 
    16181 	return rv;
   16182 }
   16183 
   16184 static void
   16185 wm_init_manageability(struct wm_softc *sc)
   16186 {
   16187 
   16188 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16189 		device_xname(sc->sc_dev), __func__));
   16190 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16191 
   16192 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16193 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16194 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16195 
   16196 		/* Disable hardware interception of ARP */
   16197 		manc &= ~MANC_ARP_EN;
   16198 
   16199 		/* Enable receiving management packets to the host */
   16200 		if (sc->sc_type >= WM_T_82571) {
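			/*
			 * These are the standard manageability (ASF/RMCP)
			 * UDP ports.
			 */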
   16201 			manc |= MANC_EN_MNG2HOST;
   16202 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16203 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16204 		}
   16205 
   16206 		CSR_WRITE(sc, WMREG_MANC, manc);
   16207 	}
   16208 }
   16209 
   16210 static void
   16211 wm_release_manageability(struct wm_softc *sc)
   16212 {
   16213 
   16214 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16215 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16216 
   16217 		manc |= MANC_ARP_EN;
   16218 		if (sc->sc_type >= WM_T_82571)
   16219 			manc &= ~MANC_EN_MNG2HOST;
   16220 
   16221 		CSR_WRITE(sc, WMREG_MANC, manc);
   16222 	}
   16223 }
   16224 
   16225 static void
   16226 wm_get_wakeup(struct wm_softc *sc)
   16227 {
   16228 
   16229 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16230 	switch (sc->sc_type) {
   16231 	case WM_T_82573:
   16232 	case WM_T_82583:
   16233 		sc->sc_flags |= WM_F_HAS_AMT;
   16234 		/* FALLTHROUGH */
   16235 	case WM_T_80003:
   16236 	case WM_T_82575:
   16237 	case WM_T_82576:
   16238 	case WM_T_82580:
   16239 	case WM_T_I350:
   16240 	case WM_T_I354:
   16241 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16242 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16243 		/* FALLTHROUGH */
   16244 	case WM_T_82541:
   16245 	case WM_T_82541_2:
   16246 	case WM_T_82547:
   16247 	case WM_T_82547_2:
   16248 	case WM_T_82571:
   16249 	case WM_T_82572:
   16250 	case WM_T_82574:
   16251 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16252 		break;
   16253 	case WM_T_ICH8:
   16254 	case WM_T_ICH9:
   16255 	case WM_T_ICH10:
   16256 	case WM_T_PCH:
   16257 	case WM_T_PCH2:
   16258 	case WM_T_PCH_LPT:
   16259 	case WM_T_PCH_SPT:
   16260 	case WM_T_PCH_CNP:
   16261 		sc->sc_flags |= WM_F_HAS_AMT;
   16262 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16263 		break;
   16264 	default:
   16265 		break;
   16266 	}
   16267 
   16268 	/* 1: HAS_MANAGE */
   16269 	if (wm_enable_mng_pass_thru(sc) != 0)
   16270 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16271 
   16272 	/*
    16273 	 * Note that the WOL flags are set after the EEPROM reset code
    16274 	 * has run.
   16275 	 */
   16276 }
   16277 
   16278 /*
   16279  * Unconfigure Ultra Low Power mode.
   16280  * Only for I217 and newer (see below).
   16281  */
   16282 static int
   16283 wm_ulp_disable(struct wm_softc *sc)
   16284 {
   16285 	uint32_t reg;
   16286 	uint16_t phyreg;
   16287 	int i = 0, rv;
   16288 
   16289 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16290 		device_xname(sc->sc_dev), __func__));
   16291 	/* Exclude old devices */
   16292 	if ((sc->sc_type < WM_T_PCH_LPT)
   16293 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16294 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16295 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16296 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16297 		return 0;
   16298 
   16299 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16300 		/* Request ME un-configure ULP mode in the PHY */
   16301 		reg = CSR_READ(sc, WMREG_H2ME);
   16302 		reg &= ~H2ME_ULP;
   16303 		reg |= H2ME_ENFORCE_SETTINGS;
   16304 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16305 
   16306 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16307 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16308 			if (i++ == 30) {
   16309 				device_printf(sc->sc_dev, "%s timed out\n",
   16310 				    __func__);
   16311 				return -1;
   16312 			}
   16313 			delay(10 * 1000);
   16314 		}
   16315 		reg = CSR_READ(sc, WMREG_H2ME);
   16316 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16317 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16318 
   16319 		return 0;
   16320 	}
   16321 
   16322 	/* Acquire semaphore */
   16323 	rv = sc->phy.acquire(sc);
   16324 	if (rv != 0) {
   16325 		DPRINTF(sc, WM_DEBUG_INIT,
   16326 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16327 		return rv;
   16328 	}
   16329 
   16330 	/* Toggle LANPHYPC */
   16331 	wm_toggle_lanphypc_pch_lpt(sc);
   16332 
   16333 	/* Unforce SMBus mode in PHY */
   16334 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16335 	if (rv != 0) {
   16336 		uint32_t reg2;
   16337 
   16338 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16339 		    __func__);
   16340 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16341 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16342 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16343 		delay(50 * 1000);
   16344 
   16345 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16346 		    &phyreg);
   16347 		if (rv != 0)
   16348 			goto release;
   16349 	}
   16350 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16351 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16352 
   16353 	/* Unforce SMBus mode in MAC */
   16354 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16355 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16356 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16357 
   16358 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16359 	if (rv != 0)
   16360 		goto release;
   16361 	phyreg |= HV_PM_CTRL_K1_ENA;
   16362 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16363 
   16364 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16365 	    &phyreg);
   16366 	if (rv != 0)
   16367 		goto release;
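	/*
	 * Clear all of the ULP sticky/retention bits, then write the
	 * register again with the START bit set to commit the disabled
	 * configuration.
	 */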
   16368 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16369 	    | I218_ULP_CONFIG1_STICKY_ULP
   16370 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16371 	    | I218_ULP_CONFIG1_WOL_HOST
   16372 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16373 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16374 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16375 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16376 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16377 	phyreg |= I218_ULP_CONFIG1_START;
   16378 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16379 
   16380 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16381 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16382 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16383 
   16384 release:
   16385 	/* Release semaphore */
   16386 	sc->phy.release(sc);
   16387 	wm_gmii_reset(sc);
   16388 	delay(50 * 1000);
   16389 
   16390 	return rv;
   16391 }
   16392 
   16393 /* WOL in the newer chipset interfaces (pchlan) */
   16394 static int
   16395 wm_enable_phy_wakeup(struct wm_softc *sc)
   16396 {
   16397 	device_t dev = sc->sc_dev;
   16398 	uint32_t mreg, moff;
   16399 	uint16_t wuce, wuc, wufc, preg;
   16400 	int i, rv;
   16401 
   16402 	KASSERT(sc->sc_type >= WM_T_PCH);
   16403 
   16404 	/* Copy MAC RARs to PHY RARs */
   16405 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16406 
   16407 	/* Activate PHY wakeup */
   16408 	rv = sc->phy.acquire(sc);
   16409 	if (rv != 0) {
   16410 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16411 		    __func__);
   16412 		return rv;
   16413 	}
   16414 
   16415 	/*
   16416 	 * Enable access to PHY wakeup registers.
   16417 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16418 	 */
   16419 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16420 	if (rv != 0) {
   16421 		device_printf(dev,
   16422 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16423 		goto release;
   16424 	}
   16425 
   16426 	/* Copy MAC MTA to PHY MTA */
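	/* Each 32-bit MTA entry is written as two 16-bit PHY registers. */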
   16427 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16428 		uint16_t lo, hi;
   16429 
   16430 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16431 		lo = (uint16_t)(mreg & 0xffff);
   16432 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16433 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16434 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16435 	}
   16436 
   16437 	/* Configure PHY Rx Control register */
   16438 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16439 	mreg = CSR_READ(sc, WMREG_RCTL);
   16440 	if (mreg & RCTL_UPE)
   16441 		preg |= BM_RCTL_UPE;
   16442 	if (mreg & RCTL_MPE)
   16443 		preg |= BM_RCTL_MPE;
   16444 	preg &= ~(BM_RCTL_MO_MASK);
   16445 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16446 	if (moff != 0)
   16447 		preg |= moff << BM_RCTL_MO_SHIFT;
   16448 	if (mreg & RCTL_BAM)
   16449 		preg |= BM_RCTL_BAM;
   16450 	if (mreg & RCTL_PMCF)
   16451 		preg |= BM_RCTL_PMCF;
   16452 	mreg = CSR_READ(sc, WMREG_CTRL);
   16453 	if (mreg & CTRL_RFCE)
   16454 		preg |= BM_RCTL_RFCE;
   16455 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16456 
   16457 	wuc = WUC_APME | WUC_PME_EN;
   16458 	wufc = WUFC_MAG;
   16459 	/* Enable PHY wakeup in MAC register */
   16460 	CSR_WRITE(sc, WMREG_WUC,
   16461 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16462 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16463 
   16464 	/* Configure and enable PHY wakeup in PHY registers */
   16465 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16466 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16467 
   16468 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16469 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16470 
   16471 release:
   16472 	sc->phy.release(sc);
   16473 
    16474 	return rv;
   16475 }
   16476 
   16477 /* Power down workaround on D3 */
   16478 static void
   16479 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16480 {
   16481 	uint32_t reg;
   16482 	uint16_t phyreg;
   16483 	int i;
   16484 
   16485 	for (i = 0; i < 2; i++) {
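		/*
		 * Up to two passes: if the shutdown mode fails to latch on
		 * the first pass, reset the PHY and try once more.
		 */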
   16486 		/* Disable link */
   16487 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16488 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16489 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16490 
   16491 		/*
   16492 		 * Call gig speed drop workaround on Gig disable before
   16493 		 * accessing any PHY registers
   16494 		 */
   16495 		if (sc->sc_type == WM_T_ICH8)
   16496 			wm_gig_downshift_workaround_ich8lan(sc);
   16497 
   16498 		/* Write VR power-down enable */
   16499 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16500 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16501 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16502 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16503 
   16504 		/* Read it back and test */
   16505 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16506 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16507 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16508 			break;
   16509 
   16510 		/* Issue PHY reset and repeat at most one more time */
   16511 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16512 	}
   16513 }
   16514 
   16515 /*
   16516  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16517  *  @sc: pointer to the HW structure
   16518  *
   16519  *  During S0 to Sx transition, it is possible the link remains at gig
   16520  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16521  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16522  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16523  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16524  *  needs to be written.
   16525  *  Parts that support (and are linked to a partner which support) EEE in
   16526  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16527  *  than 10Mbps w/o EEE.
   16528  */
   16529 static void
   16530 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16531 {
   16532 	device_t dev = sc->sc_dev;
   16533 	struct ethercom *ec = &sc->sc_ethercom;
   16534 	uint32_t phy_ctrl;
   16535 	int rv;
   16536 
   16537 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16538 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16539 
   16540 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16541 
   16542 	if (sc->sc_phytype == WMPHY_I217) {
   16543 		uint16_t devid = sc->sc_pcidevid;
   16544 
   16545 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16546 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16547 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16548 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16549 		    (sc->sc_type >= WM_T_PCH_SPT))
   16550 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16551 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16552 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16553 
   16554 		if (sc->phy.acquire(sc) != 0)
   16555 			goto out;
   16556 
   16557 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16558 			uint16_t eee_advert;
   16559 
   16560 			rv = wm_read_emi_reg_locked(dev,
   16561 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16562 			if (rv)
   16563 				goto release;
   16564 
   16565 			/*
   16566 			 * Disable LPLU if both link partners support 100BaseT
   16567 			 * EEE and 100Full is advertised on both ends of the
   16568 			 * link, and enable Auto Enable LPI since there will
   16569 			 * be no driver to enable LPI while in Sx.
   16570 			 */
   16571 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16572 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16573 				uint16_t anar, phy_reg;
   16574 
   16575 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16576 				    &anar);
   16577 				if (anar & ANAR_TX_FD) {
   16578 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16579 					    PHY_CTRL_NOND0A_LPLU);
   16580 
   16581 					/* Set Auto Enable LPI after link up */
   16582 					sc->phy.readreg_locked(dev, 2,
   16583 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16584 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16585 					sc->phy.writereg_locked(dev, 2,
   16586 					    I217_LPI_GPIO_CTRL, phy_reg);
   16587 				}
   16588 			}
   16589 		}
   16590 
   16591 		/*
   16592 		 * For i217 Intel Rapid Start Technology support,
   16593 		 * when the system is going into Sx and no manageability engine
   16594 		 * is present, the driver must configure proxy to reset only on
   16595 		 * power good.	LPI (Low Power Idle) state must also reset only
   16596 		 * on power good, as well as the MTA (Multicast table array).
   16597 		 * The SMBus release must also be disabled on LCD reset.
   16598 		 */
   16599 
   16600 		/*
   16601 		 * Enable MTA to reset for Intel Rapid Start Technology
   16602 		 * Support
   16603 		 */
   16604 
   16605 release:
   16606 		sc->phy.release(sc);
   16607 	}
   16608 out:
   16609 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16610 
   16611 	if (sc->sc_type == WM_T_ICH8)
   16612 		wm_gig_downshift_workaround_ich8lan(sc);
   16613 
   16614 	if (sc->sc_type >= WM_T_PCH) {
   16615 		wm_oem_bits_config_ich8lan(sc, false);
   16616 
   16617 		/* Reset PHY to activate OEM bits on 82577/8 */
   16618 		if (sc->sc_type == WM_T_PCH)
   16619 			wm_reset_phy(sc);
   16620 
   16621 		if (sc->phy.acquire(sc) != 0)
   16622 			return;
   16623 		wm_write_smbus_addr(sc);
   16624 		sc->phy.release(sc);
   16625 	}
   16626 }
   16627 
   16628 /*
   16629  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16630  *  @sc: pointer to the HW structure
   16631  *
   16632  *  During Sx to S0 transitions on non-managed devices or managed devices
   16633  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16634  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16635  *  the PHY.
   16636  *  On i217, setup Intel Rapid Start Technology.
   16637  */
   16638 static int
   16639 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16640 {
   16641 	device_t dev = sc->sc_dev;
   16642 	int rv;
   16643 
   16644 	if (sc->sc_type < WM_T_PCH2)
   16645 		return 0;
   16646 
   16647 	rv = wm_init_phy_workarounds_pchlan(sc);
   16648 	if (rv != 0)
   16649 		return rv;
   16650 
    16651 	/* For i217 Intel Rapid Start Technology support: when the system
    16652 	 * is transitioning from Sx and no manageability engine is present,
    16653 	 * configure SMBus to restore on reset, disable proxy, and enable
   16654 	 * the reset on MTA (Multicast table array).
   16655 	 */
   16656 	if (sc->sc_phytype == WMPHY_I217) {
   16657 		uint16_t phy_reg;
   16658 
   16659 		rv = sc->phy.acquire(sc);
   16660 		if (rv != 0)
   16661 			return rv;
   16662 
   16663 		/* Clear Auto Enable LPI after link up */
   16664 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16665 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16666 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16667 
   16668 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16669 			/* Restore clear on SMB if no manageability engine
   16670 			 * is present
   16671 			 */
   16672 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16673 			    &phy_reg);
   16674 			if (rv != 0)
   16675 				goto release;
   16676 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16677 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16678 
   16679 			/* Disable Proxy */
   16680 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16681 		}
   16682 		/* Enable reset on MTA */
    16683 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16684 		if (rv != 0)
   16685 			goto release;
   16686 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16687 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16688 
   16689 release:
   16690 		sc->phy.release(sc);
   16691 		return rv;
   16692 	}
   16693 
   16694 	return 0;
   16695 }
   16696 
   16697 static void
   16698 wm_enable_wakeup(struct wm_softc *sc)
   16699 {
   16700 	uint32_t reg, pmreg;
   16701 	pcireg_t pmode;
   16702 	int rv = 0;
   16703 
   16704 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16705 		device_xname(sc->sc_dev), __func__));
   16706 
   16707 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16708 	    &pmreg, NULL) == 0)
   16709 		return;
   16710 
   16711 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16712 		goto pme;
   16713 
   16714 	/* Advertise the wakeup capability */
   16715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16716 	    | CTRL_SWDPIN(3));
   16717 
   16718 	/* Keep the laser running on fiber adapters */
   16719 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16720 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16721 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16722 		reg |= CTRL_EXT_SWDPIN(3);
   16723 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16724 	}
   16725 
   16726 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16727 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16728 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16729 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16730 		wm_suspend_workarounds_ich8lan(sc);
   16731 
   16732 #if 0	/* For the multicast packet */
   16733 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16734 	reg |= WUFC_MC;
   16735 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16736 #endif
   16737 
   16738 	if (sc->sc_type >= WM_T_PCH) {
   16739 		rv = wm_enable_phy_wakeup(sc);
   16740 		if (rv != 0)
   16741 			goto pme;
   16742 	} else {
   16743 		/* Enable wakeup by the MAC */
   16744 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16745 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16746 	}
   16747 
   16748 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16749 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16750 		|| (sc->sc_type == WM_T_PCH2))
   16751 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16752 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16753 
   16754 pme:
   16755 	/* Request PME */
   16756 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16757 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16758 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16759 		/* For WOL */
   16760 		pmode |= PCI_PMCSR_PME_EN;
   16761 	} else {
   16762 		/* Disable WOL */
   16763 		pmode &= ~PCI_PMCSR_PME_EN;
   16764 	}
   16765 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16766 }
   16767 
   16768 /* Disable ASPM L0s and/or L1 for workaround */
   16769 static void
   16770 wm_disable_aspm(struct wm_softc *sc)
   16771 {
   16772 	pcireg_t reg, mask = 0;
    16773 	const char *str = "";
   16774 
   16775 	/*
    16776 	 * Only for PCIe devices which have the PCIe capability structure
    16777 	 * in their PCI config space.
   16778 	 */
   16779 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16780 		return;
   16781 
   16782 	switch (sc->sc_type) {
   16783 	case WM_T_82571:
   16784 	case WM_T_82572:
   16785 		/*
   16786 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16787 		 * State Power management L1 State (ASPM L1).
   16788 		 */
   16789 		mask = PCIE_LCSR_ASPM_L1;
   16790 		str = "L1 is";
   16791 		break;
   16792 	case WM_T_82573:
   16793 	case WM_T_82574:
   16794 	case WM_T_82583:
   16795 		/*
   16796 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16797 		 *
    16798 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16799 		 * some chipsets.  The 82574 and 82583 documents say that
    16800 		 * disabling L0s on those specific chipsets is sufficient,
    16801 		 * but we follow what the Intel em driver does.
   16802 		 *
   16803 		 * References:
   16804 		 * Errata 8 of the Specification Update of i82573.
   16805 		 * Errata 20 of the Specification Update of i82574.
   16806 		 * Errata 9 of the Specification Update of i82583.
   16807 		 */
   16808 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16809 		str = "L0s and L1 are";
   16810 		break;
   16811 	default:
   16812 		return;
   16813 	}
   16814 
   16815 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16816 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16817 	reg &= ~mask;
   16818 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16819 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16820 
   16821 	/* Print only in wm_attach() */
   16822 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16823 		aprint_verbose_dev(sc->sc_dev,
   16824 		    "ASPM %s disabled to workaround the errata.\n", str);
   16825 }
   16826 
   16827 /* LPLU */
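/*
 * LPLU (Low Power Link Up) lets the PHY negotiate a lower speed to save
 * power.  Disable it for D0 so the active link can run at full speed.
 */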
   16828 
   16829 static void
   16830 wm_lplu_d0_disable(struct wm_softc *sc)
   16831 {
   16832 	struct mii_data *mii = &sc->sc_mii;
   16833 	uint32_t reg;
   16834 	uint16_t phyval;
   16835 
   16836 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16837 		device_xname(sc->sc_dev), __func__));
   16838 
   16839 	if (sc->sc_phytype == WMPHY_IFE)
   16840 		return;
   16841 
   16842 	switch (sc->sc_type) {
   16843 	case WM_T_82571:
   16844 	case WM_T_82572:
   16845 	case WM_T_82573:
   16846 	case WM_T_82575:
   16847 	case WM_T_82576:
   16848 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16849 		phyval &= ~PMR_D0_LPLU;
   16850 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16851 		break;
   16852 	case WM_T_82580:
   16853 	case WM_T_I350:
   16854 	case WM_T_I210:
   16855 	case WM_T_I211:
   16856 		reg = CSR_READ(sc, WMREG_PHPM);
   16857 		reg &= ~PHPM_D0A_LPLU;
   16858 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16859 		break;
   16860 	case WM_T_82574:
   16861 	case WM_T_82583:
   16862 	case WM_T_ICH8:
   16863 	case WM_T_ICH9:
   16864 	case WM_T_ICH10:
   16865 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16866 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16867 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16868 		CSR_WRITE_FLUSH(sc);
   16869 		break;
   16870 	case WM_T_PCH:
   16871 	case WM_T_PCH2:
   16872 	case WM_T_PCH_LPT:
   16873 	case WM_T_PCH_SPT:
   16874 	case WM_T_PCH_CNP:
   16875 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16876 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16877 		if (wm_phy_resetisblocked(sc) == false)
   16878 			phyval |= HV_OEM_BITS_ANEGNOW;
   16879 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16880 		break;
   16881 	default:
   16882 		break;
   16883 	}
   16884 }
   16885 
   16886 /* EEE */
   16887 
   16888 static int
   16889 wm_set_eee_i350(struct wm_softc *sc)
   16890 {
   16891 	struct ethercom *ec = &sc->sc_ethercom;
   16892 	uint32_t ipcnfg, eeer;
   16893 	uint32_t ipcnfg_mask
   16894 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16895 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16896 
   16897 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16898 
   16899 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16900 	eeer = CSR_READ(sc, WMREG_EEER);
   16901 
   16902 	/* Enable or disable per user setting */
   16903 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16904 		ipcnfg |= ipcnfg_mask;
   16905 		eeer |= eeer_mask;
   16906 	} else {
   16907 		ipcnfg &= ~ipcnfg_mask;
   16908 		eeer &= ~eeer_mask;
   16909 	}
   16910 
   16911 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16912 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16913 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16914 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16915 
   16916 	return 0;
   16917 }
   16918 
   16919 static int
   16920 wm_set_eee_pchlan(struct wm_softc *sc)
   16921 {
   16922 	device_t dev = sc->sc_dev;
   16923 	struct ethercom *ec = &sc->sc_ethercom;
   16924 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16925 	int rv;
   16926 
   16927 	switch (sc->sc_phytype) {
   16928 	case WMPHY_82579:
   16929 		lpa = I82579_EEE_LP_ABILITY;
   16930 		pcs_status = I82579_EEE_PCS_STATUS;
   16931 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16932 		break;
   16933 	case WMPHY_I217:
   16934 		lpa = I217_EEE_LP_ABILITY;
   16935 		pcs_status = I217_EEE_PCS_STATUS;
   16936 		adv_addr = I217_EEE_ADVERTISEMENT;
   16937 		break;
   16938 	default:
   16939 		return 0;
   16940 	}
   16941 
   16942 	rv = sc->phy.acquire(sc);
   16943 	if (rv != 0) {
   16944 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16945 		return rv;
   16946 	}
   16947 
   16948 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16949 	if (rv != 0)
   16950 		goto release;
   16951 
   16952 	/* Clear bits that enable EEE in various speeds */
   16953 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16954 
   16955 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16956 		/* Save off link partner's EEE ability */
   16957 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16958 		if (rv != 0)
   16959 			goto release;
   16960 
   16961 		/* Read EEE advertisement */
   16962 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16963 			goto release;
   16964 
   16965 		/*
   16966 		 * Enable EEE only for speeds in which the link partner is
   16967 		 * EEE capable and for which we advertise EEE.
   16968 		 */
   16969 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16970 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16971 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16972 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16973 			if ((data & ANLPAR_TX_FD) != 0)
   16974 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16975 			else {
   16976 				/*
   16977 				 * EEE is not supported in 100Half, so ignore
   16978 				 * partner's EEE in 100 ability if full-duplex
   16979 				 * is not advertised.
   16980 				 */
   16981 				sc->eee_lp_ability
   16982 				    &= ~AN_EEEADVERT_100_TX;
   16983 			}
   16984 		}
   16985 	}
   16986 
   16987 	if (sc->sc_phytype == WMPHY_82579) {
   16988 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16989 		if (rv != 0)
   16990 			goto release;
   16991 
   16992 		data &= ~I82579_LPI_PLL_SHUT_100;
   16993 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16994 	}
   16995 
   16996 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16997 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16998 		goto release;
   16999 
   17000 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17001 release:
   17002 	sc->phy.release(sc);
   17003 
   17004 	return rv;
   17005 }
   17006 
   17007 static int
   17008 wm_set_eee(struct wm_softc *sc)
   17009 {
   17010 	struct ethercom *ec = &sc->sc_ethercom;
   17011 
   17012 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17013 		return 0;
   17014 
   17015 	if (sc->sc_type == WM_T_I354) {
   17016 		/* I354 uses an external PHY */
   17017 		return 0; /* not yet */
   17018 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17019 		return wm_set_eee_i350(sc);
   17020 	else if (sc->sc_type >= WM_T_PCH2)
   17021 		return wm_set_eee_pchlan(sc);
   17022 
   17023 	return 0;
   17024 }
   17025 
   17026 /*
   17027  * Workarounds (mainly PHY related).
   17028  * Basically, PHY's workarounds are in the PHY drivers.
   17029  */
   17030 
   17031 /* Workaround for 82566 Kumeran PCS lock loss */
   17032 static int
   17033 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17034 {
   17035 	struct mii_data *mii = &sc->sc_mii;
   17036 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17037 	int i, reg, rv;
   17038 	uint16_t phyreg;
   17039 
   17040 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17041 		device_xname(sc->sc_dev), __func__));
   17042 
   17043 	/* If the link is not up, do nothing */
   17044 	if ((status & STATUS_LU) == 0)
   17045 		return 0;
   17046 
    17047 	/* Nothing to do if the link speed is not 1Gbps */
   17048 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17049 		return 0;
   17050 
   17051 	for (i = 0; i < 10; i++) {
    17052 		/* Read twice; the first read clears the latched state. */
   17053 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17054 		if (rv != 0)
   17055 			return rv;
   17056 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17057 		if (rv != 0)
   17058 			return rv;
   17059 
   17060 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17061 			goto out;	/* GOOD! */
   17062 
   17063 		/* Reset the PHY */
   17064 		wm_reset_phy(sc);
   17065 		delay(5*1000);
   17066 	}
   17067 
   17068 	/* Disable GigE link negotiation */
   17069 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17070 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17071 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17072 
   17073 	/*
   17074 	 * Call gig speed drop workaround on Gig disable before accessing
   17075 	 * any PHY registers.
   17076 	 */
   17077 	wm_gig_downshift_workaround_ich8lan(sc);
   17078 
   17079 out:
   17080 	return 0;
   17081 }
   17082 
   17083 /*
   17084  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17085  *  @sc: pointer to the HW structure
   17086  *
    17087  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   17088  *  LPLU, Gig disable, MDIC PHY reset):
   17089  *    1) Set Kumeran Near-end loopback
   17090  *    2) Clear Kumeran Near-end loopback
   17091  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17092  */
   17093 static void
   17094 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17095 {
   17096 	uint16_t kmreg;
   17097 
   17098 	/* Only for igp3 */
   17099 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17100 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17101 			return;
   17102 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17103 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17104 			return;
   17105 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17106 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17107 	}
   17108 }
   17109 
   17110 /*
   17111  * Workaround for pch's PHYs
   17112  * XXX should be moved to new PHY driver?
   17113  */
   17114 static int
   17115 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17116 {
   17117 	device_t dev = sc->sc_dev;
   17118 	struct mii_data *mii = &sc->sc_mii;
   17119 	struct mii_softc *child;
   17120 	uint16_t phy_data, phyrev = 0;
   17121 	int phytype = sc->sc_phytype;
   17122 	int rv;
   17123 
   17124 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17125 		device_xname(dev), __func__));
   17126 	KASSERT(sc->sc_type == WM_T_PCH);
   17127 
   17128 	/* Set MDIO slow mode before any other MDIO access */
   17129 	if (phytype == WMPHY_82577)
   17130 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17131 			return rv;
   17132 
   17133 	child = LIST_FIRST(&mii->mii_phys);
   17134 	if (child != NULL)
   17135 		phyrev = child->mii_mpd_rev;
   17136 
    17137 	/* (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
   17138 	if ((child != NULL) &&
   17139 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17140 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17141 		/* Disable generation of early preamble (0x4431) */
   17142 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17143 		    &phy_data);
   17144 		if (rv != 0)
   17145 			return rv;
   17146 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17147 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17148 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17149 		    phy_data);
   17150 		if (rv != 0)
   17151 			return rv;
   17152 
   17153 		/* Preamble tuning for SSC */
   17154 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17155 		if (rv != 0)
   17156 			return rv;
   17157 	}
   17158 
   17159 	/* 82578 */
   17160 	if (phytype == WMPHY_82578) {
   17161 		/*
   17162 		 * Return registers to default by doing a soft reset then
   17163 		 * writing 0x3140 to the control register
   17164 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17165 		 */
   17166 		if ((child != NULL) && (phyrev < 2)) {
   17167 			PHY_RESET(child);
   17168 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17169 			if (rv != 0)
   17170 				return rv;
   17171 		}
   17172 	}
   17173 
   17174 	/* Select page 0 */
   17175 	if ((rv = sc->phy.acquire(sc)) != 0)
   17176 		return rv;
   17177 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17178 	sc->phy.release(sc);
   17179 	if (rv != 0)
   17180 		return rv;
   17181 
   17182 	/*
   17183 	 * Configure the K1 Si workaround during phy reset assuming there is
   17184 	 * link so that it disables K1 if link is in 1Gbps.
   17185 	 */
   17186 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17187 		return rv;
   17188 
   17189 	/* Workaround for link disconnects on a busy hub in half duplex */
   17190 	rv = sc->phy.acquire(sc);
   17191 	if (rv)
   17192 		return rv;
   17193 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17194 	if (rv)
   17195 		goto release;
   17196 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17197 	    phy_data & 0x00ff);
   17198 	if (rv)
   17199 		goto release;
   17200 
   17201 	/* Set MSE higher to enable link to stay up when noise is high */
   17202 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17203 release:
   17204 	sc->phy.release(sc);
   17205 
   17206 	return rv;
   17207 }
   17208 
   17209 /*
   17210  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17211  *  @sc:   pointer to the HW structure
   17212  */
   17213 static void
   17214 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17215 {
   17216 
   17217 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17218 		device_xname(sc->sc_dev), __func__));
   17219 
   17220 	if (sc->phy.acquire(sc) != 0)
   17221 		return;
   17222 
   17223 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17224 
   17225 	sc->phy.release(sc);
   17226 }
   17227 
   17228 static void
   17229 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17230 {
   17231 	device_t dev = sc->sc_dev;
   17232 	uint32_t mac_reg;
   17233 	uint16_t i, wuce;
   17234 	int count;
   17235 
   17236 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17237 		device_xname(dev), __func__));
   17238 
   17239 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17240 		return;
   17241 
   17242 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17243 	count = wm_rar_count(sc);
   17244 	for (i = 0; i < count; i++) {
   17245 		uint16_t lo, hi;
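		/*
		 * Each 32-bit RAL/RAH value is split into two 16-bit PHY
		 * wakeup registers; only the Address Valid bit is carried
		 * over from RAH's upper word.
		 */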
   17246 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17247 		lo = (uint16_t)(mac_reg & 0xffff);
   17248 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17249 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17250 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17251 
   17252 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17253 		lo = (uint16_t)(mac_reg & 0xffff);
   17254 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17255 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17256 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17257 	}
   17258 
   17259 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17260 }
   17261 
   17262 /*
   17263  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17264  *  with 82579 PHY
   17265  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17266  */
   17267 static int
   17268 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17269 {
   17270 	device_t dev = sc->sc_dev;
   17271 	int rar_count;
   17272 	int rv;
   17273 	uint32_t mac_reg;
   17274 	uint16_t dft_ctrl, data;
   17275 	uint16_t i;
   17276 
   17277 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17278 		device_xname(dev), __func__));
   17279 
   17280 	if (sc->sc_type < WM_T_PCH2)
   17281 		return 0;
   17282 
   17283 	/* Acquire PHY semaphore */
   17284 	rv = sc->phy.acquire(sc);
   17285 	if (rv != 0)
   17286 		return rv;
   17287 
   17288 	/* Disable Rx path while enabling/disabling workaround */
   17289 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17290 	if (rv != 0)
   17291 		goto out;
   17292 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17293 	    dft_ctrl | (1 << 14));
   17294 	if (rv != 0)
   17295 		goto out;
   17296 
   17297 	if (enable) {
   17298 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17299 		 * SHRAL/H) and initial CRC values to the MAC
   17300 		 */
   17301 		rar_count = wm_rar_count(sc);
   17302 		for (i = 0; i < rar_count; i++) {
   17303 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17304 			uint32_t addr_high, addr_low;
   17305 
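			/*
			 * Seed the Receive Address Initial CRC (RAICC)
			 * register with the complemented CRC-32 of each
			 * valid receive address.
			 */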
   17306 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17307 			if (!(addr_high & RAL_AV))
   17308 				continue;
   17309 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17310 			mac_addr[0] = (addr_low & 0xFF);
   17311 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17312 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17313 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17314 			mac_addr[4] = (addr_high & 0xFF);
   17315 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17316 
   17317 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17318 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17319 		}
   17320 
   17321 		/* Write Rx addresses to the PHY */
   17322 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17323 	}
   17324 
   17325 	/*
   17326 	 * If enable ==
   17327 	 *	true: Enable jumbo frame workaround in the MAC.
   17328 	 *	false: Write MAC register values back to h/w defaults.
   17329 	 */
   17330 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17331 	if (enable) {
   17332 		mac_reg &= ~(1 << 14);
   17333 		mac_reg |= (7 << 15);
   17334 	} else
   17335 		mac_reg &= ~(0xf << 14);
   17336 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17337 
   17338 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17339 	if (enable) {
   17340 		mac_reg |= RCTL_SECRC;
   17341 		sc->sc_rctl |= RCTL_SECRC;
   17342 		sc->sc_flags |= WM_F_CRC_STRIP;
   17343 	} else {
   17344 		mac_reg &= ~RCTL_SECRC;
   17345 		sc->sc_rctl &= ~RCTL_SECRC;
   17346 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17347 	}
   17348 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17349 
   17350 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17351 	if (rv != 0)
   17352 		goto out;
   17353 	if (enable)
   17354 		data |= 1 << 0;
   17355 	else
   17356 		data &= ~(1 << 0);
   17357 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17358 	if (rv != 0)
   17359 		goto out;
   17360 
   17361 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17362 	if (rv != 0)
   17363 		goto out;
   17364 	/*
    17365 	 * XXX FreeBSD and Linux write the same value in both the enable
    17366 	 * and the disable case. Is that correct?
   17367 	 */
   17368 	data &= ~(0xf << 8);
   17369 	data |= (0xb << 8);
   17370 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17371 	if (rv != 0)
   17372 		goto out;
   17373 
   17374 	/*
   17375 	 * If enable ==
   17376 	 *	true: Enable jumbo frame workaround in the PHY.
   17377 	 *	false: Write PHY register values back to h/w defaults.
   17378 	 */
   17379 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17380 	if (rv != 0)
   17381 		goto out;
   17382 	data &= ~(0x7F << 5);
   17383 	if (enable)
   17384 		data |= (0x37 << 5);
   17385 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17386 	if (rv != 0)
   17387 		goto out;
   17388 
   17389 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17390 	if (rv != 0)
   17391 		goto out;
   17392 	if (enable)
   17393 		data &= ~(1 << 13);
   17394 	else
   17395 		data |= (1 << 13);
   17396 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17397 	if (rv != 0)
   17398 		goto out;
   17399 
   17400 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17401 	if (rv != 0)
   17402 		goto out;
   17403 	data &= ~(0x3FF << 2);
   17404 	if (enable)
   17405 		data |= (I82579_TX_PTR_GAP << 2);
   17406 	else
   17407 		data |= (0x8 << 2);
   17408 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17409 	if (rv != 0)
   17410 		goto out;
   17411 
   17412 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17413 	    enable ? 0xf100 : 0x7e00);
   17414 	if (rv != 0)
   17415 		goto out;
   17416 
   17417 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17418 	if (rv != 0)
   17419 		goto out;
   17420 	if (enable)
   17421 		data |= 1 << 10;
   17422 	else
   17423 		data &= ~(1 << 10);
   17424 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17425 	if (rv != 0)
   17426 		goto out;
   17427 
   17428 	/* Re-enable Rx path after enabling/disabling workaround */
   17429 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17430 	    dft_ctrl & ~(1 << 14));
   17431 
   17432 out:
   17433 	sc->phy.release(sc);
   17434 
   17435 	return rv;
   17436 }
   17437 
   17438 /*
   17439  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   17440  *  done after every PHY reset.
   17441  */
   17442 static int
   17443 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17444 {
   17445 	device_t dev = sc->sc_dev;
   17446 	int rv;
   17447 
   17448 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17449 		device_xname(dev), __func__));
   17450 	KASSERT(sc->sc_type == WM_T_PCH2);
   17451 
   17452 	/* Set MDIO slow mode before any other MDIO access */
   17453 	rv = wm_set_mdio_slow_mode_hv(sc);
   17454 	if (rv != 0)
   17455 		return rv;
   17456 
   17457 	rv = sc->phy.acquire(sc);
   17458 	if (rv != 0)
   17459 		return rv;
   17460 	/* Set MSE higher to enable link to stay up when noise is high */
   17461 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17462 	if (rv != 0)
   17463 		goto release;
   17464 	/* Drop link after 5 times MSE threshold was reached */
   17465 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17466 release:
   17467 	sc->phy.release(sc);
   17468 
   17469 	return rv;
   17470 }
   17471 
   17472 /**
   17473  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17474  *  @link: link up bool flag
   17475  *
   17476  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   17477  *  preventing further DMA write requests.  Workaround the issue by disabling
    17478  *  preventing further DMA write requests.  Work around the issue by disabling
    17479  *  the de-assertion of the clock request when in 1Gbps mode.
   17480  *  speeds in order to avoid Tx hangs.
   17481  **/
   17482 static int
   17483 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17484 {
   17485 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17486 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17487 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17488 	uint16_t phyreg;
   17489 
   17490 	if (link && (speed == STATUS_SPEED_1000)) {
   17491 		int rv;
   17492 
   17493 		rv = sc->phy.acquire(sc);
   17494 		if (rv != 0)
   17495 			return rv;
   17496 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17497 		    &phyreg);
   17498 		if (rv != 0)
   17499 			goto release;
   17500 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17501 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17502 		if (rv != 0)
   17503 			goto release;
   17504 		delay(20);
   17505 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17506 
   17507 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17508 		    &phyreg);
   17509 release:
   17510 		sc->phy.release(sc);
   17511 		return rv;
   17512 	}
   17513 
   17514 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17515 
   17516 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17517 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17518 	    || !link
   17519 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17520 		goto update_fextnvm6;
   17521 
   17522 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17523 
   17524 	/* Clear link status transmit timeout */
   17525 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17526 	if (speed == STATUS_SPEED_100) {
   17527 		/* Set inband Tx timeout to 5x10us for 100Half */
   17528 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17529 
   17530 		/* Do not extend the K1 entry latency for 100Half */
   17531 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17532 	} else {
   17533 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17534 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17535 
   17536 		/* Extend the K1 entry latency for 10 Mbps */
   17537 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17538 	}
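	/*
	 * The inband timeout field counts in units of 10us, so the values
	 * above work out to 5 * 10us = 50us for 100Half and
	 * 50 * 10us = 500us for 10Full/10Half.
	 */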
   17539 
   17540 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17541 
   17542 update_fextnvm6:
   17543 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17544 	return 0;
   17545 }
   17546 
   17547 /*
   17548  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17549  *  @sc:   pointer to the HW structure
   17550  *  @link: link up bool flag
   17551  *
   17552  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17553  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17554  *  If link is down, the function will restore the default K1 setting located
   17555  *  in the NVM.
   17556  */
   17557 static int
   17558 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17559 {
   17560 	int k1_enable = sc->sc_nvm_k1_enabled;
   17561 	int rv;
   17562 
   17563 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17564 		device_xname(sc->sc_dev), __func__));
   17565 
   17566 	rv = sc->phy.acquire(sc);
   17567 	if (rv != 0)
   17568 		return rv;
   17569 
   17570 	if (link) {
   17571 		k1_enable = 0;
   17572 
   17573 		/* Link stall fix for link up */
   17574 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17575 		    0x0100);
   17576 	} else {
   17577 		/* Link stall fix for link down */
   17578 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17579 		    0x4100);
   17580 	}
   17581 
   17582 	wm_configure_k1_ich8lan(sc, k1_enable);
   17583 	sc->phy.release(sc);
   17584 
   17585 	return 0;
   17586 }
   17587 
   17588 /*
   17589  *  wm_k1_workaround_lv - K1 Si workaround
   17590  *  @sc:   pointer to the HW structure
   17591  *
    17592  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    17593  *  Disable K1 for 1000 and 100 speeds.
   17594  */
   17595 static int
   17596 wm_k1_workaround_lv(struct wm_softc *sc)
   17597 {
   17598 	uint32_t reg;
   17599 	uint16_t phyreg;
   17600 	int rv;
   17601 
   17602 	if (sc->sc_type != WM_T_PCH2)
   17603 		return 0;
   17604 
   17605 	/* Set K1 beacon duration based on 10Mbps speed */
   17606 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17607 	if (rv != 0)
   17608 		return rv;
   17609 
   17610 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17611 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17612 		if (phyreg &
   17613 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17614 			/* LV 1Gbps/100Mbps packet drop issue workaround */
   17615 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17616 			    &phyreg);
   17617 			if (rv != 0)
   17618 				return rv;
   17619 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17620 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17621 			    phyreg);
   17622 			if (rv != 0)
   17623 				return rv;
   17624 		} else {
   17625 			/* For 10Mbps */
   17626 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17627 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17628 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17629 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17630 		}
   17631 	}
   17632 
   17633 	return 0;
   17634 }
   17635 
   17636 /*
   17637  *  wm_link_stall_workaround_hv - Si workaround
   17638  *  @sc: pointer to the HW structure
   17639  *
   17640  *  This function works around a Si bug where the link partner can get
   17641  *  a link up indication before the PHY does. If small packets are sent
   17642  *  by the link partner they can be placed in the packet buffer without
   17643  *  being properly accounted for by the PHY and will stall preventing
   17644  *  further packets from being received.  The workaround is to clear the
   17645  *  packet buffer after the PHY detects link up.
   17646  */
   17647 static int
   17648 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17649 {
   17650 	uint16_t phyreg;
   17651 
   17652 	if (sc->sc_phytype != WMPHY_82578)
   17653 		return 0;
   17654 
    17655 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14) */
   17656 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17657 	if ((phyreg & BMCR_LOOP) != 0)
   17658 		return 0;
   17659 
   17660 	/* Check if link is up and at 1Gbps */
   17661 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17662 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17663 	    | BM_CS_STATUS_SPEED_MASK;
   17664 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17665 		| BM_CS_STATUS_SPEED_1000))
   17666 		return 0;
   17667 
   17668 	delay(200 * 1000);	/* XXX too big */
   17669 
    17670 	/* Flush the packets in the FIFO buffer */
   17671 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17672 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17673 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17674 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17675 
   17676 	return 0;
   17677 }
   17678 
   17679 static int
   17680 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17681 {
   17682 	int rv;
   17683 
   17684 	rv = sc->phy.acquire(sc);
   17685 	if (rv != 0) {
   17686 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17687 		    __func__);
   17688 		return rv;
   17689 	}
   17690 
   17691 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17692 
   17693 	sc->phy.release(sc);
   17694 
   17695 	return rv;
   17696 }
   17697 
   17698 static int
   17699 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17700 {
   17701 	int rv;
   17702 	uint16_t reg;
   17703 
   17704 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17705 	if (rv != 0)
   17706 		return rv;
   17707 
   17708 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17709 	    reg | HV_KMRN_MDIO_SLOW);
   17710 }
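/*
 * A minimal usage sketch for the two entry points above: callers that
 * already hold the PHY semaphore use the _locked variant directly (as
 * wm_phy_is_accessible_pchlan() does below), while everyone else goes
 * through the acquiring wrapper:
 *
 *	if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
 *		return rv;
 */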
   17711 
   17712 /*
   17713  *  wm_configure_k1_ich8lan - Configure K1 power state
   17714  *  @sc: pointer to the HW structure
   17715  *  @enable: K1 state to configure
   17716  *
   17717  *  Configure the K1 power state based on the provided parameter.
   17718  *  Assumes semaphore already acquired.
   17719  */
   17720 static void
   17721 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17722 {
   17723 	uint32_t ctrl, ctrl_ext, tmp;
   17724 	uint16_t kmreg;
   17725 	int rv;
   17726 
   17727 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17728 
   17729 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17730 	if (rv != 0)
   17731 		return;
   17732 
   17733 	if (k1_enable)
   17734 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17735 	else
   17736 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17737 
   17738 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17739 	if (rv != 0)
   17740 		return;
   17741 
   17742 	delay(20);
   17743 
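	/*
	 * Briefly force the MAC speed with the speed bits cleared
	 * (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS), then restore the original
	 * CTRL/CTRL_EXT values, presumably so the PHY latches the new K1
	 * setting; the 20us delays bracket each transition.
	 */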
   17744 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17745 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17746 
   17747 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17748 	tmp |= CTRL_FRCSPD;
   17749 
   17750 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17751 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17752 	CSR_WRITE_FLUSH(sc);
   17753 	delay(20);
   17754 
   17755 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17756 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17757 	CSR_WRITE_FLUSH(sc);
   17758 	delay(20);
   17759 
   17760 	return;
   17761 }
   17762 
    17763 /* Special case: the 82575 needs manual initialization after reset */
   17764 static void
   17765 wm_reset_init_script_82575(struct wm_softc *sc)
   17766 {
    17767 	/*
    17768 	 * Remark: this is untested code - we have no board without EEPROM.
    17769 	 * The setup is the same as in the FreeBSD driver for the i82575.
    17770 	 */
   17771 
   17772 	/* SerDes configuration via SERDESCTRL */
   17773 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17774 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17775 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17776 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17777 
   17778 	/* CCM configuration via CCMCTL register */
   17779 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17780 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17781 
   17782 	/* PCIe lanes configuration */
   17783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17785 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17786 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17787 
   17788 	/* PCIe PLL Configuration */
   17789 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17790 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17791 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17792 }
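/*
 * wm_82575_write_8bit_ctlr_reg(sc, reg, off, val) writes the 8-bit value
 * "val" at address "off" behind the indirect controller register "reg"
 * (SCTL, CCMCTL, GIOCTL or SCCTL above); the offset/value pairs mirror
 * the i82575 init script from the FreeBSD driver, per the remark above.
 */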
   17793 
   17794 static void
   17795 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17796 {
   17797 	uint32_t reg;
   17798 	uint16_t nvmword;
   17799 	int rv;
   17800 
   17801 	if (sc->sc_type != WM_T_82580)
   17802 		return;
   17803 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17804 		return;
   17805 
   17806 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17807 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17808 	if (rv != 0) {
   17809 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17810 		    __func__);
   17811 		return;
   17812 	}
   17813 
   17814 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17815 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17816 		reg |= MDICNFG_DEST;
   17817 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17818 		reg |= MDICNFG_COM_MDIO;
   17819 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17820 }
   17821 
   17822 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
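/*
 * 0x0000 and 0xffff are what reads of an absent or inaccessible PHY
 * typically return (all-zero or floating bus), and neither is a valid
 * PHY ID encoding, so both are treated as "no PHY here".
 */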
   17823 
   17824 static bool
   17825 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17826 {
   17827 	uint32_t reg;
   17828 	uint16_t id1, id2;
   17829 	int i, rv;
   17830 
   17831 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17832 		device_xname(sc->sc_dev), __func__));
   17833 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17834 
   17835 	id1 = id2 = 0xffff;
   17836 	for (i = 0; i < 2; i++) {
   17837 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17838 		    &id1);
   17839 		if ((rv != 0) || MII_INVALIDID(id1))
   17840 			continue;
   17841 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17842 		    &id2);
   17843 		if ((rv != 0) || MII_INVALIDID(id2))
   17844 			continue;
   17845 		break;
   17846 	}
   17847 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17848 		goto out;
   17849 
   17850 	/*
    17851 	 * In case the PHY needs to be in MDIO slow mode,
    17852 	 * set slow mode and try to get the PHY ID again.
   17853 	 */
   17854 	rv = 0;
   17855 	if (sc->sc_type < WM_T_PCH_LPT) {
   17856 		wm_set_mdio_slow_mode_hv_locked(sc);
   17857 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17858 		    &id1);
   17859 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17860 		    &id2);
   17861 	}
   17862 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17863 		device_printf(sc->sc_dev, "XXX return with false\n");
   17864 		return false;
   17865 	}
   17866 out:
   17867 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17868 		/* Only unforce SMBus if ME is not active */
   17869 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17870 			uint16_t phyreg;
   17871 
   17872 			/* Unforce SMBus mode in PHY */
   17873 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17874 			    CV_SMB_CTRL, &phyreg);
   17875 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17876 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17877 			    CV_SMB_CTRL, phyreg);
   17878 
   17879 			/* Unforce SMBus mode in MAC */
   17880 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17881 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17882 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17883 		}
   17884 	}
   17885 	return true;
   17886 }
   17887 
   17888 static void
   17889 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17890 {
   17891 	uint32_t reg;
   17892 	int i;
   17893 
   17894 	/* Set PHY Config Counter to 50msec */
   17895 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17896 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17897 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17898 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17899 
   17900 	/* Toggle LANPHYPC */
   17901 	reg = CSR_READ(sc, WMREG_CTRL);
   17902 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17903 	reg &= ~CTRL_LANPHYPC_VALUE;
   17904 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17905 	CSR_WRITE_FLUSH(sc);
   17906 	delay(1000);
   17907 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17908 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17909 	CSR_WRITE_FLUSH(sc);
   17910 
   17911 	if (sc->sc_type < WM_T_PCH_LPT)
   17912 		delay(50 * 1000);
   17913 	else {
   17914 		i = 20;
   17915 
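		/*
		 * Poll for the LANPHYPC cycle to complete (CTRL_EXT_LPCD),
		 * up to 20 * 5ms = 100ms, then allow another 30ms for the
		 * PHY to settle.
		 */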
   17916 		do {
   17917 			delay(5 * 1000);
   17918 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17919 		    && i--);
   17920 
   17921 		delay(30 * 1000);
   17922 	}
   17923 }
   17924 
   17925 static int
   17926 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17927 {
   17928 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17929 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17930 	uint32_t rxa;
   17931 	uint16_t scale = 0, lat_enc = 0;
   17932 	int32_t obff_hwm = 0;
   17933 	int64_t lat_ns, value;
   17934 
   17935 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17936 		device_xname(sc->sc_dev), __func__));
   17937 
   17938 	if (link) {
   17939 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17940 		uint32_t status;
   17941 		uint16_t speed;
   17942 		pcireg_t preg;
   17943 
   17944 		status = CSR_READ(sc, WMREG_STATUS);
   17945 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17946 		case STATUS_SPEED_10:
   17947 			speed = 10;
   17948 			break;
   17949 		case STATUS_SPEED_100:
   17950 			speed = 100;
   17951 			break;
   17952 		case STATUS_SPEED_1000:
   17953 			speed = 1000;
   17954 			break;
   17955 		default:
   17956 			device_printf(sc->sc_dev, "Unknown speed "
   17957 			    "(status = %08x)\n", status);
   17958 			return -1;
   17959 		}
   17960 
   17961 		/* Rx Packet Buffer Allocation size (KB) */
   17962 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17963 
   17964 		/*
   17965 		 * Determine the maximum latency tolerated by the device.
   17966 		 *
   17967 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17968 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17969 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17970 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17971 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17972 		 */
   17973 		lat_ns = ((int64_t)rxa * 1024 -
   17974 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17975 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17976 		if (lat_ns < 0)
   17977 			lat_ns = 0;
   17978 		else
   17979 			lat_ns /= speed;
   17980 		value = lat_ns;
   17981 
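		/*
		 * Worked example with illustrative numbers: lat_ns = 2000000
		 * does not fit in the 10-bit value field (max 1023), so the
		 * loop below divides by 2^5 rounding up:
		 * 2000000 -> 62500 -> 1954 -> 62, giving scale = 3 and
		 * value = 62, i.e. 62 * 2^15 ns (about 2.03ms).  Rounding up
		 * keeps the encoded latency at or above the computed one.
		 */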
   17982 		while (value > LTRV_VALUE) {
    17983 			scale++;
   17984 			value = howmany(value, __BIT(5));
   17985 		}
   17986 		if (scale > LTRV_SCALE_MAX) {
   17987 			device_printf(sc->sc_dev,
   17988 			    "Invalid LTR latency scale %d\n", scale);
   17989 			return -1;
   17990 		}
   17991 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17992 
   17993 		/* Determine the maximum latency tolerated by the platform */
   17994 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17995 		    WM_PCI_LTR_CAP_LPT);
   17996 		max_snoop = preg & 0xffff;
   17997 		max_nosnoop = preg >> 16;
   17998 
   17999 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18000 
   18001 		if (lat_enc > max_ltr_enc) {
   18002 			lat_enc = max_ltr_enc;
   18003 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18004 			    * PCI_LTR_SCALETONS(
   18005 				    __SHIFTOUT(lat_enc,
   18006 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18007 		}
   18008 
   18009 		if (lat_ns) {
   18010 			lat_ns *= speed * 1000;
   18011 			lat_ns /= 8;
   18012 			lat_ns /= 1000000000;
   18013 			obff_hwm = (int32_t)(rxa - lat_ns);
   18014 		}
   18015 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    18016 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    18017 			    "(rxa = %d, lat_ns = %d)\n",
   18018 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18019 			return -1;
   18020 		}
   18021 	}
    18022 	/* Snoop and No-Snoop latencies are the same */
   18023 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18024 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18025 
   18026 	/* Set OBFF high water mark */
   18027 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18028 	reg |= obff_hwm;
   18029 	CSR_WRITE(sc, WMREG_SVT, reg);
   18030 
   18031 	/* Enable OBFF */
   18032 	reg = CSR_READ(sc, WMREG_SVCR);
   18033 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18034 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18035 
   18036 	return 0;
   18037 }
   18038 
   18039 /*
   18040  * I210 Errata 25 and I211 Errata 10
   18041  * Slow System Clock.
   18042  *
    18043  * Note that this function is called for both FLASH and iNVM cases on NetBSD.
   18044  */
   18045 static int
   18046 wm_pll_workaround_i210(struct wm_softc *sc)
   18047 {
   18048 	uint32_t mdicnfg, wuc;
   18049 	uint32_t reg;
   18050 	pcireg_t pcireg;
   18051 	uint32_t pmreg;
   18052 	uint16_t nvmword, tmp_nvmword;
   18053 	uint16_t phyval;
   18054 	bool wa_done = false;
   18055 	int i, rv = 0;
   18056 
   18057 	/* Get Power Management cap offset */
   18058 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18059 	    &pmreg, NULL) == 0)
   18060 		return -1;
   18061 
   18062 	/* Save WUC and MDICNFG registers */
   18063 	wuc = CSR_READ(sc, WMREG_WUC);
   18064 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18065 
   18066 	reg = mdicnfg & ~MDICNFG_DEST;
   18067 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18068 
   18069 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18070 		/*
   18071 		 * The default value of the Initialization Control Word 1
   18072 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18073 		 */
   18074 		nvmword = INVM_DEFAULT_AL;
   18075 	}
   18076 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18077 
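	/*
	 * Retry loop: while the internal PHY reports an unconfigured PLL
	 * (the symptom of this erratum), reset the PHY, write the patched
	 * Initialization Control Word to EEARBC, and bounce the function
	 * through D3/D0 so the autoload is retried; the original word and
	 * the saved WUC value are restored afterwards.
	 */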
   18078 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18079 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18080 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18081 
   18082 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18083 			rv = 0;
   18084 			break; /* OK */
   18085 		} else
   18086 			rv = -1;
   18087 
   18088 		wa_done = true;
   18089 		/* Directly reset the internal PHY */
   18090 		reg = CSR_READ(sc, WMREG_CTRL);
   18091 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18092 
   18093 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18094 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18095 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18096 
   18097 		CSR_WRITE(sc, WMREG_WUC, 0);
   18098 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18099 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18100 
   18101 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18102 		    pmreg + PCI_PMCSR);
   18103 		pcireg |= PCI_PMCSR_STATE_D3;
   18104 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18105 		    pmreg + PCI_PMCSR, pcireg);
   18106 		delay(1000);
   18107 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18108 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18109 		    pmreg + PCI_PMCSR, pcireg);
   18110 
   18111 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18112 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18113 
   18114 		/* Restore WUC register */
   18115 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18116 	}
   18117 
   18118 	/* Restore MDICNFG setting */
   18119 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18120 	if (wa_done)
   18121 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18122 	return rv;
   18123 }
   18124 
   18125 static void
   18126 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18127 {
   18128 	uint32_t reg;
   18129 
   18130 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18131 		device_xname(sc->sc_dev), __func__));
   18132 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18133 	    || (sc->sc_type == WM_T_PCH_CNP));
   18134 
   18135 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18136 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18137 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18138 
   18139 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18140 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18141 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18142 }
   18143 
   18144 /* Sysctl functions */
   18145 static int
   18146 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18147 {
   18148 	struct sysctlnode node = *rnode;
   18149 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18150 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18151 	struct wm_softc *sc = txq->txq_sc;
   18152 	uint32_t reg;
   18153 
   18154 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18155 	node.sysctl_data = &reg;
   18156 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18157 }
   18158 
   18159 static int
   18160 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18161 {
   18162 	struct sysctlnode node = *rnode;
   18163 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18164 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18165 	struct wm_softc *sc = txq->txq_sc;
   18166 	uint32_t reg;
   18167 
   18168 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18169 	node.sysctl_data = &reg;
   18170 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18171 }
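/*
 * Both handlers are read-only: each sysctl query snapshots the hardware
 * ring pointer (TDH or TDT) for the queue at call time, which is handy
 * for spotting a stalled Tx ring (TDH stuck while TDT keeps advancing).
 */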
   18172 
   18173 #ifdef WM_DEBUG
   18174 static int
   18175 wm_sysctl_debug(SYSCTLFN_ARGS)
   18176 {
   18177 	struct sysctlnode node = *rnode;
   18178 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18179 	uint32_t dflags;
   18180 	int error;
   18181 
   18182 	dflags = sc->sc_debug;
   18183 	node.sysctl_data = &dflags;
   18184 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18185 
   18186 	if (error || newp == NULL)
   18187 		return error;
   18188 
   18189 	sc->sc_debug = dflags;
   18190 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18191 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18192 
   18193 	return 0;
   18194 }
   18195 #endif
   18196